diff --git a/cmd/ipfs/main.go b/cmd/ipfs/main.go index e75a93c9f933..814ae04fda5b 100644 --- a/cmd/ipfs/main.go +++ b/cmd/ipfs/main.go @@ -19,6 +19,7 @@ import ( oldcmds "github.com/ipfs/go-ipfs/commands" core "github.com/ipfs/go-ipfs/core" corecmds "github.com/ipfs/go-ipfs/core/commands" + cmdenv "github.com/ipfs/go-ipfs/core/commands/cmdenv" corehttp "github.com/ipfs/go-ipfs/core/corehttp" loader "github.com/ipfs/go-ipfs/plugin/loader" repo "github.com/ipfs/go-ipfs/repo" @@ -28,6 +29,7 @@ import ( manet "gx/ipfs/QmQVUtnrNGtCRkCMpXgpApfzQjc8FDaDVxHqWH8cnZQeh5/go-multiaddr-net" ma "gx/ipfs/QmRKLtwMw131aK7ugC3G7ybpumMz78YrJe5dzneyindvG1/go-multiaddr" madns "gx/ipfs/QmT4zgnKCyZBpRyxzsvZqUjzUkMWLJ2pZCw7uk6M6Kto5m/go-multiaddr-dns" + cidenc "gx/ipfs/QmWf8NwKFLbTBvAvZst3bYF7WEEetzxWyMhvQ885cj9MM8/go-cidutil/cidenc" osh "gx/ipfs/QmXuBJ7DR6k3rmUEKtvVMhwjmXDuJgXXPUt4LQXKBMsU93/go-os-helper" "gx/ipfs/Qma6uuSyjkecGhMFFLfzyJDPyoDtNJSHJNweDccZhaWkgU/go-ipfs-cmds" "gx/ipfs/Qma6uuSyjkecGhMFFLfzyJDPyoDtNJSHJNweDccZhaWkgU/go-ipfs-cmds/cli" @@ -116,6 +118,12 @@ func mainRet() int { } log.Debugf("config path is %s", repoPath) + enc, err := cmdenv.ProcCidBase(req) + if err != nil { + return nil, err + } + cidenc.Default = enc + // this sets up the function that will initialize the node // this is so that we can construct the node lazily. return &oldcmds.Context{ diff --git a/core/commands/add.go b/core/commands/add.go index 6567a8282c75..a3929ec6d5cc 100644 --- a/core/commands/add.go +++ b/core/commands/add.go @@ -225,11 +225,6 @@ You can now check what blocks have been created by: outChan := make(chan interface{}) req := res.Request() - err := cmdenv.ProcCidBaseClientSide(req) - if err != nil { - return err - } - sizeFile, ok := req.Files.(files.SizeFile) if ok { // Could be slow. 
diff --git a/core/commands/cmdenv/cidbase.go b/core/commands/cmdenv/cidbase.go index ee39b74a180d..d0f57868e1c8 100644 --- a/core/commands/cmdenv/cidbase.go +++ b/core/commands/cmdenv/cidbase.go @@ -24,9 +24,7 @@ func ProcCidBase(req *cmds.Request) (cidenc.Encoder, error) { if err != nil { return e, err } - if !upgradeDefined { - e.Upgrade = true - } + e.Upgrade = true } if upgradeDefined { @@ -36,13 +34,7 @@ func ProcCidBase(req *cmds.Request) (cidenc.Encoder, error) { return e, nil } -// ProcCidBaseClientSide processes the `cid-base` and `output-cidv1` -// options and sets the default encoder based on those options -func ProcCidBaseClientSide(req *cmds.Request) error { - enc, err := ProcCidBase(req) - if err != nil { - return err - } - cidenc.Default = enc - return nil +func CidBaseDefined(req *cmds.Request) bool { + base, _ := req.Options["cid-base"].(string) + return base != "" } diff --git a/core/commands/filestore.go b/core/commands/filestore.go index 798b0ec8e1d4..6d79231b8bca 100644 --- a/core/commands/filestore.go +++ b/core/commands/filestore.go @@ -11,6 +11,7 @@ import ( filestore "github.com/ipfs/go-ipfs/filestore" cid "gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid" + apicid "gx/ipfs/QmWf8NwKFLbTBvAvZst3bYF7WEEetzxWyMhvQ885cj9MM8/go-cidutil/apicid" cmds "gx/ipfs/Qma6uuSyjkecGhMFFLfzyJDPyoDtNJSHJNweDccZhaWkgU/go-ipfs-cmds" "gx/ipfs/Qmde5VP1qUkyQXKCfmEUA7bP64V2HAptbJ7phuPp7jXWwg/go-ipfs-cmdkit" ) @@ -175,6 +176,11 @@ For ERROR entries the error will also be printed to stderr. 
Type: filestore.ListRes{}, } +type FilestoreDupsOutput struct { + Ref apicid.Hash + Err string +} + var dupsFileStore = &cmds.Command{ Helptext: cmdkit.HelpText{ Tagline: "List blocks that are both in the filestore and standard block storage.", @@ -192,10 +198,10 @@ var dupsFileStore = &cmds.Command{ for cid := range ch { have, err := fs.MainBlockstore().Has(cid) if err != nil { - return res.Emit(&RefWrapper{Err: err.Error()}) + return res.Emit(&FilestoreDupsOutput{Err: err.Error()}) } if have { - if err := res.Emit(&RefWrapper{Ref: cid.String()}); err != nil { + if err := res.Emit(&FilestoreDupsOutput{Ref: apicid.FromCid(cid)}); err != nil { return err } } @@ -203,8 +209,17 @@ var dupsFileStore = &cmds.Command{ return nil }, - Encoders: refsEncoderMap, - Type: RefWrapper{}, + Encoders: cmds.EncoderMap{ + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *FilestoreDupsOutput) error { + if out.Err != "" { + return fmt.Errorf(out.Err) + } + fmt.Fprintln(w, out.Ref) + + return nil + }), + }, + Type: FilestoreDupsOutput{}, } func getFilestore(env cmds.Environment) (*core.IpfsNode, *filestore.Filestore, error) { diff --git a/core/commands/ls.go b/core/commands/ls.go index 422f554d1a7f..10f90fb069a4 100644 --- a/core/commands/ls.go +++ b/core/commands/ls.go @@ -14,6 +14,7 @@ import ( uio "gx/ipfs/QmUnHNqhSB1JgzVCxL1Kz3yb4bdyB4q1Z9AD5AUBVmt3fZ/go-unixfs/io" unixfspb "gx/ipfs/QmUnHNqhSB1JgzVCxL1Kz3yb4bdyB4q1Z9AD5AUBVmt3fZ/go-unixfs/pb" blockservice "gx/ipfs/QmVDTbzzTwnuBwNbJdhW3u7LoBQp46bezm9yp4z1RoEepM/go-blockservice" + apicid "gx/ipfs/QmWf8NwKFLbTBvAvZst3bYF7WEEetzxWyMhvQ885cj9MM8/go-cidutil/apicid" offline "gx/ipfs/QmYZwey1thDTynSrvd6qQkX24UpTka6TFhQ2v569UpoqxD/go-ipfs-exchange-offline" cmds "gx/ipfs/Qma6uuSyjkecGhMFFLfzyJDPyoDtNJSHJNweDccZhaWkgU/go-ipfs-cmds" merkledag "gx/ipfs/QmcGt25mrjuB2kKW2zhPbXVZNHc4yoTDQ65NA8m6auP2f1/go-merkledag" @@ -23,9 +24,10 @@ import ( // LsLink contains printable data for a single ipld link in ls output type 
LsLink struct { - Name, Hash string - Size uint64 - Type unixfspb.Data_DataType + Name string + Hash apicid.Hash + Size uint64 + Type unixfspb.Data_DataType } // LsObject is an element of LsOutput @@ -250,7 +252,7 @@ func makeLsLink(req *cmds.Request, dserv ipld.DAGService, resolve bool, link *ip } return &LsLink{ Name: link.Name, - Hash: link.Cid.String(), + Hash: apicid.FromCid(link.Cid), Size: link.Size, Type: t, }, nil diff --git a/core/commands/pin.go b/core/commands/pin.go index a9629c683759..d86f1260d176 100644 --- a/core/commands/pin.go +++ b/core/commands/pin.go @@ -17,6 +17,7 @@ import ( cid "gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid" bserv "gx/ipfs/QmVDTbzzTwnuBwNbJdhW3u7LoBQp46bezm9yp4z1RoEepM/go-blockservice" + apicid "gx/ipfs/QmWf8NwKFLbTBvAvZst3bYF7WEEetzxWyMhvQ885cj9MM8/go-cidutil/apicid" "gx/ipfs/QmYMQuypUbgsdNHmuCBSUJV6wdQVsBHRivNAp3efHJwZJD/go-verifcid" offline "gx/ipfs/QmYZwey1thDTynSrvd6qQkX24UpTka6TFhQ2v569UpoqxD/go-ipfs-exchange-offline" cmds "gx/ipfs/Qma6uuSyjkecGhMFFLfzyJDPyoDtNJSHJNweDccZhaWkgU/go-ipfs-cmds" @@ -39,11 +40,11 @@ var PinCmd = &cmds.Command{ } type PinOutput struct { - Pins []string + Pins []apicid.Hash } type AddPinOutput struct { - Pins []string + Pins []apicid.Hash Progress int `json:",omitempty"` } @@ -92,7 +93,7 @@ var addPinCmd = &cmds.Command{ if err != nil { return err } - return cmds.EmitOnce(res, &AddPinOutput{Pins: cidsToStrings(added)}) + return cmds.EmitOnce(res, &AddPinOutput{Pins: toAPICids(added)}) } v := new(dag.ProgressTracker) @@ -124,7 +125,7 @@ var addPinCmd = &cmds.Command{ return err } } - return res.Emit(&AddPinOutput{Pins: cidsToStrings(val.pins)}) + return res.Emit(&AddPinOutput{Pins: toAPICids(val.pins)}) case <-ticker.C: if err := res.Emit(&AddPinOutput{Progress: v.Value()}); err != nil { return err @@ -220,7 +221,7 @@ collected if needed. (By default, recursively. Use -r=false for direct pins.) 
return err } - return cmds.EmitOnce(res, &PinOutput{cidsToStrings(removed)}) + return cmds.EmitOnce(res, &PinOutput{toAPICids(removed)}) }, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *PinOutput) error { @@ -311,7 +312,7 @@ Example: return err } - var keys map[string]RefKeyObject + var keys map[apicid.Hash]RefKeyObject if len(req.Arguments) > 0 { keys, err = pinLsKeys(req.Context, req.Arguments, typeStr, n, api) @@ -347,6 +348,10 @@ const ( pinUnpinOptionName = "unpin" ) +type UpdatePinOutput struct { + Pins []string // really paths +} + var updatePinCmd = &cmds.Command{ Helptext: cmdkit.HelpText{ Tagline: "Update a recursive pin", @@ -364,7 +369,7 @@ new pin and removing the old one. Options: []cmdkit.Option{ cmdkit.BoolOption(pinUnpinOptionName, "Remove the old pin.").WithDefault(true), }, - Type: PinOutput{}, + Type: UpdatePinOutput{}, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { api, err := cmdenv.GetApi(env) if err != nil { @@ -388,10 +393,10 @@ new pin and removing the old one. 
return err } - return cmds.EmitOnce(res, &PinOutput{Pins: []string{from.String(), to.String()}}) + return cmds.EmitOnce(res, &UpdatePinOutput{Pins: []string{from.String(), to.String()}}) }, Encoders: cmds.EncoderMap{ - cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *PinOutput) error { + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *UpdatePinOutput) error { fmt.Fprintf(w, "updated %s to %s\n", out.Pins[0], out.Pins[1]) return nil }), @@ -452,17 +457,17 @@ type RefKeyObject struct { } type RefKeyList struct { - Keys map[string]RefKeyObject + Keys map[apicid.Hash]RefKeyObject } -func pinLsKeys(ctx context.Context, args []string, typeStr string, n *core.IpfsNode, api iface.CoreAPI) (map[string]RefKeyObject, error) { +func pinLsKeys(ctx context.Context, args []string, typeStr string, n *core.IpfsNode, api iface.CoreAPI) (map[apicid.Hash]RefKeyObject, error) { mode, ok := pin.StringToMode(typeStr) if !ok { return nil, fmt.Errorf("invalid pin mode '%s'", typeStr) } - keys := make(map[string]RefKeyObject) + keys := make(map[apicid.Hash]RefKeyObject) for _, p := range args { pth, err := iface.ParsePath(p) @@ -489,7 +494,7 @@ func pinLsKeys(ctx context.Context, args []string, typeStr string, n *core.IpfsN default: pinType = "indirect through " + pinType } - keys[c.Cid().String()] = RefKeyObject{ + keys[apicid.FromCid(c.Cid())] = RefKeyObject{ Type: pinType, } } @@ -497,13 +502,13 @@ func pinLsKeys(ctx context.Context, args []string, typeStr string, n *core.IpfsN return keys, nil } -func pinLsAll(ctx context.Context, typeStr string, n *core.IpfsNode) (map[string]RefKeyObject, error) { +func pinLsAll(ctx context.Context, typeStr string, n *core.IpfsNode) (map[apicid.Hash]RefKeyObject, error) { - keys := make(map[string]RefKeyObject) + keys := make(map[apicid.Hash]RefKeyObject) AddToResultKeys := func(keyList []cid.Cid, typeStr string) { for _, c := range keyList { - keys[c.String()] = RefKeyObject{ + 
keys[apicid.FromCid(c)] = RefKeyObject{ Type: typeStr, } } @@ -531,7 +536,7 @@ func pinLsAll(ctx context.Context, typeStr string, n *core.IpfsNode) (map[string // PinVerifyRes is the result returned for each pin checked in "pin verify" type PinVerifyRes struct { - Cid string + Cid apicid.Hash PinStatus } @@ -543,7 +548,7 @@ type PinStatus struct { // BadNode is used in PinVerifyRes type BadNode struct { - Cid string + Cid apicid.Hash Err string } @@ -553,7 +558,7 @@ type pinVerifyOpts struct { } func pinVerify(ctx context.Context, n *core.IpfsNode, opts pinVerifyOpts) <-chan interface{} { - visited := make(map[string]PinStatus) + visited := make(map[cid.Cid]PinStatus) bs := n.Blocks.Blockstore() DAG := dag.NewDAGService(bserv.New(bs, offline.Exchange(bs))) @@ -562,7 +567,7 @@ func pinVerify(ctx context.Context, n *core.IpfsNode, opts pinVerifyOpts) <-chan var checkPin func(root cid.Cid) PinStatus checkPin = func(root cid.Cid) PinStatus { - key := root.String() + key := root if status, ok := visited[key]; ok { return status } @@ -570,7 +575,7 @@ func pinVerify(ctx context.Context, n *core.IpfsNode, opts pinVerifyOpts) <-chan if err := verifcid.ValidateCid(root); err != nil { status := PinStatus{Ok: false} if opts.explain { - status.BadNodes = []BadNode{BadNode{Cid: key, Err: err.Error()}} + status.BadNodes = []BadNode{BadNode{Cid: apicid.FromCid(key), Err: err.Error()}} } visited[key] = status return status @@ -580,7 +585,7 @@ func pinVerify(ctx context.Context, n *core.IpfsNode, opts pinVerifyOpts) <-chan if err != nil { status := PinStatus{Ok: false} if opts.explain { - status.BadNodes = []BadNode{BadNode{Cid: key, Err: err.Error()}} + status.BadNodes = []BadNode{BadNode{Cid: apicid.FromCid(key), Err: err.Error()}} } visited[key] = status return status @@ -606,7 +611,7 @@ func pinVerify(ctx context.Context, n *core.IpfsNode, opts pinVerifyOpts) <-chan pinStatus := checkPin(cid) if !pinStatus.Ok || opts.includeOk { select { - case out <- &PinVerifyRes{cid.String(), 
pinStatus}: + case out <- &PinVerifyRes{apicid.FromCid(cid), pinStatus}: case <-ctx.Done(): return } @@ -629,10 +634,10 @@ func (r PinVerifyRes) Format(out io.Writer) { } } -func cidsToStrings(cs []cid.Cid) []string { - out := make([]string, 0, len(cs)) +func toAPICids(cs []cid.Cid) []apicid.Hash { + out := make([]apicid.Hash, 0, len(cs)) for _, c := range cs { - out = append(out, c.String()) + out = append(out, apicid.FromCid(c)) } return out } diff --git a/core/commands/refs.go b/core/commands/refs.go index 63b6a465db74..5db781f156b2 100644 --- a/core/commands/refs.go +++ b/core/commands/refs.go @@ -12,6 +12,7 @@ import ( cid "gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid" path "gx/ipfs/QmVi2uUygezqaMTqs3Yzt5FcZFHJoYD4B7jQ2BELjj7ZuY/go-path" + cidenc "gx/ipfs/QmWf8NwKFLbTBvAvZst3bYF7WEEetzxWyMhvQ885cj9MM8/go-cidutil/cidenc" cmds "gx/ipfs/Qma6uuSyjkecGhMFFLfzyJDPyoDtNJSHJNweDccZhaWkgU/go-ipfs-cmds" ipld "gx/ipfs/QmcKKBwfz6FyQdHR2jsXrrF6XeSBXYL86anmWNewpFpoF5/go-ipld-format" cmdkit "gx/ipfs/Qmde5VP1qUkyQXKCfmEUA7bP64V2HAptbJ7phuPp7jXWwg/go-ipfs-cmdkit" @@ -79,6 +80,11 @@ NOTE: List all references recursively by using the flag '-r'. return err } + enc, err := cmdenv.ProcCidBase(req) + if err != nil { + return err + } + unique, _ := req.Options[refsUniqueOptionName].(bool) recursive, _ := req.Options[refsRecursiveOptionName].(bool) maxDepth, _ := req.Options[refsMaxDepthOptionName].(int) @@ -112,7 +118,7 @@ NOTE: List all references recursively by using the flag '-r'. } for _, o := range objs { - if _, err := rw.WriteRefs(o); err != nil { + if _, err := rw.WriteRefs(o, enc); err != nil { if err := res.Emit(&RefWrapper{Err: err.Error()}); err != nil { return err } @@ -194,11 +200,11 @@ type RefWriter struct { } // WriteRefs writes refs of the given object to the underlying writer. 
-func (rw *RefWriter) WriteRefs(n ipld.Node) (int, error) { - return rw.writeRefsRecursive(n, 0) +func (rw *RefWriter) WriteRefs(n ipld.Node, enc cidenc.Interface) (int, error) { + return rw.writeRefsRecursive(n, 0, enc) } -func (rw *RefWriter) writeRefsRecursive(n ipld.Node, depth int) (int, error) { +func (rw *RefWriter) writeRefsRecursive(n ipld.Node, depth int, enc cidenc.Interface) (int, error) { nc := n.Cid() var count int @@ -228,7 +234,7 @@ func (rw *RefWriter) writeRefsRecursive(n ipld.Node, depth int) (int, error) { // Write this node if not done before (or !Unique) if shouldWrite { - if err := rw.WriteEdge(nc, lc, n.Links()[i].Name); err != nil { + if err := rw.WriteEdge(nc, lc, n.Links()[i].Name, enc); err != nil { return count, err } count++ @@ -240,7 +246,7 @@ func (rw *RefWriter) writeRefsRecursive(n ipld.Node, depth int) (int, error) { // Note when !Unique, branches are always considered // unexplored and only depth limits apply. if goDeeper { - c, err := rw.writeRefsRecursive(nd, depth+1) + c, err := rw.writeRefsRecursive(nd, depth+1, enc) count += c if err != nil { return count, err @@ -309,7 +315,7 @@ func (rw *RefWriter) visit(c cid.Cid, depth int) (bool, bool) { } // Write one edge -func (rw *RefWriter) WriteEdge(from, to cid.Cid, linkname string) error { +func (rw *RefWriter) WriteEdge(from, to cid.Cid, linkname string, enc cidenc.Interface) error { if rw.Ctx != nil { select { case <-rw.Ctx.Done(): // just in case. 
@@ -322,11 +328,11 @@ func (rw *RefWriter) WriteEdge(from, to cid.Cid, linkname string) error { switch { case rw.PrintFmt != "": s = rw.PrintFmt - s = strings.Replace(s, "", from.String(), -1) - s = strings.Replace(s, "", to.String(), -1) + s = strings.Replace(s, "", enc.Encode(from), -1) + s = strings.Replace(s, "", enc.Encode(to), -1) s = strings.Replace(s, "", linkname, -1) default: - s += to.String() + s += enc.Encode(to) } return rw.res.Emit(&RefWrapper{Ref: s}) diff --git a/core/commands/resolve.go b/core/commands/resolve.go index 810b07b9adfd..a0b271f671d4 100644 --- a/core/commands/resolve.go +++ b/core/commands/resolve.go @@ -15,6 +15,7 @@ import ( nsopts "github.com/ipfs/go-ipfs/namesys/opts" path "gx/ipfs/QmVi2uUygezqaMTqs3Yzt5FcZFHJoYD4B7jQ2BELjj7ZuY/go-path" + cidenc "gx/ipfs/QmWf8NwKFLbTBvAvZst3bYF7WEEetzxWyMhvQ885cj9MM8/go-cidutil/cidenc" cmds "gx/ipfs/Qma6uuSyjkecGhMFFLfzyJDPyoDtNJSHJNweDccZhaWkgU/go-ipfs-cmds" cmdkit "gx/ipfs/Qmde5VP1qUkyQXKCfmEUA7bP64V2HAptbJ7phuPp7jXWwg/go-ipfs-cmdkit" ) @@ -94,6 +95,14 @@ Resolve the value of an IPFS DAG path: name := req.Arguments[0] recursive, _ := req.Options[resolveRecursiveOptionName].(bool) + enc, err := cmdenv.ProcCidBase(req) + if err != nil { + return err + } + if !cmdenv.CidBaseDefined(req) { + enc, _ = cidenc.FromPath(enc, name) + } + // the case when ipns is resolved step by step if strings.HasPrefix(name, "/ipns/") && !recursive { rc, rcok := req.Options[resolveDhtRecordCountOptionName].(uint) @@ -140,7 +149,7 @@ Resolve the value of an IPFS DAG path: return fmt.Errorf("found non-link at given path") } - return cmds.EmitOnce(res, &ncmd.ResolvedPath{Path: path.Path("/" + rp.Namespace() + "/" + rp.Cid().String())}) + return cmds.EmitOnce(res, &ncmd.ResolvedPath{Path: path.Path("/" + rp.Namespace() + "/" + enc.Encode(rp.Cid()))}) }, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, rp *ncmd.ResolvedPath) error { diff --git a/core/commands/tar.go 
b/core/commands/tar.go index bc672017b7e7..d26cd5d7ad2f 100644 --- a/core/commands/tar.go +++ b/core/commands/tar.go @@ -66,11 +66,6 @@ represent it. Type: coreiface.AddEvent{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *coreiface.AddEvent) error { - err := cmdenv.ProcCidBaseClientSide(req) - if err != nil { - return err - } - fmt.Fprintln(w, out.Hash) return nil }), diff --git a/core/commands/urlstore.go b/core/commands/urlstore.go index 1ccf357da383..5c48ed3a95dd 100644 --- a/core/commands/urlstore.go +++ b/core/commands/urlstore.go @@ -13,6 +13,7 @@ import ( balanced "gx/ipfs/QmUnHNqhSB1JgzVCxL1Kz3yb4bdyB4q1Z9AD5AUBVmt3fZ/go-unixfs/importer/balanced" ihelper "gx/ipfs/QmUnHNqhSB1JgzVCxL1Kz3yb4bdyB4q1Z9AD5AUBVmt3fZ/go-unixfs/importer/helpers" trickle "gx/ipfs/QmUnHNqhSB1JgzVCxL1Kz3yb4bdyB4q1Z9AD5AUBVmt3fZ/go-unixfs/importer/trickle" + apicid "gx/ipfs/QmWf8NwKFLbTBvAvZst3bYF7WEEetzxWyMhvQ885cj9MM8/go-cidutil/apicid" cmds "gx/ipfs/Qma6uuSyjkecGhMFFLfzyJDPyoDtNJSHJNweDccZhaWkgU/go-ipfs-cmds" cmdkit "gx/ipfs/Qmde5VP1qUkyQXKCfmEUA7bP64V2HAptbJ7phuPp7jXWwg/go-ipfs-cmdkit" mh "gx/ipfs/QmerPMzPk1mJVowm8KgmoknWa4yCYvvugMPsgWmDNUvDLW/go-multihash" @@ -24,6 +25,11 @@ var urlStoreCmd = &cmds.Command{ }, } +type UrlstoreAddOutput struct { + Key apicid.Hash + Size int +} + var urlAdd = &cmds.Command{ Helptext: cmdkit.HelpText{ Tagline: "Add URL via urlstore.", @@ -50,7 +56,7 @@ time. Arguments: []cmdkit.Argument{ cmdkit.StringArg("url", true, false, "URL to add to IPFS"), }, - Type: &BlockStat{}, + Type: &UrlstoreAddOutput{}, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { url := req.Arguments[0] @@ -107,13 +113,13 @@ time. 
return err } - return cmds.EmitOnce(res, &BlockStat{ - Key: root.Cid().String(), + return cmds.EmitOnce(res, &UrlstoreAddOutput{ + Key: apicid.FromCid(root.Cid()), Size: int(hres.ContentLength), }) }, Encoders: cmds.EncoderMap{ - cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, bs *BlockStat) error { + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, bs *UrlstoreAddOutput) error { _, err := fmt.Fprintln(w, bs.Key) return err }), diff --git a/core/coreunix/add.go b/core/coreunix/add.go index ad4f033b59d0..cbb62d5971eb 100644 --- a/core/coreunix/add.go +++ b/core/coreunix/add.go @@ -23,6 +23,7 @@ import ( ihelper "gx/ipfs/QmUnHNqhSB1JgzVCxL1Kz3yb4bdyB4q1Z9AD5AUBVmt3fZ/go-unixfs/importer/helpers" trickle "gx/ipfs/QmUnHNqhSB1JgzVCxL1Kz3yb4bdyB4q1Z9AD5AUBVmt3fZ/go-unixfs/importer/trickle" mfs "gx/ipfs/QmV8mXUh1M9qztax7vVdL1Apuz4c1eJZC5YactGxaJfWom/go-mfs" + apicid "gx/ipfs/QmWf8NwKFLbTBvAvZst3bYF7WEEetzxWyMhvQ885cj9MM8/go-cidutil/apicid" files "gx/ipfs/QmZMWMvWMVKCbHetJ4RgndbuEF1io2UpUxwQwtNjtYPzSC/go-ipfs-files" dag "gx/ipfs/QmcGt25mrjuB2kKW2zhPbXVZNHc4yoTDQ65NA8m6auP2f1/go-merkledag" ipld "gx/ipfs/QmcKKBwfz6FyQdHR2jsXrrF6XeSBXYL86anmWNewpFpoF5/go-ipld-format" diff --git a/filestore/util.go b/filestore/util.go index af25da2726ac..c606b3dede21 100644 --- a/filestore/util.go +++ b/filestore/util.go @@ -8,6 +8,7 @@ import ( cid "gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid" blockstore "gx/ipfs/QmS2aqUZLJp8kF1ihE5rvDGE5LvmKDPnx32w9Z1BW9xLV5/go-ipfs-blockstore" + apicid "gx/ipfs/QmWf8NwKFLbTBvAvZst3bYF7WEEetzxWyMhvQ885cj9MM8/go-cidutil/apicid" dshelp "gx/ipfs/QmauEMWPoSqggfpSDHMMXuDn12DTd7TaFBvn39eeurzKT2/go-ipfs-ds-help" ds "gx/ipfs/Qmf4xQhNomPNhrtZc67qSnfJSjxjXs9LWvknJtSXwimPrM/go-datastore" dsq "gx/ipfs/Qmf4xQhNomPNhrtZc67qSnfJSjxjXs9LWvknJtSXwimPrM/go-datastore/query" @@ -60,7 +61,7 @@ func (s Status) Format() string { type ListRes struct { Status Status ErrorMsg string - Key cid.Cid + Key apicid.Cid FilePath 
string Offset uint64 Size uint64 @@ -269,14 +270,14 @@ func mkListRes(c cid.Cid, d *pb.DataObj, err error) *ListRes { return &ListRes{ Status: status, ErrorMsg: errorMsg, - Key: c, + Key: apicid.Cid{Cid: c}, } } return &ListRes{ Status: status, ErrorMsg: errorMsg, - Key: c, + Key: apicid.Cid{Cid: c}, FilePath: d.FilePath, Size: d.Size_, Offset: d.Offset, diff --git a/test/sharness/t0045-ls.sh b/test/sharness/t0045-ls.sh index 4d3fb296c971..9d54d3c0ca09 100755 --- a/test/sharness/t0045-ls.sh +++ b/test/sharness/t0045-ls.sh @@ -11,7 +11,6 @@ test_description="Test ls command" test_init_ipfs test_ls_cmd() { - test_expect_success "'ipfs add -r testData' succeeds" ' mkdir -p testData testData/d1 testData/d2 && echo "test" >testData/f1 && @@ -86,6 +85,15 @@ QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN 14 a EOF test_cmp expected_ls_headers actual_ls_headers ' + + test_expect_success "'ipfs ls --cid-base=base32 ' succeeds" ' + ipfs ls --cid-base=base32 $(cid-fmt -v 1 -b base32 %s QmfNy183bXiRVyrhyWtq3TwHn79yHEkiAGFr18P7YNzESj QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss) >actual_ls_base32 + ' + + test_expect_success "'ipfs ls --cid-base=base32 ' output looks good" ' + cid-fmt -b base32 -v 1 --filter %s < expected_ls > expected_ls_base32 + test_cmp expected_ls_base32 actual_ls_base32 + ' } diff --git a/test/sharness/t0085-pins.sh b/test/sharness/t0085-pins.sh index bdc285edb667..a5893ebaf39b 100755 --- a/test/sharness/t0085-pins.sh +++ b/test/sharness/t0085-pins.sh @@ -11,15 +11,19 @@ test_description="Test ipfs pinning operations" test_pins() { EXTRA_ARGS=$1 + BASE=$2 + if [ -n "$BASE" ]; then + BASE_ARGS="--cid-base=$BASE" + fi - test_expect_success "create some hashes" ' - HASH_A=$(echo "A" | ipfs add -q --pin=false) && - HASH_B=$(echo "B" | ipfs add -q --pin=false) && - HASH_C=$(echo "C" | ipfs add -q --pin=false) && - HASH_D=$(echo "D" | ipfs add -q --pin=false) && - HASH_E=$(echo "E" | ipfs add -q --pin=false) && - 
HASH_F=$(echo "F" | ipfs add -q --pin=false) && - HASH_G=$(echo "G" | ipfs add -q --pin=false) + test_expect_success "create some hashes $BASE" ' + HASH_A=$(echo "A" | ipfs add $BASE_ARGS -q --pin=false) && + HASH_B=$(echo "B" | ipfs add $BASE_ARGS -q --pin=false) && + HASH_C=$(echo "C" | ipfs add $BASE_ARGS -q --pin=false) && + HASH_D=$(echo "D" | ipfs add $BASE_ARGS -q --pin=false) && + HASH_E=$(echo "E" | ipfs add $BASE_ARGS -q --pin=false) && + HASH_F=$(echo "F" | ipfs add $BASE_ARGS -q --pin=false) && + HASH_G=$(echo "G" | ipfs add $BASE_ARGS -q --pin=false) ' test_expect_success "put all those hashes in a file" ' @@ -32,22 +36,53 @@ test_pins() { echo $HASH_G >> hashes ' + if [ -n "$BASE" ]; then + test_expect_success "make sure hashes are in $BASE" ' + cat hashes | xargs cid-fmt %b | sort -u > actual + echo base32 > expected + test_cmp expected actual + ' + fi + test_expect_success "'ipfs pin add $EXTRA_ARGS' via stdin" ' - cat hashes | ipfs pin add $EXTRA_ARGS + cat hashes | ipfs pin add $EXTRA_ARGS $BASE_ARGS | tee actual + ' + + test_expect_success "'ipfs pin add $EXTRA_ARGS' output looks good" ' + sed -e "s/^/pinned /; s/$/ recursively/" hashes > expected && + test_cmp expected actual ' test_expect_success "see if verify works" ' ipfs pin verify ' - test_expect_success "see if verify --verbose works" ' - ipfs pin verify --verbose > verify_out && - test $(cat verify_out | wc -l) > 8 + test_expect_success "see if verify --verbose $BASE_ARGS works" ' + ipfs pin verify --verbose $BASE_ARGS > verify_out && + test $(cat verify_out | wc -l) -ge 7 && + test_should_contain "$HASH_A ok" verify_out && + test_should_contain "$HASH_B ok" verify_out && + test_should_contain "$HASH_C ok" verify_out && + test_should_contain "$HASH_D ok" verify_out && + test_should_contain "$HASH_E ok" verify_out && + test_should_contain "$HASH_F ok" verify_out && + test_should_contain "$HASH_G ok" verify_out + ' + + test_expect_success "ipfs pin ls $BASE_ARGS works" ' + ipfs pin ls 
$BASE_ARGS > ls_out && + test_should_contain "$HASH_A" ls_out && + test_should_contain "$HASH_B" ls_out && + test_should_contain "$HASH_C" ls_out && + test_should_contain "$HASH_D" ls_out && + test_should_contain "$HASH_E" ls_out && + test_should_contain "$HASH_F" ls_out && + test_should_contain "$HASH_G" ls_out ' - test_expect_success "test pin ls hash" ' + test_expect_success "test pin ls $BASE_ARGS hash" ' echo $HASH_B | test_must_fail grep /ipfs && # just to be sure - ipfs pin ls $HASH_B > ls_hash_out && + ipfs pin ls $BASE_ARGS $HASH_B > ls_hash_out && echo "$HASH_B recursive" > ls_hash_exp && test_cmp ls_hash_exp ls_hash_out ' @@ -58,11 +93,11 @@ test_pins() { test_expect_success "test pin update" ' ipfs pin add "$HASH_A" && - ipfs pin ls > before_update && + ipfs pin ls $BASE_ARGS | tee before_update && test_should_contain "$HASH_A" before_update && test_must_fail grep -q "$HASH_B" before_update && ipfs pin update --unpin=true "$HASH_A" "$HASH_B" && - ipfs pin ls > after_update && + ipfs pin ls $BASE_ARGS > after_update && test_must_fail grep -q "$HASH_A" after_update && test_should_contain "$HASH_B" after_update && ipfs pin rm "$HASH_B" @@ -129,6 +164,7 @@ test_init_ipfs test_pins test_pins --progress +test_pins '' base32 test_pins_error_reporting test_pins_error_reporting --progress @@ -142,6 +178,7 @@ test_launch_ipfs_daemon --offline test_pins test_pins --progress +test_pins '' base32 test_pins_error_reporting test_pins_error_reporting --progress diff --git a/test/sharness/t0095-refs.sh b/test/sharness/t0095-refs.sh index 67dcdfaba850..d363d043f8c1 100755 --- a/test/sharness/t0095-refs.sh +++ b/test/sharness/t0095-refs.sh @@ -71,8 +71,12 @@ test_expect_success "create and add folders for refs" ' [[ "$root" == "$refsroot" ]] ' -test_expect_success "ipfs refs -r" ' - cat < expected.txt +test_refs_output() { + ARGS=$1 + FILTER=$2 + + test_expect_success "ipfs refs $ARGS -r" ' + cat < expected.txt QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v 
QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v @@ -87,13 +91,13 @@ QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61 EOF - ipfs refs -r $refsroot > refsr.txt - test_cmp expected.txt refsr.txt -' + ipfs refs $ARGS -r $refsroot > refsr.txt + test_cmp expected.txt refsr.txt + ' -# Unique is like above but removing duplicates -test_expect_success "ipfs refs -r --unique" ' - cat < expected.txt + # Unique is like above but removing duplicates + test_expect_success "ipfs refs $ARGS -r --unique" ' + cat < expected.txt QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS @@ -101,40 +105,40 @@ QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61 QmXXazTjeNCKFnpW1D65vTKsTs8fbgkCWTv8Em4pdK2coH EOF - ipfs refs -r --unique $refsroot > refsr.txt - test_cmp expected.txt refsr.txt -' + ipfs refs $ARGS -r --unique $refsroot > refsr.txt + test_cmp expected.txt refsr.txt + ' -# First level is 1.txt, B, C, D -test_expect_success "ipfs refs" ' - cat < expected.txt + # First level is 1.txt, B, C, D + test_expect_success "ipfs refs $ARGS" ' + cat < expected.txt QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa QmXXazTjeNCKFnpW1D65vTKsTs8fbgkCWTv8Em4pdK2coH QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS EOF - ipfs refs $refsroot > refs.txt - test_cmp expected.txt refs.txt -' + ipfs refs $ARGS $refsroot > refs.txt + test_cmp expected.txt refs.txt + ' -# max-depth=0 should return an empty list -test_expect_success "ipfs refs -r --max-depth=0" ' - cat < expected.txt + # max-depth=0 should return an empty list + test_expect_success "ipfs refs $ARGS -r --max-depth=0" ' + cat < expected.txt EOF - ipfs refs -r --max-depth=0 $refsroot > refs.txt - test_cmp expected.txt refs.txt -' - -# max-depth=1 should be equivalent to running without -r -test_expect_success "ipfs refs -r 
--max-depth=1" ' - ipfs refs -r --max-depth=1 $refsroot > refsr.txt - ipfs refs $refsroot > refs.txt - test_cmp refsr.txt refs.txt -' - -# We should see the depth limit engage at level 2 -test_expect_success "ipfs refs -r --max-depth=2" ' - cat < expected.txt + ipfs refs $ARGS -r --max-depth=0 $refsroot > refs.txt + test_cmp expected.txt refs.txt + ' + + # max-depth=1 should be equivalent to running without -r + test_expect_success "ipfs refs $ARGS -r --max-depth=1" ' + ipfs refs $ARGS -r --max-depth=1 $refsroot > refsr.txt + ipfs refs $ARGS $refsroot > refs.txt + test_cmp refsr.txt refs.txt + ' + + # We should see the depth limit engage at level 2 + test_expect_success "ipfs refs $ARGS -r --max-depth=2" ' + cat < expected.txt QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v @@ -144,33 +148,38 @@ QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61 EOF - ipfs refs -r --max-depth=2 $refsroot > refsr.txt - test_cmp refsr.txt expected.txt -' - -# Here branch pruning and re-exploration come into place -# At first it should see D at level 2 and don't go deeper. -# But then after doing C it will see D at level 1 and go deeper -# so that it outputs the hash for 2.txt (-q61). -# We also see that C/B is pruned as it's been shown before. -# -# Excerpt from diagram above: -# -# L0- _______ A_________ -# / | \ \ -# L1- B C D 1.txt -# / \ | | -# L2- D 1.txt B 2.txt -test_expect_success "ipfs refs -r --unique --max-depth=2" ' - cat < expected.txt + ipfs refs $ARGS -r --max-depth=2 $refsroot > refsr.txt + test_cmp refsr.txt expected.txt + ' + + # Here branch pruning and re-exploration come into place + # At first it should see D at level 2 and don't go deeper. + # But then after doing C it will see D at level 1 and go deeper + # so that it outputs the hash for 2.txt (-q61). 
+ # We also see that C/B is pruned as it's been shown before. + # + # Excerpt from diagram above: + # + # L0- _______ A_________ + # / | \ \ + # L1- B C D 1.txt + # / \ | | + # L2- D 1.txt B 2.txt + test_expect_success "ipfs refs $ARGS -r --unique --max-depth=2" ' + cat < expected.txt QmdytmR4wULMd3SLo6ePF4s3WcRHWcpnJZ7bHhoj3QB13v QmNkQvpiyAEtbeLviC7kqfifYoK1GXPcsSxTpP1yS3ykLa QmSanP5DpxpqfDdS4yekHY1MqrVge47gtxQcp2e2yZ4UwS QmXXazTjeNCKFnpW1D65vTKsTs8fbgkCWTv8Em4pdK2coH QmSFxnK675wQ9Kc1uqWKyJUaNxvSc2BP5DbXCD3x93oq61 EOF - ipfs refs -r --unique --max-depth=2 $refsroot > refsr.txt - test_cmp refsr.txt expected.txt -' + ipfs refs $ARGS -r --unique --max-depth=2 $refsroot > refsr.txt + test_cmp refsr.txt expected.txt + ' +} + +test_refs_output '' 'cat' + +test_refs_output '--cid-base=base32' 'ipfs cid base32' test_done diff --git a/test/sharness/t0160-resolve.sh b/test/sharness/t0160-resolve.sh index 3e1e12fa832e..349752b9a2ff 100755 --- a/test/sharness/t0160-resolve.sh +++ b/test/sharness/t0160-resolve.sh @@ -12,6 +12,9 @@ test_expect_success "resolve: prepare files" ' a_hash=$(ipfs add -q -r a | tail -n1) && b_hash=$(ipfs add -q -r a/b | tail -n1) && c_hash=$(ipfs add -q -r a/b/c | tail -n1) + a_hash_b32=$(cid-fmt -v 1 -b b %s $a_hash) + b_hash_b32=$(cid-fmt -v 1 -b b %s $b_hash) + c_hash_b32=$(cid-fmt -v 1 -b b %s $c_hash) ' test_expect_success "resolve: prepare dag" ' @@ -45,9 +48,10 @@ test_resolve_setup_name_fail() { test_resolve() { src=$1 dst=$2 + extra=$3 test_expect_success "resolve succeeds: $src" ' - ipfs resolve -r "$src" >actual + ipfs resolve $extra -r "$src" >actual ' test_expect_success "resolved correctly: $src -> $dst" ' @@ -57,7 +61,6 @@ test_resolve() { } test_resolve_cmd() { - test_resolve "/ipfs/$a_hash" "/ipfs/$a_hash" test_resolve "/ipfs/$a_hash/b" "/ipfs/$b_hash" test_resolve "/ipfs/$a_hash/b/c" "/ipfs/$c_hash" @@ -76,6 +79,30 @@ test_resolve_cmd() { test_resolve "/ipns/$id_hash" "/ipfs/$c_hash" } +test_resolve_cmd_b32() { + # no flags needed, 
base should be preserved
+
+  test_resolve "/ipfs/$a_hash_b32" "/ipfs/$a_hash_b32"
+  test_resolve "/ipfs/$a_hash_b32/b" "/ipfs/$b_hash_b32"
+  test_resolve "/ipfs/$a_hash_b32/b/c" "/ipfs/$c_hash_b32"
+  test_resolve "/ipfs/$b_hash_b32/c" "/ipfs/$c_hash_b32"
+
+  # flags needed; the passed-in path does not contain a cid to derive the base from
+
+  test_resolve_setup_name "/ipfs/$a_hash_b32"
+  test_resolve "/ipns/$id_hash" "/ipfs/$a_hash_b32" --cid-base=base32
+  test_resolve "/ipns/$id_hash/b" "/ipfs/$b_hash_b32" --cid-base=base32
+  test_resolve "/ipns/$id_hash/b/c" "/ipfs/$c_hash_b32" --cid-base=base32
+
+  test_resolve_setup_name "/ipfs/$b_hash_b32"
+  test_resolve "/ipns/$id_hash" "/ipfs/$b_hash_b32" --cid-base=base32
+  test_resolve "/ipns/$id_hash/c" "/ipfs/$c_hash_b32" --cid-base=base32
+
+  test_resolve_setup_name "/ipfs/$c_hash_b32"
+  test_resolve "/ipns/$id_hash" "/ipfs/$c_hash_b32" --cid-base=base32
+}
+
+
 #todo remove this once the online resolve is fixed
 test_resolve_fail() {
   src=$1
@@ -117,6 +144,7 @@ test_resolve_cmd_fail() {

 # should work offline
 test_resolve_cmd
+test_resolve_cmd_b32

 # should work online
 test_launch_ipfs_daemon
diff --git a/test/sharness/t0271-filestore-utils.sh b/test/sharness/t0271-filestore-utils.sh
index 3e0302b40c51..d26af3b4adbd 100755
--- a/test/sharness/t0271-filestore-utils.sh
+++ b/test/sharness/t0271-filestore-utils.sh
@@ -63,40 +63,42 @@ EOF

 sort < verify_expect_file_order > verify_expect_key_order

+IPFS_CMD="ipfs"
+
 test_filestore_adds() {
-  test_expect_success "nocopy add succeeds" '
-    HASH=$(ipfs add --raw-leaves --nocopy -r -q somedir | tail -n1)
+  test_expect_success "$IPFS_CMD add nocopy add succeeds" '
+    HASH=$($IPFS_CMD add --raw-leaves --nocopy -r -q somedir | tail -n1)
   '

   test_expect_success "nocopy add has right hash" '
     test "$HASH" = "$EXPHASH"
   '

-  test_expect_success "'ipfs filestore ls' output looks good'" 
' + $IPFS_CMD filestore ls | sort > ls_actual && test_cmp ls_expect_key_order ls_actual ' - test_expect_success "'ipfs filestore ls --file-order' output looks good'" ' - ipfs filestore ls --file-order > ls_actual && + test_expect_success "'$IPFS_CMD filestore ls --file-order' output looks good'" ' + $IPFS_CMD filestore ls --file-order > ls_actual && test_cmp ls_expect_file_order ls_actual ' - test_expect_success "'ipfs filestore ls HASH' works" ' - ipfs filestore ls $FILE1_HASH > ls_actual && + test_expect_success "'$IPFS_CMD filestore ls HASH' works" ' + $IPFS_CMD filestore ls $FILE1_HASH > ls_actual && grep -q somedir/file1 ls_actual ' test_expect_success "can retrieve multi-block file" ' - ipfs cat $FILE3_HASH > file3.data && + $IPFS_CMD cat $FILE3_HASH > file3.data && test_cmp somedir/file3 file3.data ' } # check that the filestore is in a clean state test_filestore_state() { - test_expect_success "ipfs filestore verify' output looks good'" ' - ipfs filestore verify | LC_ALL=C sort > verify_actual + test_expect_success "$IPFS_CMD filestore verify' output looks good'" ' + $IPFS_CMD filestore verify | LC_ALL=C sort > verify_actual test_cmp verify_expect_key_order verify_actual ' } @@ -104,13 +106,13 @@ test_filestore_state() { test_filestore_verify() { test_filestore_state - test_expect_success "ipfs filestore verify --file-order' output looks good'" ' - ipfs filestore verify --file-order > verify_actual + test_expect_success "$IPFS_CMD filestore verify --file-order' output looks good'" ' + $IPFS_CMD filestore verify --file-order > verify_actual test_cmp verify_expect_file_order verify_actual ' - test_expect_success "'ipfs filestore verify HASH' works" ' - ipfs filestore verify $FILE1_HASH > verify_actual && + test_expect_success "'$IPFS_CMD filestore verify HASH' works" ' + $IPFS_CMD filestore verify $FILE1_HASH > verify_actual && grep -q somedir/file1 verify_actual ' @@ -119,11 +121,11 @@ test_filestore_verify() { ' test_expect_success "can not retrieve block 
after backing file moved" '
-    test_must_fail ipfs cat $FILE1_HASH
+    test_must_fail $IPFS_CMD cat $FILE1_HASH
   '

-  test_expect_success "'ipfs filestore verify' shows file as missing" '
-    ipfs filestore verify > verify_actual &&
+  test_expect_success "'$IPFS_CMD filestore verify' shows file as missing" '
+    $IPFS_CMD filestore verify > verify_actual &&
     grep no-file verify_actual | grep -q somedir/file1
   '

@@ -132,7 +134,7 @@ test_filestore_verify() {
   '

   test_expect_success "block okay now" '
-    ipfs cat $FILE1_HASH > file1.data &&
+    $IPFS_CMD cat $FILE1_HASH > file1.data &&
     test_cmp somedir/file1 file1.data
   '

@@ -141,11 +143,11 @@ test_filestore_verify() {
   '

   test_expect_success "can not retrieve block after backing file changed" '
-    test_must_fail ipfs cat $FILE3_HASH
+    test_must_fail $IPFS_CMD cat $FILE3_HASH
   '

-  test_expect_success "'ipfs filestore verify' shows file as changed" '
-    ipfs filestore verify > verify_actual &&
+  test_expect_success "'$IPFS_CMD filestore verify' shows file as changed" '
+    $IPFS_CMD filestore verify > verify_actual &&
     grep changed verify_actual | grep -q somedir/file3
   '

@@ -157,9 +159,9 @@ test_filestore_dups() {
   # make sure the filestore is in a clean state
   test_filestore_state

-  test_expect_success "'ipfs filestore dups'" '
-    ipfs add --raw-leaves somedir/file1 &&
-    ipfs filestore dups > dups_actual &&
+  test_expect_success "'$IPFS_CMD filestore dups'" '
+    $IPFS_CMD add --raw-leaves somedir/file1 &&
+    $IPFS_CMD filestore dups > dups_actual &&
     echo "$FILE1_HASH" > dups_expect
     test_cmp dups_expect dups_actual
   '
@@ -195,4 +197,72 @@ test_filestore_dups

 test_kill_ipfs_daemon

+##
+## base32
+##
+
+EXPHASH="bafybeibva2uh4qpwjo2yr5g7m7nd5kfq64atydq77qdlrikh5uejwqdcbi"
+
+cat <<EOF > ls_expect_file_order
+bafkreicj3ezgtrh3euw2gyub6w3jydhnouqobxt7stbgtns3mv3iwv6bqq 1000 somedir/file1 0
+bafkreibxwxisv4cld6x76ybqbvf2uwbkoswjqt4hut46af6rps2twme7ey 10000 somedir/file2 0
+bafkreidntk6ciin24oez6yjz4b25fgwecncvi4ua4uhr2tdyenogpzpid4 262144 somedir/file3 0 
+bafkreidwie26yauqbhpd2nhhhmod55irq3z372mh6gw4ikl2ifo34c5jra 262144 somedir/file3 262144
+bafkreib7piyesy3dr22sawmycdftrmpyt3z4tmhxrdig2zt5zdp7qwbuay 262144 somedir/file3 524288
+bafkreigxp5k3k6b3i5sldu4r3im74nfxmoptuuubcvq6rg632nfznskglu 213568 somedir/file3 786432
+EOF
+
+sort < ls_expect_file_order > ls_expect_key_order
+
+FILE1_HASH=bafkreicj3ezgtrh3euw2gyub6w3jydhnouqobxt7stbgtns3mv3iwv6bqq
+FILE2_HASH=bafkreibxwxisv4cld6x76ybqbvf2uwbkoswjqt4hut46af6rps2twme7ey
+FILE3_HASH=bafybeih24zygzr2orr5q62mjnbgmjwgj6rx3tp74pwcqsqth44rloncllq
+
+cat <<EOF > verify_expect_file_order
+ok bafkreicj3ezgtrh3euw2gyub6w3jydhnouqobxt7stbgtns3mv3iwv6bqq 1000 somedir/file1 0
+ok bafkreibxwxisv4cld6x76ybqbvf2uwbkoswjqt4hut46af6rps2twme7ey 10000 somedir/file2 0
+ok bafkreidntk6ciin24oez6yjz4b25fgwecncvi4ua4uhr2tdyenogpzpid4 262144 somedir/file3 0
+ok bafkreidwie26yauqbhpd2nhhhmod55irq3z372mh6gw4ikl2ifo34c5jra 262144 somedir/file3 262144
+ok bafkreib7piyesy3dr22sawmycdftrmpyt3z4tmhxrdig2zt5zdp7qwbuay 262144 somedir/file3 524288
+ok bafkreigxp5k3k6b3i5sldu4r3im74nfxmoptuuubcvq6rg632nfznskglu 213568 somedir/file3 786432
+EOF
+
+sort < verify_expect_file_order > verify_expect_key_order
+
+IPFS_CMD="ipfs --cid-base=base32"
+
+#
+# No daemon
+#
+
+test_init
+
+test_filestore_adds
+
+test_filestore_verify
+
+test_filestore_dups
+
+#
+# With daemon
+#
+
+test_init
+
+# must be in offline mode so tests that retrieve non-existent blocks
+# don't hang
+test_launch_ipfs_daemon --offline
+
+test_filestore_adds
+
+test_filestore_verify
+
+test_filestore_dups
+
+test_kill_ipfs_daemon
+
+test_done
+
+##
+
 test_done
diff --git a/test/sharness/t0272-urlstore.sh b/test/sharness/t0272-urlstore.sh
index e646569d5b36..15d0e7783ee4 100755
--- a/test/sharness/t0272-urlstore.sh
+++ b/test/sharness/t0272-urlstore.sh
@@ -51,6 +51,12 @@ test_expect_success "add files using gateway address via url store" '
   ipfs pin add $HASH1 $HASH2
 '

+test_expect_success "add files using gateway address via url store using 
--cid-base=base32" '
+  HASH1b32=$(ipfs --cid-base=base32 urlstore add http://127.0.0.1:$GWAY_PORT/ipfs/$HASH1a) &&
+  HASH2b32=$(ipfs --cid-base=base32 urlstore add http://127.0.0.1:$GWAY_PORT/ipfs/$HASH2a)
+'
+
+
 test_expect_success "make sure hashes are different" '
   test $HASH1a != $HASH1 &&
   test $HASH2a != $HASH2
@@ -154,4 +160,11 @@ test_expect_success "check that the hashes were correct" '
   test $HASH3e = $HASH3
 '

+test_expect_success "check that the base32 hashes were correct" '
+  HASH1e32=$(ipfs cid base32 $HASH1e) &&
+  HASH2e32=$(ipfs cid base32 $HASH2e) &&
+  test $HASH1e32 = $HASH1b32 &&
+  test $HASH2e32 = $HASH2b32
+'
+
 test_done