From 189cf0cb03785f005f398f82cad2e66cf8fab028 Mon Sep 17 00:00:00 2001 From: Kevin Atkinson Date: Sat, 5 Aug 2017 17:22:34 -0400 Subject: [PATCH 01/16] Test raw leaves in trickle dag tests. License: MIT Signed-off-by: Kevin Atkinson --- importer/trickle/trickle_test.go | 130 ++++++++++++++++++++++++------- importer/trickle/trickledag.go | 72 ++++++++++------- unixfs/mod/dagmodifier_test.go | 6 +- 3 files changed, 154 insertions(+), 54 deletions(-) diff --git a/importer/trickle/trickle_test.go b/importer/trickle/trickle_test.go index b375a8b0d78..8365f4a606b 100644 --- a/importer/trickle/trickle_test.go +++ b/importer/trickle/trickle_test.go @@ -19,10 +19,23 @@ import ( u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" ) -func buildTestDag(ds merkledag.DAGService, spl chunk.Splitter) (*merkledag.ProtoNode, error) { +type UseRawLeaves bool + +const ( + ProtoBufLeaves UseRawLeaves = false + RawLeaves UseRawLeaves = true +) + +func runBothSubtests(t *testing.T, tfunc func(*testing.T, UseRawLeaves)) { + t.Run("leaves=ProtoBuf", func(t *testing.T) { tfunc(t, ProtoBufLeaves) }) + t.Run("leaves=Raw", func(t *testing.T) { tfunc(t, RawLeaves) }) +} + +func buildTestDag(ds merkledag.DAGService, spl chunk.Splitter, rawLeaves UseRawLeaves) (*merkledag.ProtoNode, error) { dbp := h.DagBuilderParams{ - Dagserv: ds, - Maxlinks: h.DefaultLinksPerBlock, + Dagserv: ds, + Maxlinks: h.DefaultLinksPerBlock, + RawLeaves: bool(rawLeaves), } nd, err := TrickleLayout(dbp.New(spl)) @@ -35,22 +48,31 @@ func buildTestDag(ds merkledag.DAGService, spl chunk.Splitter) (*merkledag.Proto return nil, merkledag.ErrNotProtobuf } - return pbnd, VerifyTrickleDagStructure(pbnd, ds, dbp.Maxlinks, layerRepeat) + return pbnd, VerifyTrickleDagStructure(pbnd, VerifyParams{ + Getter: ds, + Direct: dbp.Maxlinks, + LayerRepeat: layerRepeat, + RawLeaves: bool(rawLeaves), + }) } //Test where calls to read are smaller than the chunk size func TestSizeBasedSplit(t *testing.T) { + 
runBothSubtests(t, testSizeBasedSplit) +} + +func testSizeBasedSplit(t *testing.T, rawLeaves UseRawLeaves) { if testing.Short() { t.SkipNow() } bs := chunk.SizeSplitterGen(512) - testFileConsistency(t, bs, 32*512) + testFileConsistency(t, bs, 32*512, rawLeaves) bs = chunk.SizeSplitterGen(4096) - testFileConsistency(t, bs, 32*4096) + testFileConsistency(t, bs, 32*4096, rawLeaves) // Uneven offset - testFileConsistency(t, bs, 31*4095) + testFileConsistency(t, bs, 31*4095, rawLeaves) } func dup(b []byte) []byte { @@ -59,13 +81,13 @@ func dup(b []byte) []byte { return o } -func testFileConsistency(t *testing.T, bs chunk.SplitterGen, nbytes int) { +func testFileConsistency(t *testing.T, bs chunk.SplitterGen, nbytes int, rawLeaves UseRawLeaves) { should := make([]byte, nbytes) u.NewTimeSeededRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() - nd, err := buildTestDag(ds, bs(read)) + nd, err := buildTestDag(ds, bs(read), rawLeaves) if err != nil { t.Fatal(err) } @@ -87,12 +109,16 @@ func testFileConsistency(t *testing.T, bs chunk.SplitterGen, nbytes int) { } func TestBuilderConsistency(t *testing.T) { + runBothSubtests(t, testBuilderConsistency) +} + +func testBuilderConsistency(t *testing.T, rawLeaves UseRawLeaves) { nbytes := 100000 buf := new(bytes.Buffer) io.CopyN(buf, u.NewTimeSeededRand(), int64(nbytes)) should := dup(buf.Bytes()) dagserv := mdtest.Mock() - nd, err := buildTestDag(dagserv, chunk.DefaultSplitter(buf)) + nd, err := buildTestDag(dagserv, chunk.DefaultSplitter(buf), rawLeaves) if err != nil { t.Fatal(err) } @@ -125,6 +151,10 @@ func arrComp(a, b []byte) error { } func TestIndirectBlocks(t *testing.T) { + runBothSubtests(t, testIndirectBlocks) +} + +func testIndirectBlocks(t *testing.T, rawLeaves UseRawLeaves) { splitter := chunk.SizeSplitterGen(512) nbytes := 1024 * 1024 buf := make([]byte, nbytes) @@ -133,7 +163,7 @@ func TestIndirectBlocks(t *testing.T) { read := bytes.NewReader(buf) ds := mdtest.Mock() - dag, err := 
buildTestDag(ds, splitter(read)) + dag, err := buildTestDag(ds, splitter(read), rawLeaves) if err != nil { t.Fatal(err) } @@ -154,13 +184,17 @@ func TestIndirectBlocks(t *testing.T) { } func TestSeekingBasic(t *testing.T) { + runBothSubtests(t, testSeekingBasic) +} + +func testSeekingBasic(t *testing.T, rawLeaves UseRawLeaves) { nbytes := int64(10 * 1024) should := make([]byte, nbytes) u.NewTimeSeededRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() - nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 512)) + nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 512), rawLeaves) if err != nil { t.Fatal(err) } @@ -191,13 +225,17 @@ func TestSeekingBasic(t *testing.T) { } func TestSeekToBegin(t *testing.T) { + runBothSubtests(t, testSeekToBegin) +} + +func testSeekToBegin(t *testing.T, rawLeaves UseRawLeaves) { nbytes := int64(10 * 1024) should := make([]byte, nbytes) u.NewTimeSeededRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() - nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500)) + nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500), rawLeaves) if err != nil { t.Fatal(err) } @@ -235,13 +273,17 @@ func TestSeekToBegin(t *testing.T) { } func TestSeekToAlmostBegin(t *testing.T) { + runBothSubtests(t, testSeekToAlmostBegin) +} + +func testSeekToAlmostBegin(t *testing.T, rawLeaves UseRawLeaves) { nbytes := int64(10 * 1024) should := make([]byte, nbytes) u.NewTimeSeededRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() - nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500)) + nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500), rawLeaves) if err != nil { t.Fatal(err) } @@ -279,13 +321,17 @@ func TestSeekToAlmostBegin(t *testing.T) { } func TestSeekEnd(t *testing.T) { + runBothSubtests(t, testSeekEnd) +} + +func testSeekEnd(t *testing.T, rawLeaves UseRawLeaves) { nbytes := int64(50 * 1024) should := make([]byte, nbytes) u.NewTimeSeededRand().Read(should) read := 
bytes.NewReader(should) ds := mdtest.Mock() - nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500)) + nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500), rawLeaves) if err != nil { t.Fatal(err) } @@ -305,13 +351,17 @@ func TestSeekEnd(t *testing.T) { } func TestSeekEndSingleBlockFile(t *testing.T) { + runBothSubtests(t, testSeekEndSingleBlockFile) +} + +func testSeekEndSingleBlockFile(t *testing.T, rawLeaves UseRawLeaves) { nbytes := int64(100) should := make([]byte, nbytes) u.NewTimeSeededRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() - nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 5000)) + nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 5000), rawLeaves) if err != nil { t.Fatal(err) } @@ -331,13 +381,17 @@ func TestSeekEndSingleBlockFile(t *testing.T) { } func TestSeekingStress(t *testing.T) { + runBothSubtests(t, testSeekingStress) +} + +func testSeekingStress(t *testing.T, rawLeaves UseRawLeaves) { nbytes := int64(1024 * 1024) should := make([]byte, nbytes) u.NewTimeSeededRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() - nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 1000)) + nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 1000), rawLeaves) if err != nil { t.Fatal(err) } @@ -376,13 +430,17 @@ func TestSeekingStress(t *testing.T) { } func TestSeekingConsistency(t *testing.T) { + runBothSubtests(t, testSeekingConsistency) +} + +func testSeekingConsistency(t *testing.T, rawLeaves UseRawLeaves) { nbytes := int64(128 * 1024) should := make([]byte, nbytes) u.NewTimeSeededRand().Read(should) read := bytes.NewReader(should) ds := mdtest.Mock() - nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500)) + nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500), rawLeaves) if err != nil { t.Fatal(err) } @@ -419,6 +477,10 @@ func TestSeekingConsistency(t *testing.T) { } func TestAppend(t *testing.T) { + runBothSubtests(t, testAppend) +} + +func testAppend(t 
*testing.T, rawLeaves UseRawLeaves) { nbytes := int64(128 * 1024) should := make([]byte, nbytes) u.NewTimeSeededRand().Read(should) @@ -426,14 +488,15 @@ func TestAppend(t *testing.T) { // Reader for half the bytes read := bytes.NewReader(should[:nbytes/2]) ds := mdtest.Mock() - nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500)) + nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500), rawLeaves) if err != nil { t.Fatal(err) } dbp := &h.DagBuilderParams{ - Dagserv: ds, - Maxlinks: h.DefaultLinksPerBlock, + Dagserv: ds, + Maxlinks: h.DefaultLinksPerBlock, + RawLeaves: bool(rawLeaves), } r := bytes.NewReader(should[nbytes/2:]) @@ -444,7 +507,12 @@ func TestAppend(t *testing.T) { t.Fatal(err) } - err = VerifyTrickleDagStructure(nnode, ds, dbp.Maxlinks, layerRepeat) + err = VerifyTrickleDagStructure(nnode, VerifyParams{ + Getter: ds, + Direct: dbp.Maxlinks, + LayerRepeat: layerRepeat, + RawLeaves: bool(rawLeaves), + }) if err != nil { t.Fatal(err) } @@ -467,6 +535,10 @@ func TestAppend(t *testing.T) { // This test appends one byte at a time to an empty file func TestMultipleAppends(t *testing.T) { + runBothSubtests(t, testMultipleAppends) +} + +func testMultipleAppends(t *testing.T, rawLeaves UseRawLeaves) { ds := mdtest.Mock() // TODO: fix small size appends and make this number bigger @@ -475,14 +547,15 @@ func TestMultipleAppends(t *testing.T) { u.NewTimeSeededRand().Read(should) read := bytes.NewReader(nil) - nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500)) + nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500), rawLeaves) if err != nil { t.Fatal(err) } dbp := &h.DagBuilderParams{ - Dagserv: ds, - Maxlinks: 4, + Dagserv: ds, + Maxlinks: 4, + RawLeaves: bool(rawLeaves), } spl := chunk.SizeSplitterGen(500) @@ -495,7 +568,12 @@ func TestMultipleAppends(t *testing.T) { t.Fatal(err) } - err = VerifyTrickleDagStructure(nnode, ds, dbp.Maxlinks, layerRepeat) + err = VerifyTrickleDagStructure(nnode, VerifyParams{ + Getter: ds, + Direct: 
dbp.Maxlinks, + LayerRepeat: layerRepeat, + RawLeaves: bool(rawLeaves), + }) if err != nil { t.Fatal(err) } diff --git a/importer/trickle/trickledag.go b/importer/trickle/trickledag.go index 374863d2e28..32f86a2fa99 100644 --- a/importer/trickle/trickledag.go +++ b/importer/trickle/trickledag.go @@ -234,34 +234,57 @@ func trickleDepthInfo(node *h.UnixfsNode, maxlinks int) (int, int) { return ((n - maxlinks) / layerRepeat) + 1, (n - maxlinks) % layerRepeat } +// VerifyParams is used by VerifyTrickleDagStructure +type VerifyParams struct { + Getter node.NodeGetter + Direct int + LayerRepeat int + RawLeaves bool +} + // VerifyTrickleDagStructure checks that the given dag matches exactly the trickle dag datastructure // layout -func VerifyTrickleDagStructure(nd node.Node, ds dag.DAGService, direct int, layerRepeat int) error { - pbnd, ok := nd.(*dag.ProtoNode) - if !ok { - return dag.ErrNotProtobuf - } - - return verifyTDagRec(pbnd, -1, direct, layerRepeat, ds) +func VerifyTrickleDagStructure(nd node.Node, p VerifyParams) error { + return verifyTDagRec(nd, -1, p) } // Recursive call for verifying the structure of a trickledag -func verifyTDagRec(nd *dag.ProtoNode, depth, direct, layerRepeat int, ds dag.DAGService) error { +func verifyTDagRec(n node.Node, depth int, p VerifyParams) error { if depth == 0 { - // zero depth dag is raw data block - if len(nd.Links()) > 0 { + if len(n.Links()) > 0 { return errors.New("expected direct block") } + // zero depth dag is raw data block + switch nd := n.(type) { + case *dag.ProtoNode: + pbn, err := ft.FromBytes(nd.Data()) + if err != nil { + return err + } - pbn, err := ft.FromBytes(nd.Data()) - if err != nil { - return err - } + if pbn.GetType() != ft.TRaw { + return errors.New("Expected raw block") + } + + if p.RawLeaves { + return errors.New("expected raw leaf, got a protobuf node") + } + + return nil + case *dag.RawNode: + if !p.RawLeaves { + return errors.New("expected protobuf node as leaf") + } - if pbn.GetType() != ft.TRaw 
{ - return errors.New("Expected raw block") + return nil + default: + return errors.New("expected ProtoNode or RawNode") } - return nil + } + + nd, ok := n.(*dag.ProtoNode) + if !ok { + return errors.New("expected ProtoNode") } // Verify this is a branch node @@ -279,29 +302,24 @@ func verifyTDagRec(nd *dag.ProtoNode, depth, direct, layerRepeat int, ds dag.DAG } for i := 0; i < len(nd.Links()); i++ { - childi, err := nd.Links()[i].GetNode(context.TODO(), ds) + child, err := nd.Links()[i].GetNode(context.TODO(), p.Getter) if err != nil { return err } - childpb, ok := childi.(*dag.ProtoNode) - if !ok { - return fmt.Errorf("cannot operate on non-protobuf nodes") - } - - if i < direct { + if i < p.Direct { // Direct blocks - err := verifyTDagRec(childpb, 0, direct, layerRepeat, ds) + err := verifyTDagRec(child, 0, p) if err != nil { return err } } else { // Recursive trickle dags - rdepth := ((i - direct) / layerRepeat) + 1 + rdepth := ((i - p.Direct) / p.LayerRepeat) + 1 if rdepth >= depth && depth > 0 { return errors.New("Child dag was too deep!") } - err := verifyTDagRec(childpb, rdepth, direct, layerRepeat, ds) + err := verifyTDagRec(child, rdepth, p) if err != nil { return err } diff --git a/unixfs/mod/dagmodifier_test.go b/unixfs/mod/dagmodifier_test.go index 314178dd5eb..a79436b8d5e 100644 --- a/unixfs/mod/dagmodifier_test.go +++ b/unixfs/mod/dagmodifier_test.go @@ -41,7 +41,11 @@ func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier) t.Fatal(err) } - err = trickle.VerifyTrickleDagStructure(nd, dm.dagserv, h.DefaultLinksPerBlock, 4) + err = trickle.VerifyTrickleDagStructure(nd, trickle.VerifyParams{ + Getter: dm.dagserv, + Direct: h.DefaultLinksPerBlock, + LayerRepeat: 4, + }) if err != nil { t.Fatal(err) } From 7302c3ab66127aa7ef26cae3d4fe6e7caf8b027a Mon Sep 17 00:00:00 2001 From: Kevin Atkinson Date: Tue, 8 Aug 2017 01:56:49 -0400 Subject: [PATCH 02/16] Provide support for raw leaves in DAG modifier. 
License: MIT Signed-off-by: Kevin Atkinson --- importer/helpers/dagbuilder.go | 6 +- unixfs/io/dagreader_test.go | 12 +- unixfs/mod/dagmodifier.go | 222 ++++++++++++++++++--------------- unixfs/mod/dagmodifier_test.go | 112 +++++++++++++---- unixfs/test/utils.go | 29 +++-- 5 files changed, 238 insertions(+), 143 deletions(-) diff --git a/importer/helpers/dagbuilder.go b/importer/helpers/dagbuilder.go index 9ec93bd6fdf..23a27e0cb2a 100644 --- a/importer/helpers/dagbuilder.go +++ b/importer/helpers/dagbuilder.go @@ -120,8 +120,8 @@ func (db *DagBuilderHelper) NewUnixfsNode() *UnixfsNode { return n } -// NewUnixfsBlock creates a new Unixfs node to represent a raw data block -func (db *DagBuilderHelper) NewUnixfsBlock() *UnixfsNode { +// newUnixfsBlock creates a new Unixfs node to represent a raw data block +func (db *DagBuilderHelper) newUnixfsBlock() *UnixfsNode { n := &UnixfsNode{ node: new(dag.ProtoNode), ufmt: &ft.FSNode{Type: ft.TRaw}, @@ -181,7 +181,7 @@ func (db *DagBuilderHelper) GetNextDataNode() (*UnixfsNode, error) { }, nil } } else { - blk := db.NewUnixfsBlock() + blk := db.newUnixfsBlock() blk.SetData(data) return blk, nil } diff --git a/unixfs/io/dagreader_test.go b/unixfs/io/dagreader_test.go index 3ac82fc5fae..85c805e9ced 100644 --- a/unixfs/io/dagreader_test.go +++ b/unixfs/io/dagreader_test.go @@ -17,7 +17,7 @@ import ( func TestBasicRead(t *testing.T) { dserv := testu.GetDAGServ() - inbuf, node := testu.GetRandomNode(t, dserv, 1024) + inbuf, node := testu.GetRandomNode(t, dserv, 1024, testu.ProtoBufLeaves) ctx, closer := context.WithCancel(context.Background()) defer closer() @@ -44,7 +44,7 @@ func TestSeekAndRead(t *testing.T) { inbuf[i] = byte(i) } - node := testu.GetNode(t, dserv, inbuf) + node := testu.GetNode(t, dserv, inbuf, testu.ProtoBufLeaves) ctx, closer := context.WithCancel(context.Background()) defer closer() @@ -84,7 +84,7 @@ func TestRelativeSeek(t *testing.T) { } inbuf[1023] = 1 // force the reader to be 1024 bytes - node := 
testu.GetNode(t, dserv, inbuf) + node := testu.GetNode(t, dserv, inbuf, testu.ProtoBufLeaves) reader, err := NewDagReader(ctx, node, dserv) if err != nil { @@ -160,7 +160,7 @@ func TestBadPBData(t *testing.T) { func TestMetadataNode(t *testing.T) { dserv := testu.GetDAGServ() - rdata, rnode := testu.GetRandomNode(t, dserv, 512) + rdata, rnode := testu.GetRandomNode(t, dserv, 512, testu.ProtoBufLeaves) _, err := dserv.Add(rnode) if err != nil { t.Fatal(err) @@ -203,7 +203,7 @@ func TestMetadataNode(t *testing.T) { func TestWriteTo(t *testing.T) { dserv := testu.GetDAGServ() - inbuf, node := testu.GetRandomNode(t, dserv, 1024) + inbuf, node := testu.GetRandomNode(t, dserv, 1024, testu.ProtoBufLeaves) ctx, closer := context.WithCancel(context.Background()) defer closer() @@ -225,7 +225,7 @@ func TestWriteTo(t *testing.T) { func TestReaderSzie(t *testing.T) { dserv := testu.GetDAGServ() size := int64(1024) - _, node := testu.GetRandomNode(t, dserv, size) + _, node := testu.GetRandomNode(t, dserv, size, testu.ProtoBufLeaves) ctx, closer := context.WithCancel(context.Background()) defer closer() diff --git a/unixfs/mod/dagmodifier.go b/unixfs/mod/dagmodifier.go index e3955e20c0b..5eaad4779df 100644 --- a/unixfs/mod/dagmodifier.go +++ b/unixfs/mod/dagmodifier.go @@ -40,6 +40,8 @@ type DagModifier struct { curWrOff uint64 wrBuf *bytes.Buffer + RawLeaves bool + read uio.DagReader } @@ -113,17 +115,7 @@ func (dm *DagModifier) expandSparse(size int64) error { return err } _, err = dm.dagserv.Add(nnode) - if err != nil { - return err - } - - pbnnode, ok := nnode.(*mdag.ProtoNode) - if !ok { - return mdag.ErrNotProtobuf - } - - dm.curNode = pbnnode - return nil + return err } // Write continues writing to the dag at the current offset @@ -149,26 +141,28 @@ func (dm *DagModifier) Write(b []byte) (int, error) { return n, nil } -var ErrNoRawYet = fmt.Errorf("currently only fully support protonodes in the dagmodifier") - // Size returns the Filesize of the node func (dm 
*DagModifier) Size() (int64, error) { - switch nd := dm.curNode.(type) { + fileSize, err := fileSize(dm.curNode) + if err != nil { + return 0, err + } + if dm.wrBuf != nil && int64(dm.wrBuf.Len())+int64(dm.writeStart) > int64(fileSize) { + return int64(dm.wrBuf.Len()) + int64(dm.writeStart), nil + } + return int64(fileSize), nil +} + +func fileSize(n node.Node) (uint64, error) { + switch nd := n.(type) { case *mdag.ProtoNode: - pbn, err := ft.FromBytes(nd.Data()) + f, err := ft.FromBytes(nd.Data()) if err != nil { return 0, err } - if dm.wrBuf != nil && uint64(dm.wrBuf.Len())+dm.writeStart > pbn.GetFilesize() { - return int64(dm.wrBuf.Len()) + int64(dm.writeStart), nil - } - return int64(pbn.GetFilesize()), nil + return f.GetFilesize(), nil case *mdag.RawNode: - if dm.wrBuf != nil { - return 0, ErrNoRawYet - } - sz, err := nd.Size() - return int64(sz), err + return uint64(len(nd.RawData())), nil default: return 0, ErrNotUnixfs } @@ -196,36 +190,22 @@ func (dm *DagModifier) Sync() error { return err } - nd, err := dm.dagserv.Get(dm.ctx, thisc) + dm.curNode, err = dm.dagserv.Get(dm.ctx, thisc) if err != nil { return err } - pbnd, ok := nd.(*mdag.ProtoNode) - if !ok { - return mdag.ErrNotProtobuf - } - - dm.curNode = pbnd - // need to write past end of current dag if !done { - nd, err := dm.appendData(dm.curNode, dm.splitter(dm.wrBuf)) + dm.curNode, err = dm.appendData(dm.curNode, dm.splitter(dm.wrBuf)) if err != nil { return err } - _, err = dm.dagserv.Add(nd) + _, err = dm.dagserv.Add(dm.curNode) if err != nil { return err } - - pbnode, ok := nd.(*mdag.ProtoNode) - if !ok { - return mdag.ErrNotProtobuf - } - - dm.curNode = pbnode } dm.writeStart += uint64(buflen) @@ -238,43 +218,86 @@ func (dm *DagModifier) Sync() error { // returns the new key of the passed in node and whether or not all the data in the reader // has been consumed. 
func (dm *DagModifier) modifyDag(n node.Node, offset uint64, data io.Reader) (*cid.Cid, bool, error) { - node, ok := n.(*mdag.ProtoNode) - if !ok { - return nil, false, ErrNoRawYet - } + // If we've reached a leaf node. + if len(n.Links()) == 0 { + switch nd0 := n.(type) { + case *mdag.ProtoNode: + f, err := ft.FromBytes(nd0.Data()) + if err != nil { + return nil, false, err + } - f, err := ft.FromBytes(node.Data()) - if err != nil { - return nil, false, err - } + n, err := data.Read(f.Data[offset:]) + if err != nil && err != io.EOF { + return nil, false, err + } - // If we've reached a leaf node. - if len(node.Links()) == 0 { - n, err := data.Read(f.Data[offset:]) - if err != nil && err != io.EOF { - return nil, false, err - } + // Update newly written node.. + b, err := proto.Marshal(f) + if err != nil { + return nil, false, err + } - // Update newly written node.. - b, err := proto.Marshal(f) - if err != nil { - return nil, false, err - } + nd := new(mdag.ProtoNode) + nd.SetData(b) + k, err := dm.dagserv.Add(nd) + if err != nil { + return nil, false, err + } - nd := new(mdag.ProtoNode) - nd.SetData(b) - k, err := dm.dagserv.Add(nd) - if err != nil { - return nil, false, err - } + // Hey look! we're done! + var done bool + if n < len(f.Data[offset:]) { + done = true + } + + return k, done, nil + case *mdag.RawNode: + origData := nd0.RawData() + bytes := make([]byte, len(origData)) - // Hey look! we're done! 
- var done bool - if n < len(f.Data[offset:]) { - done = true + // copy orig data up to offset + copy(bytes, origData[:offset]) + + // copy in new data + n, err := data.Read(bytes[offset:]) + if err != nil && err != io.EOF { + return nil, false, err + } + + // copy remaining data + offsetPlusN := int(offset) + n + if offsetPlusN < len(origData) { + copy(bytes[offsetPlusN:], origData[offsetPlusN:]) + } + + nd, err := mdag.NewRawNodeWPrefix(bytes, nd0.Cid().Prefix()) + if err != nil { + return nil, false, err + } + k, err := dm.dagserv.Add(nd) + if err != nil { + return nil, false, err + } + + // Hey look! we're done! + var done bool + if n < len(bytes[offset:]) { + done = true + } + + return k, done, nil } + } - return k, done, nil + node, ok := n.(*mdag.ProtoNode) + if !ok { + return nil, false, ErrNotUnixfs + } + + f, err := ft.FromBytes(node.Data()) + if err != nil { + return nil, false, err } var cur uint64 @@ -287,12 +310,7 @@ func (dm *DagModifier) modifyDag(n node.Node, offset uint64, data io.Reader) (*c return nil, false, err } - childpb, ok := child.(*mdag.ProtoNode) - if !ok { - return nil, false, mdag.ErrNotProtobuf - } - - k, sdone, err := dm.modifyDag(childpb, offset-cur, data) + k, sdone, err := dm.modifyDag(child, offset-cur, data) if err != nil { return nil, false, err } @@ -323,14 +341,13 @@ func (dm *DagModifier) modifyDag(n node.Node, offset uint64, data io.Reader) (*c // appendData appends the blocks from the given chan to the end of this dag func (dm *DagModifier) appendData(nd node.Node, spl chunk.Splitter) (node.Node, error) { switch nd := nd.(type) { - case *mdag.ProtoNode: + case *mdag.ProtoNode, *mdag.RawNode: dbp := &help.DagBuilderParams{ - Dagserv: dm.dagserv, - Maxlinks: help.DefaultLinksPerBlock, + Dagserv: dm.dagserv, + Maxlinks: help.DefaultLinksPerBlock, + RawLeaves: dm.RawLeaves, } return trickle.TrickleAppend(dm.ctx, nd, dbp.New(spl)) - case *mdag.RawNode: - return nil, fmt.Errorf("appending to raw node types not yet supported") 
default: return nil, ErrNotUnixfs } @@ -478,26 +495,30 @@ func (dm *DagModifier) Truncate(size int64) error { } // dagTruncate truncates the given node to 'size' and returns the modified Node -func dagTruncate(ctx context.Context, n node.Node, size uint64, ds mdag.DAGService) (*mdag.ProtoNode, error) { - nd, ok := n.(*mdag.ProtoNode) - if !ok { - return nil, ErrNoRawYet - } - - if len(nd.Links()) == 0 { - // TODO: this can likely be done without marshaling and remarshaling - pbn, err := ft.FromBytes(nd.Data()) - if err != nil { - return nil, err +func dagTruncate(ctx context.Context, n node.Node, size uint64, ds mdag.DAGService) (node.Node, error) { + if len(n.Links()) == 0 { + switch nd := n.(type) { + case *mdag.ProtoNode: + // TODO: this can likely be done without marshaling and remarshaling + pbn, err := ft.FromBytes(nd.Data()) + if err != nil { + return nil, err + } + nd.SetData(ft.WrapData(pbn.Data[:size])) + return nd, nil + case *mdag.RawNode: + return mdag.NewRawNodeWPrefix(nd.RawData()[:size], nd.Cid().Prefix()) } + } - nd.SetData(ft.WrapData(pbn.Data[:size])) - return nd, nil + nd, ok := n.(*mdag.ProtoNode) + if !ok { + return nil, ErrNotUnixfs } var cur uint64 end := 0 - var modified *mdag.ProtoNode + var modified node.Node ndata := new(ft.FSNode) for i, lnk := range nd.Links() { child, err := lnk.GetNode(ctx, ds) @@ -505,19 +526,14 @@ func dagTruncate(ctx context.Context, n node.Node, size uint64, ds mdag.DAGServi return nil, err } - childpb, ok := child.(*mdag.ProtoNode) - if !ok { - return nil, err - } - - childsize, err := ft.DataSize(childpb.Data()) + childsize, err := fileSize(child) if err != nil { return nil, err } // found the child we want to cut if size < cur+childsize { - nchild, err := dagTruncate(ctx, childpb, size-cur, ds) + nchild, err := dagTruncate(ctx, child, size-cur, ds) if err != nil { return nil, err } diff --git a/unixfs/mod/dagmodifier_test.go b/unixfs/mod/dagmodifier_test.go index a79436b8d5e..7b15b8532dd 100644 --- 
a/unixfs/mod/dagmodifier_test.go +++ b/unixfs/mod/dagmodifier_test.go @@ -9,15 +9,14 @@ import ( h "github.com/ipfs/go-ipfs/importer/helpers" trickle "github.com/ipfs/go-ipfs/importer/trickle" - mdag "github.com/ipfs/go-ipfs/merkledag" - ft "github.com/ipfs/go-ipfs/unixfs" + uio "github.com/ipfs/go-ipfs/unixfs/io" testu "github.com/ipfs/go-ipfs/unixfs/test" u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" ) -func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier) []byte { +func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier, rawLeaves testu.UseRawLeaves) []byte { newdata := make([]byte, size) r := u.NewTimeSeededRand() r.Read(newdata) @@ -45,9 +44,10 @@ func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier) Getter: dm.dagserv, Direct: h.DefaultLinksPerBlock, LayerRepeat: 4, + RawLeaves: bool(rawLeaves), }) if err != nil { - t.Fatal(err) + t.Error(err) } rd, err := uio.NewDagReader(context.Background(), nd, dm.dagserv) @@ -67,9 +67,17 @@ func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier) return orig } +func runBothSubtests(t *testing.T, tfunc func(*testing.T, testu.UseRawLeaves)) { + t.Run("leaves=ProtoBuf", func(t *testing.T) { tfunc(t, testu.ProtoBufLeaves) }) + t.Run("leaves=Raw", func(t *testing.T) { tfunc(t, testu.RawLeaves) }) +} + func TestDagModifierBasic(t *testing.T) { + runBothSubtests(t, testDagModifierBasic) +} +func testDagModifierBasic(t *testing.T, rawLeaves testu.UseRawLeaves) { dserv := testu.GetDAGServ() - b, n := testu.GetRandomNode(t, dserv, 50000) + b, n := testu.GetRandomNode(t, dserv, 50000, rawLeaves) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -77,32 +85,33 @@ func TestDagModifierBasic(t *testing.T) { if err != nil { t.Fatal(err) } + dagmod.RawLeaves = bool(rawLeaves) // Within zero block beg := uint64(15) length := uint64(60) t.Log("Testing mod within zero block") - b = 
testModWrite(t, beg, length, b, dagmod) + b = testModWrite(t, beg, length, b, dagmod, rawLeaves) // Within bounds of existing file beg = 1000 length = 4000 t.Log("Testing mod within bounds of existing multiblock file.") - b = testModWrite(t, beg, length, b, dagmod) + b = testModWrite(t, beg, length, b, dagmod, rawLeaves) // Extend bounds beg = 49500 length = 4000 t.Log("Testing mod that extends file.") - b = testModWrite(t, beg, length, b, dagmod) + b = testModWrite(t, beg, length, b, dagmod, rawLeaves) // "Append" beg = uint64(len(b)) length = 3000 t.Log("Testing pure append") - _ = testModWrite(t, beg, length, b, dagmod) + _ = testModWrite(t, beg, length, b, dagmod, rawLeaves) // Verify reported length node, err := dagmod.GetNode() @@ -110,7 +119,7 @@ func TestDagModifierBasic(t *testing.T) { t.Fatal(err) } - size, err := ft.DataSize(node.(*mdag.ProtoNode).Data()) + size, err := fileSize(node) if err != nil { t.Fatal(err) } @@ -122,8 +131,11 @@ func TestDagModifierBasic(t *testing.T) { } func TestMultiWrite(t *testing.T) { + runBothSubtests(t, testMultiWrite) +} +func testMultiWrite(t *testing.T, rawLeaves testu.UseRawLeaves) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv) + n := testu.GetEmptyNode(t, dserv, rawLeaves) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -132,6 +144,7 @@ func TestMultiWrite(t *testing.T) { if err != nil { t.Fatal(err) } + dagmod.RawLeaves = bool(rawLeaves) data := make([]byte, 4000) u.NewTimeSeededRand().Read(data) @@ -175,8 +188,11 @@ func TestMultiWrite(t *testing.T) { } func TestMultiWriteAndFlush(t *testing.T) { + runBothSubtests(t, testMultiWriteAndFlush) +} +func testMultiWriteAndFlush(t *testing.T, rawLeaves testu.UseRawLeaves) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv) + n := testu.GetEmptyNode(t, dserv, rawLeaves) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -185,6 +201,7 @@ func TestMultiWriteAndFlush(t *testing.T) { if err != nil 
{ t.Fatal(err) } + dagmod.RawLeaves = bool(rawLeaves) data := make([]byte, 20) u.NewTimeSeededRand().Read(data) @@ -223,8 +240,11 @@ func TestMultiWriteAndFlush(t *testing.T) { } func TestWriteNewFile(t *testing.T) { + runBothSubtests(t, testWriteNewFile) +} +func testWriteNewFile(t *testing.T, rawLeaves testu.UseRawLeaves) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv) + n := testu.GetEmptyNode(t, dserv, rawLeaves) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -233,6 +253,7 @@ func TestWriteNewFile(t *testing.T) { if err != nil { t.Fatal(err) } + dagmod.RawLeaves = bool(rawLeaves) towrite := make([]byte, 2000) u.NewTimeSeededRand().Read(towrite) @@ -266,8 +287,11 @@ func TestWriteNewFile(t *testing.T) { } func TestMultiWriteCoal(t *testing.T) { + runBothSubtests(t, testMultiWriteCoal) +} +func testMultiWriteCoal(t *testing.T, rawLeaves testu.UseRawLeaves) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv) + n := testu.GetEmptyNode(t, dserv, rawLeaves) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -276,6 +300,7 @@ func TestMultiWriteCoal(t *testing.T) { if err != nil { t.Fatal(err) } + dagmod.RawLeaves = bool(rawLeaves) data := make([]byte, 1000) u.NewTimeSeededRand().Read(data) @@ -300,6 +325,8 @@ func TestMultiWriteCoal(t *testing.T) { if err != nil { t.Fatal(err) } + dagmod.RawLeaves = bool(rawLeaves) + rbuf, err := ioutil.ReadAll(read) if err != nil { t.Fatal(err) @@ -312,8 +339,11 @@ func TestMultiWriteCoal(t *testing.T) { } func TestLargeWriteChunks(t *testing.T) { + runBothSubtests(t, testLargeWriteChunks) +} +func testLargeWriteChunks(t *testing.T, rawLeaves testu.UseRawLeaves) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv) + n := testu.GetEmptyNode(t, dserv, rawLeaves) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -322,6 +352,7 @@ func TestLargeWriteChunks(t *testing.T) { if err != nil { t.Fatal(err) } + dagmod.RawLeaves 
= bool(rawLeaves) wrsize := 1000 datasize := 10000000 @@ -351,8 +382,11 @@ func TestLargeWriteChunks(t *testing.T) { } func TestDagTruncate(t *testing.T) { + runBothSubtests(t, testDagTruncate) +} +func testDagTruncate(t *testing.T, rawLeaves testu.UseRawLeaves) { dserv := testu.GetDAGServ() - b, n := testu.GetRandomNode(t, dserv, 50000) + b, n := testu.GetRandomNode(t, dserv, 50000, rawLeaves) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -360,6 +394,7 @@ func TestDagTruncate(t *testing.T) { if err != nil { t.Fatal(err) } + dagmod.RawLeaves = bool(rawLeaves) err = dagmod.Truncate(12345) if err != nil { @@ -418,8 +453,11 @@ func TestDagTruncate(t *testing.T) { } func TestSparseWrite(t *testing.T) { + runBothSubtests(t, testSparseWrite) +} +func testSparseWrite(t *testing.T, rawLeaves testu.UseRawLeaves) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv) + n := testu.GetEmptyNode(t, dserv, rawLeaves) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -427,6 +465,7 @@ func TestSparseWrite(t *testing.T) { if err != nil { t.Fatal(err) } + dagmod.RawLeaves = bool(rawLeaves) buf := make([]byte, 5000) u.NewTimeSeededRand().Read(buf[2500:]) @@ -456,8 +495,11 @@ func TestSparseWrite(t *testing.T) { } func TestSeekPastEndWrite(t *testing.T) { + runBothSubtests(t, testSeekPastEndWrite) +} +func testSeekPastEndWrite(t *testing.T, rawLeaves testu.UseRawLeaves) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv) + n := testu.GetEmptyNode(t, dserv, rawLeaves) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -465,6 +507,7 @@ func TestSeekPastEndWrite(t *testing.T) { if err != nil { t.Fatal(err) } + dagmod.RawLeaves = bool(rawLeaves) buf := make([]byte, 5000) u.NewTimeSeededRand().Read(buf[2500:]) @@ -503,8 +546,11 @@ func TestSeekPastEndWrite(t *testing.T) { } func TestRelativeSeek(t *testing.T) { + runBothSubtests(t, testRelativeSeek) +} +func testRelativeSeek(t *testing.T, 
rawLeaves testu.UseRawLeaves) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv) + n := testu.GetEmptyNode(t, dserv, rawLeaves) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -512,6 +558,7 @@ func TestRelativeSeek(t *testing.T) { if err != nil { t.Fatal(err) } + dagmod.RawLeaves = bool(rawLeaves) for i := 0; i < 64; i++ { dagmod.Write([]byte{byte(i)}) @@ -533,8 +580,11 @@ func TestRelativeSeek(t *testing.T) { } func TestInvalidSeek(t *testing.T) { + runBothSubtests(t, testInvalidSeek) +} +func testInvalidSeek(t *testing.T, rawLeaves testu.UseRawLeaves) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv) + n := testu.GetEmptyNode(t, dserv, rawLeaves) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -542,6 +592,8 @@ func TestInvalidSeek(t *testing.T) { if err != nil { t.Fatal(err) } + dagmod.RawLeaves = bool(rawLeaves) + _, err = dagmod.Seek(10, -10) if err != ErrUnrecognizedWhence { @@ -550,9 +602,12 @@ func TestInvalidSeek(t *testing.T) { } func TestEndSeek(t *testing.T) { + runBothSubtests(t, testEndSeek) +} +func testEndSeek(t *testing.T, rawLeaves testu.UseRawLeaves) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv) + n := testu.GetEmptyNode(t, dserv, rawLeaves) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -560,6 +615,7 @@ func TestEndSeek(t *testing.T) { if err != nil { t.Fatal(err) } + dagmod.RawLeaves = bool(rawLeaves) _, err = dagmod.Write(make([]byte, 100)) if err != nil { @@ -592,9 +648,12 @@ func TestEndSeek(t *testing.T) { } func TestReadAndSeek(t *testing.T) { + runBothSubtests(t, testReadAndSeek) +} +func testReadAndSeek(t *testing.T, rawLeaves testu.UseRawLeaves) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv) + n := testu.GetEmptyNode(t, dserv, rawLeaves) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -602,6 +661,7 @@ func TestReadAndSeek(t *testing.T) { if err != nil { t.Fatal(err) 
} + dagmod.RawLeaves = bool(rawLeaves) writeBuf := []byte{0, 1, 2, 3, 4, 5, 6, 7} dagmod.Write(writeBuf) @@ -660,9 +720,12 @@ func TestReadAndSeek(t *testing.T) { } func TestCtxRead(t *testing.T) { + runBothSubtests(t, testCtxRead) +} +func testCtxRead(t *testing.T, rawLeaves testu.UseRawLeaves) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv) + n := testu.GetEmptyNode(t, dserv, rawLeaves) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -670,6 +733,7 @@ func TestCtxRead(t *testing.T) { if err != nil { t.Fatal(err) } + dagmod.RawLeaves = bool(rawLeaves) _, err = dagmod.Write([]byte{0, 1, 2, 3, 4, 5, 6, 7}) if err != nil { @@ -693,7 +757,7 @@ func TestCtxRead(t *testing.T) { func BenchmarkDagmodWrite(b *testing.B) { b.StopTimer() dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(b, dserv) + n := testu.GetEmptyNode(b, dserv, testu.ProtoBufLeaves) ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/unixfs/test/utils.go b/unixfs/test/utils.go index c0b8ae18d08..933493f3678 100644 --- a/unixfs/test/utils.go +++ b/unixfs/test/utils.go @@ -8,8 +8,9 @@ import ( "io/ioutil" "testing" - imp "github.com/ipfs/go-ipfs/importer" "github.com/ipfs/go-ipfs/importer/chunk" + h "github.com/ipfs/go-ipfs/importer/helpers" + trickle "github.com/ipfs/go-ipfs/importer/trickle" mdag "github.com/ipfs/go-ipfs/merkledag" mdagmock "github.com/ipfs/go-ipfs/merkledag/test" ft "github.com/ipfs/go-ipfs/unixfs" @@ -28,9 +29,23 @@ func GetDAGServ() mdag.DAGService { return mdagmock.Mock() } -func GetNode(t testing.TB, dserv mdag.DAGService, data []byte) node.Node { +type UseRawLeaves bool + +const ( + ProtoBufLeaves UseRawLeaves = false + RawLeaves UseRawLeaves = true +) + +func GetNode(t testing.TB, dserv mdag.DAGService, data []byte, rawLeaves UseRawLeaves) node.Node { in := bytes.NewReader(data) - node, err := imp.BuildTrickleDagFromReader(dserv, SizeSplitterGen(500)(in)) + + dbp := h.DagBuilderParams{ + Dagserv: dserv, 
+ Maxlinks: h.DefaultLinksPerBlock, + RawLeaves: bool(rawLeaves), + } + + node, err := trickle.TrickleLayout(dbp.New(SizeSplitterGen(500)(in))) if err != nil { t.Fatal(err) } @@ -38,18 +53,18 @@ func GetNode(t testing.TB, dserv mdag.DAGService, data []byte) node.Node { return node } -func GetEmptyNode(t testing.TB, dserv mdag.DAGService) node.Node { - return GetNode(t, dserv, []byte{}) +func GetEmptyNode(t testing.TB, dserv mdag.DAGService, rawLeaves UseRawLeaves) node.Node { + return GetNode(t, dserv, []byte{}, rawLeaves) } -func GetRandomNode(t testing.TB, dserv mdag.DAGService, size int64) ([]byte, node.Node) { +func GetRandomNode(t testing.TB, dserv mdag.DAGService, size int64, rawLeaves UseRawLeaves) ([]byte, node.Node) { in := io.LimitReader(u.NewTimeSeededRand(), size) buf, err := ioutil.ReadAll(in) if err != nil { t.Fatal(err) } - node := GetNode(t, dserv, buf) + node := GetNode(t, dserv, buf, rawLeaves) return buf, node } From a3bd2c23b3b84aaf3090457021f04efeed3cca70 Mon Sep 17 00:00:00 2001 From: Kevin Atkinson Date: Tue, 8 Aug 2017 14:33:47 -0400 Subject: [PATCH 03/16] Add "--raw-leaves" option to "ipfs files" License: MIT Signed-off-by: Kevin Atkinson --- core/commands/files/files.go | 3 + mfs/file.go | 3 + test/sharness/t0250-files-api.sh | 396 ++++++++++++++++--------------- 3 files changed, 213 insertions(+), 189 deletions(-) diff --git a/core/commands/files/files.go b/core/commands/files/files.go index 8b2a53f7ef7..f159ea88ad8 100644 --- a/core/commands/files/files.go +++ b/core/commands/files/files.go @@ -43,6 +43,7 @@ operations. }, Options: []cmds.Option{ cmds.BoolOption("f", "flush", "Flush target and ancestors after write.").Default(true), + cmds.BoolOption("raw-leaves", "Use raw blocks for newly created leaf nodes. (experimental)"), }, Subcommands: map[string]*cmds.Command{ "read": FilesReadCmd, @@ -598,6 +599,7 @@ stat' on the file or any of its ancestors. 
create, _, _ := req.Option("create").Bool() trunc, _, _ := req.Option("truncate").Bool() flush, _, _ := req.Option("flush").Bool() + rawLeaves, _, _ := req.Option("raw-leaves").Bool() nd, err := req.InvocContext().GetNode() if err != nil { @@ -620,6 +622,7 @@ stat' on the file or any of its ancestors. res.SetError(err, cmds.ErrNormal) return } + fi.RawLeaves = rawLeaves wfd, err := fi.Open(mfs.OpenWriteOnly, flush) if err != nil { diff --git a/mfs/file.go b/mfs/file.go index 6e249e3294b..85c9e59bc25 100644 --- a/mfs/file.go +++ b/mfs/file.go @@ -23,6 +23,8 @@ type File struct { dserv dag.DAGService node node.Node nodelk sync.Mutex + + RawLeaves bool } // NewFile returns a NewFile object with the given parameters @@ -79,6 +81,7 @@ func (fi *File) Open(flags int, sync bool) (FileDescriptor, error) { if err != nil { return nil, err } + dmod.RawLeaves = fi.RawLeaves return &fileDescriptor{ inode: fi, diff --git a/test/sharness/t0250-files-api.sh b/test/sharness/t0250-files-api.sh index 283a4898559..be993b9ee53 100755 --- a/test/sharness/t0250-files-api.sh +++ b/test/sharness/t0250-files-api.sh @@ -89,114 +89,116 @@ test_sharding() { } test_files_api() { - ROOT_HASH=$1 + local EXTRA ARGS + EXTRA=$1 + ARGS=$2 - test_expect_success "can mkdir in root" ' - ipfs files mkdir /cats + test_expect_success "can mkdir in root $EXTRA" ' + ipfs files $ARGS mkdir /cats ' - test_expect_success "'files ls' lists root by default" ' - ipfs files ls >actual && + test_expect_success "'files ls' lists root by default $EXTRA" ' + ipfs files $ARGS ls >actual && echo "cats" >expected && test_cmp expected actual ' - test_expect_success "directory was created" ' + test_expect_success "directory was created $EXTRA" ' verify_path_exists /cats ' - test_expect_success "directory is empty" ' + test_expect_success "directory is empty $EXTRA" ' verify_dir_contents /cats ' # we do verification of stat formatting now as we depend on it - test_expect_success "stat works" ' - ipfs files stat / >stat + 
test_expect_success "stat works $EXTRA" ' + ipfs files $ARGS stat / >stat ' - test_expect_success "hash is first line of stat" ' + test_expect_success "hash is first line of stat $EXTRA" ' ipfs ls $(head -1 stat) | grep "cats" ' - test_expect_success "stat --hash gives only hash" ' - ipfs files stat --hash / >actual && + test_expect_success "stat --hash gives only hash $EXTRA" ' + ipfs files $ARGS stat --hash / >actual && head -n1 stat >expected && test_cmp expected actual ' - test_expect_success "stat with multiple format options should fail" ' - test_must_fail ipfs files stat --hash --size / + test_expect_success "stat with multiple format options should fail $EXTRA" ' + test_must_fail ipfs files $ARGS stat --hash --size / ' - test_expect_success "compare hash option with format" ' - ipfs files stat --hash / >expected && - ipfs files stat --format='"'"''"'"' / >actual && + test_expect_success "compare hash option with format $EXTRA" ' + ipfs files $ARGS stat --hash / >expected && + ipfs files $ARGS stat --format='"'"''"'"' / >actual && test_cmp expected actual ' - test_expect_success "compare size option with format" ' - ipfs files stat --size / >expected && - ipfs files stat --format='"'"''"'"' / >actual && + test_expect_success "compare size option with format $EXTRA" ' + ipfs files $ARGS stat --size / >expected && + ipfs files $ARGS stat --format='"'"''"'"' / >actual && test_cmp expected actual ' - test_expect_success "check root hash" ' - ipfs files stat --hash / > roothash + test_expect_success "check root hash $EXTRA" ' + ipfs files $ARGS stat --hash / > roothash ' - test_expect_success "cannot mkdir /" ' - test_expect_code 1 ipfs files mkdir / + test_expect_success "cannot mkdir / $EXTRA" ' + test_expect_code 1 ipfs files $ARGS mkdir / ' - test_expect_success "check root hash was not changed" ' - ipfs files stat --hash / > roothashafter && + test_expect_success "check root hash was not changed $EXTRA" ' + ipfs files $ARGS stat --hash / > roothashafter && 
test_cmp roothash roothashafter ' - test_expect_success "can put files into directory" ' - ipfs files cp /ipfs/$FILE1 /cats/file1 + test_expect_success "can put files into directory $EXTRA" ' + ipfs files $ARGS cp /ipfs/$FILE1 /cats/file1 ' - test_expect_success "file shows up in directory" ' + test_expect_success "file shows up in directory $EXTRA" ' verify_dir_contents /cats file1 ' - test_expect_success "file has correct hash and size in directory" ' + test_expect_success "file has correct hash and size in directory $EXTRA" ' echo "file1 $FILE1 4" > ls_l_expected && - ipfs files ls -l /cats > ls_l_actual && + ipfs files $ARGS ls -l /cats > ls_l_actual && test_cmp ls_l_expected ls_l_actual ' - test_expect_success "can read file" ' - ipfs files read /cats/file1 > file1out + test_expect_success "can read file $EXTRA" ' + ipfs files $ARGS read /cats/file1 > file1out ' - test_expect_success "output looks good" ' + test_expect_success "output looks good $EXTRA" ' echo foo > expected && test_cmp expected file1out ' - test_expect_success "can put another file into root" ' - ipfs files cp /ipfs/$FILE2 /file2 + test_expect_success "can put another file into root $EXTRA" ' + ipfs files $ARGS cp /ipfs/$FILE2 /file2 ' - test_expect_success "file shows up in root" ' + test_expect_success "file shows up in root $EXTRA" ' verify_dir_contents / file2 cats ' - test_expect_success "can read file" ' - ipfs files read /file2 > file2out + test_expect_success "can read file $EXTRA" ' + ipfs files $ARGS read /file2 > file2out ' - test_expect_success "output looks good" ' + test_expect_success "output looks good $EXTRA" ' echo bar > expected && test_cmp expected file2out ' - test_expect_success "can make deep directory" ' - ipfs files mkdir -p /cats/this/is/a/dir + test_expect_success "can make deep directory $EXTRA" ' + ipfs files $ARGS mkdir -p /cats/this/is/a/dir ' - test_expect_success "directory was created correctly" ' + test_expect_success "directory was created correctly $EXTRA" 
' verify_path_exists /cats/this/is/a/dir && verify_dir_contents /cats this file1 && verify_dir_contents /cats/this is && @@ -205,362 +207,378 @@ test_files_api() { verify_dir_contents /cats/this/is/a/dir ' - test_expect_success "can copy file into new dir" ' - ipfs files cp /ipfs/$FILE3 /cats/this/is/a/dir/file3 + test_expect_success "can copy file into new dir $EXTRA" ' + ipfs files $ARGS cp /ipfs/$FILE3 /cats/this/is/a/dir/file3 ' - test_expect_success "can read file" ' - ipfs files read /cats/this/is/a/dir/file3 > output + test_expect_success "can read file $EXTRA" ' + ipfs files $ARGS read /cats/this/is/a/dir/file3 > output ' - test_expect_success "output looks good" ' + test_expect_success "output looks good $EXTRA" ' echo baz > expected && test_cmp expected output ' - test_expect_success "file shows up in dir" ' + test_expect_success "file shows up in dir $EXTRA" ' verify_dir_contents /cats/this/is/a/dir file3 ' - test_expect_success "can remove file" ' - ipfs files rm /cats/this/is/a/dir/file3 + test_expect_success "can remove file $EXTRA" ' + ipfs files $ARGS rm /cats/this/is/a/dir/file3 ' - test_expect_success "file no longer appears" ' + test_expect_success "file no longer appears $EXTRA" ' verify_dir_contents /cats/this/is/a/dir ' - test_expect_success "can remove dir" ' - ipfs files rm -r /cats/this/is/a/dir + test_expect_success "can remove dir $EXTRA" ' + ipfs files $ARGS rm -r /cats/this/is/a/dir ' - test_expect_success "dir no longer appears" ' + test_expect_success "dir no longer appears $EXTRA" ' verify_dir_contents /cats/this/is/a ' - test_expect_success "can remove file from root" ' - ipfs files rm /file2 + test_expect_success "can remove file from root $EXTRA" ' + ipfs files $ARGS rm /file2 ' - test_expect_success "file no longer appears" ' + test_expect_success "file no longer appears $EXTRA" ' verify_dir_contents / cats ' - test_expect_success "check root hash" ' - ipfs files stat --hash / > roothash + test_expect_success "check root hash 
$EXTRA" ' + ipfs files $ARGS stat --hash / > roothash ' - test_expect_success "cannot remove root" ' - test_expect_code 1 ipfs files rm -r / + test_expect_success "cannot remove root $EXTRA" ' + test_expect_code 1 ipfs files $ARGS rm -r / ' - test_expect_success "check root hash was not changed" ' - ipfs files stat --hash / > roothashafter && + test_expect_success "check root hash was not changed $EXTRA" ' + ipfs files $ARGS stat --hash / > roothashafter && test_cmp roothash roothashafter ' # test read options - test_expect_success "read from offset works" ' - ipfs files read -o 1 /cats/file1 > output + test_expect_success "read from offset works $EXTRA" ' + ipfs files $ARGS read -o 1 /cats/file1 > output ' - test_expect_success "output looks good" ' + test_expect_success "output looks good $EXTRA" ' echo oo > expected && test_cmp expected output ' - test_expect_success "read with size works" ' - ipfs files read -n 2 /cats/file1 > output + test_expect_success "read with size works $EXTRA" ' + ipfs files $ARGS read -n 2 /cats/file1 > output ' - test_expect_success "output looks good" ' + test_expect_success "output looks good $EXTRA" ' printf fo > expected && test_cmp expected output ' - test_expect_success "cannot read from negative offset" ' - test_expect_code 1 ipfs files read --offset -3 /cats/file1 + test_expect_success "cannot read from negative offset $EXTRA" ' + test_expect_code 1 ipfs files $ARGS read --offset -3 /cats/file1 ' - test_expect_success "read from offset 0 works" ' - ipfs files read --offset 0 /cats/file1 > output + test_expect_success "read from offset 0 works $EXTRA" ' + ipfs files $ARGS read --offset 0 /cats/file1 > output ' - test_expect_success "output looks good" ' + test_expect_success "output looks good $EXTRA" ' echo foo > expected && test_cmp expected output ' - test_expect_success "read last byte works" ' - ipfs files read --offset 2 /cats/file1 > output + test_expect_success "read last byte works $EXTRA" ' + ipfs files $ARGS read 
--offset 2 /cats/file1 > output ' - test_expect_success "output looks good" ' + test_expect_success "output looks good $EXTRA" ' echo o > expected && test_cmp expected output ' - test_expect_success "offset past end of file fails" ' - test_expect_code 1 ipfs files read --offset 5 /cats/file1 + test_expect_success "offset past end of file fails $EXTRA" ' + test_expect_code 1 ipfs files $ARGS read --offset 5 /cats/file1 ' - test_expect_success "cannot read negative count bytes" ' + test_expect_success "cannot read negative count bytes $EXTRA" ' test_expect_code 1 ipfs read --count -1 /cats/file1 ' - test_expect_success "reading zero bytes prints nothing" ' - ipfs files read --count 0 /cats/file1 > output + test_expect_success "reading zero bytes prints nothing $EXTRA" ' + ipfs files $ARGS read --count 0 /cats/file1 > output ' - test_expect_success "output looks good" ' + test_expect_success "output looks good $EXTRA" ' printf "" > expected && test_cmp expected output ' - test_expect_success "count > len(file) prints entire file" ' - ipfs files read --count 200 /cats/file1 > output + test_expect_success "count > len(file) prints entire file $EXTRA" ' + ipfs files $ARGS read --count 200 /cats/file1 > output ' - test_expect_success "output looks good" ' + test_expect_success "output looks good $EXTRA" ' echo foo > expected && test_cmp expected output ' # test write - test_expect_success "can write file" ' + test_expect_success "can write file $EXTRA" ' echo "ipfs rocks" > tmpfile && - cat tmpfile | ipfs files write --create /cats/ipfs + cat tmpfile | ipfs files $ARGS write --create /cats/ipfs ' - test_expect_success "file was created" ' + test_expect_success "file was created $EXTRA" ' verify_dir_contents /cats ipfs file1 this ' - test_expect_success "can read file we just wrote" ' - ipfs files read /cats/ipfs > output + test_expect_success "can read file we just wrote $EXTRA" ' + ipfs files $ARGS read /cats/ipfs > output ' - test_expect_success "can write to offset" ' 
- echo "is super cool" | ipfs files write -o 5 /cats/ipfs + test_expect_success "can write to offset $EXTRA" ' + echo "is super cool" | ipfs files $ARGS write -o 5 /cats/ipfs ' - test_expect_success "file looks correct" ' + test_expect_success "file looks correct $EXTRA" ' echo "ipfs is super cool" > expected && - ipfs files read /cats/ipfs > output && + ipfs files $ARGS read /cats/ipfs > output && test_cmp expected output ' - test_expect_success "cant write to negative offset" ' - ipfs files stat --hash /cats/ipfs > filehash && - test_expect_code 1 ipfs files write --offset -1 /cats/ipfs < output + test_expect_success "file hash correct $EXTRA" ' + echo $FILE_HASH > filehash_expected && + ipfs files $ARGS stat --hash /cats/ipfs > filehash && + test_cmp filehash_expected filehash ' - test_expect_success "verify file was not changed" ' - ipfs files stat --hash /cats/ipfs > afterhash && + test_expect_success "cant write to negative offset $EXTRA" ' + test_expect_code 1 ipfs files $ARGS write --offset -1 /cats/ipfs < output + ' + + test_expect_success "verify file was not changed $EXTRA" ' + ipfs files $ARGS stat --hash /cats/ipfs > afterhash && test_cmp filehash afterhash ' - test_expect_success "write new file for testing" ' - echo foobar | ipfs files write --create /fun + test_expect_success "write new file for testing $EXTRA" ' + echo foobar | ipfs files $ARGS write --create /fun ' - test_expect_success "write to offset past end works" ' - echo blah | ipfs files write --offset 50 /fun + test_expect_success "write to offset past end works $EXTRA" ' + echo blah | ipfs files $ARGS write --offset 50 /fun ' - test_expect_success "can read file" ' - ipfs files read /fun > sparse_output + test_expect_success "can read file $EXTRA" ' + ipfs files $ARGS read /fun > sparse_output ' - test_expect_success "output looks good" ' + test_expect_success "output looks good $EXTRA" ' echo foobar > sparse_expected && echo blah | dd of=sparse_expected bs=50 seek=1 && test_cmp 
sparse_expected sparse_output ' - test_expect_success "cleanup" ' - ipfs files rm /fun + test_expect_success "cleanup $EXTRA" ' + ipfs files $ARGS rm /fun ' - test_expect_success "cannot write to directory" ' - ipfs files stat --hash /cats > dirhash && - test_expect_code 1 ipfs files write /cats < output + test_expect_success "cannot write to directory $EXTRA" ' + ipfs files $ARGS stat --hash /cats > dirhash && + test_expect_code 1 ipfs files $ARGS write /cats < output ' - test_expect_success "verify dir was not changed" ' - ipfs files stat --hash /cats > afterdirhash && + test_expect_success "verify dir was not changed $EXTRA" ' + ipfs files $ARGS stat --hash /cats > afterdirhash && test_cmp dirhash afterdirhash ' - test_expect_success "cannot write to nonexistant path" ' - test_expect_code 1 ipfs files write /cats/bar/ < output + test_expect_success "cannot write to nonexistant path $EXTRA" ' + test_expect_code 1 ipfs files $ARGS write /cats/bar/ < output ' - test_expect_success "no new paths were created" ' + test_expect_success "no new paths were created $EXTRA" ' verify_dir_contents /cats file1 ipfs this ' - test_expect_success "write 'no-flush' succeeds" ' - echo "testing" | ipfs files write -f=false -e /cats/walrus + test_expect_success "write 'no-flush' succeeds $EXTRA" ' + echo "testing" | ipfs files $ARGS write -f=false -e /cats/walrus ' - test_expect_success "root hash not bubbled up yet" ' + test_expect_success "root hash not bubbled up yet $EXTRA" ' test -z "$ONLINE" || (ipfs refs local > refsout && test_expect_code 1 grep $ROOT_HASH refsout) ' - test_expect_success "changes bubbled up to root on inspection" ' - ipfs files stat --hash / > root_hash + test_expect_success "changes bubbled up to root on inspection $EXTRA" ' + ipfs files $ARGS stat --hash / > root_hash ' - test_expect_success "root hash looks good" ' + test_expect_success "root hash looks good $EXTRA" ' export EXP_ROOT_HASH="$ROOT_HASH" && echo $EXP_ROOT_HASH > root_hash_exp && test_cmp 
root_hash_exp root_hash ' - test_expect_success "flush root succeeds" ' - ipfs files flush / + test_expect_success "flush root succeeds $EXTRA" ' + ipfs files $ARGS flush / ' # test mv - test_expect_success "can mv dir" ' - ipfs files mv /cats/this/is /cats/ + test_expect_success "can mv dir $EXTRA" ' + ipfs files $ARGS mv /cats/this/is /cats/ ' - test_expect_success "mv worked" ' + test_expect_success "mv worked $EXTRA" ' verify_dir_contents /cats file1 ipfs this is walrus && verify_dir_contents /cats/this ' - test_expect_success "cleanup, remove 'cats'" ' - ipfs files rm -r /cats + test_expect_success "cleanup, remove 'cats' $EXTRA" ' + ipfs files $ARGS rm -r /cats ' - test_expect_success "cleanup looks good" ' + test_expect_success "cleanup looks good $EXTRA" ' verify_dir_contents / ' # test truncating - test_expect_success "create a new file" ' - echo "some content" | ipfs files write --create /cats + test_expect_success "create a new file $EXTRA" ' + echo "some content" | ipfs files $ARGS write --create /cats ' - test_expect_success "truncate and write over that file" ' - echo "fish" | ipfs files write --truncate /cats + test_expect_success "truncate and write over that file $EXTRA" ' + echo "fish" | ipfs files $ARGS write --truncate /cats ' - test_expect_success "output looks good" ' - ipfs files read /cats > file_out && + test_expect_success "output looks good $EXTRA" ' + ipfs files $ARGS read /cats > file_out && echo "fish" > file_exp && test_cmp file_out file_exp ' - test_expect_success "cleanup" ' - ipfs files rm /cats + test_expect_success "cleanup $EXTRA" ' + ipfs files $ARGS rm /cats ' # test flush flags - test_expect_success "mkdir --flush works" ' - ipfs files mkdir --flush --parents /flushed/deep + test_expect_success "mkdir --flush works $EXTRA" ' + ipfs files $ARGS mkdir --flush --parents /flushed/deep ' - test_expect_success "mkdir --flush works a second time" ' - ipfs files mkdir --flush --parents /flushed/deep + test_expect_success "mkdir 
--flush works a second time $EXTRA" ' + ipfs files $ARGS mkdir --flush --parents /flushed/deep ' - test_expect_success "dir looks right" ' + test_expect_success "dir looks right $EXTRA" ' verify_dir_contents / flushed ' - test_expect_success "child dir looks right" ' + test_expect_success "child dir looks right $EXTRA" ' verify_dir_contents /flushed deep ' - test_expect_success "cleanup" ' - ipfs files rm -r /flushed + test_expect_success "cleanup $EXTRA" ' + ipfs files $ARGS rm -r /flushed ' - test_expect_success "child dir looks right" ' + test_expect_success "child dir looks right $EXTRA" ' verify_dir_contents / ' # test for https://github.com/ipfs/go-ipfs/issues/2654 - test_expect_success "create and remove dir" ' - ipfs files mkdir /test_dir && - ipfs files rm -r "/test_dir" + test_expect_success "create and remove dir $EXTRA" ' + ipfs files $ARGS mkdir /test_dir && + ipfs files $ARGS rm -r "/test_dir" ' - test_expect_success "create test file" ' - echo "content" | ipfs files write -e "/test_file" + test_expect_success "create test file $EXTRA" ' + echo "content" | ipfs files $ARGS write -e "/test_file" ' - test_expect_success "copy test file onto test dir" ' - ipfs files cp "/test_file" "/test_dir" + test_expect_success "copy test file onto test dir $EXTRA" ' + ipfs files $ARGS cp "/test_file" "/test_dir" ' - test_expect_success "test /test_dir" ' - ipfs files stat "/test_dir" | grep -q "^Type: file" + test_expect_success "test /test_dir $EXTRA" ' + ipfs files $ARGS stat "/test_dir" | grep -q "^Type: file" ' - test_expect_success "clean up /test_dir and /test_file" ' - ipfs files rm -r /test_dir && - ipfs files rm -r /test_file + test_expect_success "clean up /test_dir and /test_file $EXTRA" ' + ipfs files $ARGS rm -r /test_dir && + ipfs files $ARGS rm -r /test_file ' - test_expect_success "make a directory and a file" ' - ipfs files mkdir /adir && - echo "blah" | ipfs files write --create /foobar + test_expect_success "make a directory and a file $EXTRA" ' + 
ipfs files $ARGS mkdir /adir && + echo "blah" | ipfs files $ARGS write --create /foobar ' - test_expect_success "copy a file into a directory" ' - ipfs files cp /foobar /adir/ + test_expect_success "copy a file into a directory $EXTRA" ' + ipfs files $ARGS cp /foobar /adir/ ' - test_expect_success "file made it into directory" ' - ipfs files ls /adir | grep foobar + test_expect_success "file made it into directory $EXTRA" ' + ipfs files $ARGS ls /adir | grep foobar ' - test_expect_success "clean up" ' - ipfs files rm -r /foobar && - ipfs files rm -r /adir + test_expect_success "clean up $EXTRA" ' + ipfs files $ARGS rm -r /foobar && + ipfs files $ARGS rm -r /adir ' - test_expect_success "root mfs entry is empty" ' + test_expect_success "root mfs entry is empty $EXTRA" ' verify_dir_contents / ' - test_expect_success "repo gc" ' + test_expect_success "repo gc $EXTRA" ' ipfs repo gc ' } # test offline and online -test_expect_success "can create some files for testing" ' - create_files -' -test_files_api QmcwKfTMCT7AaeiD92hWjnZn9b6eh9NxnhfSzN5x2vnDpt -test_expect_success "can create some files for testing with raw-leaves" ' - create_files --raw-leaves -' -test_files_api QmTpKiKcAj4sbeesN6vrs5w3QeVmd4QmGpxRL81hHut4dZ +tests_for_files_api() { + local EXTRA + EXTRA=$1 + + test_expect_success "can create some files for testing ($extra)" ' + create_files + ' + ROOT_HASH=QmcwKfTMCT7AaeiD92hWjnZn9b6eh9NxnhfSzN5x2vnDpt + FILE_HASH=QmQdQt9qooenjeaNhiKHF3hBvmNteB4MQBtgu3jxgf9c7i + test_files_api "($EXTRA)" + + test_expect_success "can create some files for testing with raw-leaves ($extra)" ' + create_files --raw-leaves + ' + ROOT_HASH=QmTpKiKcAj4sbeesN6vrs5w3QeVmd4QmGpxRL81hHut4dZ + test_files_api "($EXTRA, partial raw-leaves)" + + test_expect_success "can create some files for testing with raw-leaves ($extra)" ' + create_files --raw-leaves + ' + ROOT_HASH=QmW3dMSU6VNd1mEdpk9S3ZYRuR1YwwoXjGaZhkyK6ru9YU + FILE_HASH=QmRCgHeoKxCqK2Es6M6nPUDVWz19yNQPnsXGsXeuTkSKpN + test_files_api 
"($EXTRA, raw-leaves)" --raw-leaves +} + +tests_for_files_api "online" test_launch_ipfs_daemon --offline ONLINE=1 # set online flag so tests can easily tell -test_expect_success "can create some files for testing" ' - create_files -' -test_files_api QmcwKfTMCT7AaeiD92hWjnZn9b6eh9NxnhfSzN5x2vnDpt -test_expect_success "can create some files for testing with raw-leaves" ' - create_files --raw-leaves -' -test_files_api QmTpKiKcAj4sbeesN6vrs5w3QeVmd4QmGpxRL81hHut4dZ +tests_for_files_api "offline" test_kill_ipfs_daemon --offline From 2e15dcb647dfe50d9fdab3a58ee261ef7f76ca96 Mon Sep 17 00:00:00 2001 From: Kevin Atkinson Date: Tue, 8 Aug 2017 18:19:46 -0400 Subject: [PATCH 04/16] Enable CidV1 (and other prefixes) in the Dag Modifier. License: MIT Signed-off-by: Kevin Atkinson --- importer/trickle/trickledag.go | 28 +++- merkledag/node.go | 3 + unixfs/io/dagreader_test.go | 12 +- unixfs/mod/dagmodifier.go | 24 +++- unixfs/mod/dagmodifier_test.go | 247 ++++++++++++++------------------- unixfs/test/utils.go | 29 ++-- 6 files changed, 178 insertions(+), 165 deletions(-) diff --git a/importer/trickle/trickledag.go b/importer/trickle/trickledag.go index 32f86a2fa99..4064d8b90c1 100644 --- a/importer/trickle/trickledag.go +++ b/importer/trickle/trickledag.go @@ -9,6 +9,7 @@ import ( dag "github.com/ipfs/go-ipfs/merkledag" ft "github.com/ipfs/go-ipfs/unixfs" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" node "gx/ipfs/QmPN7cwmpcc4DWXb4KTB9dNAJgjuPY69h3npsMfhRrQL9c/go-ipld-format" ) @@ -239,6 +240,7 @@ type VerifyParams struct { Getter node.NodeGetter Direct int LayerRepeat int + Prefix *cid.Prefix RawLeaves bool } @@ -250,6 +252,7 @@ func VerifyTrickleDagStructure(nd node.Node, p VerifyParams) error { // Recursive call for verifying the structure of a trickledag func verifyTDagRec(n node.Node, depth int, p VerifyParams) error { + codec := cid.DagProtobuf if depth == 0 { if len(n.Links()) > 0 { return errors.New("expected direct block") @@ -269,19 +272,36 @@ 
func verifyTDagRec(n node.Node, depth int, p VerifyParams) error { if p.RawLeaves { return errors.New("expected raw leaf, got a protobuf node") } - - return nil case *dag.RawNode: if !p.RawLeaves { return errors.New("expected protobuf node as leaf") } - - return nil + codec = cid.Raw default: return errors.New("expected ProtoNode or RawNode") } } + // verify prefix + if p.Prefix != nil { + prefix := n.Cid().Prefix() + expect := *p.Prefix // make a copy + expect.Codec = uint64(codec) + if codec == cid.Raw && expect.Version == 0 { + expect.Version = 1 + } + if expect.MhLength == -1 { + expect.MhLength = prefix.MhLength + } + if prefix != expect { + return fmt.Errorf("unexpected cid prefix: expected: %v; got %v", expect, prefix) + } + } + + if depth == 0 { + return nil + } + nd, ok := n.(*dag.ProtoNode) if !ok { return errors.New("expected ProtoNode") diff --git a/merkledag/node.go b/merkledag/node.go index ad4f246cce0..fae3fa7fc14 100644 --- a/merkledag/node.go +++ b/merkledag/node.go @@ -42,6 +42,9 @@ var v1CidPrefix = cid.Prefix{ Version: 1, } +func V0CidPrefix() cid.Prefix { return v0CidPrefix } +func V1CidPrefix() cid.Prefix { return v1CidPrefix } + // PrefixForCidVersion returns the Protobuf prefix for a given CID version func PrefixForCidVersion(version int) (cid.Prefix, error) { switch version { diff --git a/unixfs/io/dagreader_test.go b/unixfs/io/dagreader_test.go index 85c805e9ced..a5ed6dd3960 100644 --- a/unixfs/io/dagreader_test.go +++ b/unixfs/io/dagreader_test.go @@ -17,7 +17,7 @@ import ( func TestBasicRead(t *testing.T) { dserv := testu.GetDAGServ() - inbuf, node := testu.GetRandomNode(t, dserv, 1024, testu.ProtoBufLeaves) + inbuf, node := testu.GetRandomNode(t, dserv, 1024, testu.UseProtoBufLeaves) ctx, closer := context.WithCancel(context.Background()) defer closer() @@ -44,7 +44,7 @@ func TestSeekAndRead(t *testing.T) { inbuf[i] = byte(i) } - node := testu.GetNode(t, dserv, inbuf, testu.ProtoBufLeaves) + node := testu.GetNode(t, dserv, inbuf, 
testu.UseProtoBufLeaves) ctx, closer := context.WithCancel(context.Background()) defer closer() @@ -84,7 +84,7 @@ func TestRelativeSeek(t *testing.T) { } inbuf[1023] = 1 // force the reader to be 1024 bytes - node := testu.GetNode(t, dserv, inbuf, testu.ProtoBufLeaves) + node := testu.GetNode(t, dserv, inbuf, testu.UseProtoBufLeaves) reader, err := NewDagReader(ctx, node, dserv) if err != nil { @@ -160,7 +160,7 @@ func TestBadPBData(t *testing.T) { func TestMetadataNode(t *testing.T) { dserv := testu.GetDAGServ() - rdata, rnode := testu.GetRandomNode(t, dserv, 512, testu.ProtoBufLeaves) + rdata, rnode := testu.GetRandomNode(t, dserv, 512, testu.UseProtoBufLeaves) _, err := dserv.Add(rnode) if err != nil { t.Fatal(err) @@ -203,7 +203,7 @@ func TestMetadataNode(t *testing.T) { func TestWriteTo(t *testing.T) { dserv := testu.GetDAGServ() - inbuf, node := testu.GetRandomNode(t, dserv, 1024, testu.ProtoBufLeaves) + inbuf, node := testu.GetRandomNode(t, dserv, 1024, testu.UseProtoBufLeaves) ctx, closer := context.WithCancel(context.Background()) defer closer() @@ -225,7 +225,7 @@ func TestWriteTo(t *testing.T) { func TestReaderSzie(t *testing.T) { dserv := testu.GetDAGServ() size := int64(1024) - _, node := testu.GetRandomNode(t, dserv, size, testu.ProtoBufLeaves) + _, node := testu.GetRandomNode(t, dserv, size, testu.UseProtoBufLeaves) ctx, closer := context.WithCancel(context.Background()) defer closer() diff --git a/unixfs/mod/dagmodifier.go b/unixfs/mod/dagmodifier.go index 5eaad4779df..23c1945a50b 100644 --- a/unixfs/mod/dagmodifier.go +++ b/unixfs/mod/dagmodifier.go @@ -40,6 +40,7 @@ type DagModifier struct { curWrOff uint64 wrBuf *bytes.Buffer + Prefix cid.Prefix RawLeaves bool read uio.DagReader @@ -47,6 +48,10 @@ type DagModifier struct { var ErrNotUnixfs = fmt.Errorf("dagmodifier only supports unixfs nodes (proto or raw)") +// NewDagModifier returns a new DagModifier, the Cid prefix for newly +// created nodes will be inherted from the passed in node. 
If the Cid +// version if not 0 raw leaves will also be enabled. The Prefix and +// RawLeaves options can be overridden by changing them after the call. func NewDagModifier(ctx context.Context, from node.Node, serv mdag.DAGService, spl chunk.SplitterGen) (*DagModifier, error) { switch from.(type) { case *mdag.ProtoNode, *mdag.RawNode: @@ -55,11 +60,20 @@ func NewDagModifier(ctx context.Context, from node.Node, serv mdag.DAGService, s return nil, ErrNotUnixfs } + prefix := from.Cid().Prefix() + prefix.Codec = cid.DagProtobuf + rawLeaves := false + if prefix.Version > 0 { + rawLeaves = true + } + return &DagModifier{ - curNode: from.Copy(), - dagserv: serv, - splitter: spl, - ctx: ctx, + curNode: from.Copy(), + dagserv: serv, + splitter: spl, + ctx: ctx, + Prefix: prefix, + RawLeaves: rawLeaves, }, nil } @@ -240,6 +254,7 @@ func (dm *DagModifier) modifyDag(n node.Node, offset uint64, data io.Reader) (*c nd := new(mdag.ProtoNode) nd.SetData(b) + nd.SetPrefix(&nd0.Prefix) k, err := dm.dagserv.Add(nd) if err != nil { return nil, false, err @@ -345,6 +360,7 @@ func (dm *DagModifier) appendData(nd node.Node, spl chunk.Splitter) (node.Node, dbp := &help.DagBuilderParams{ Dagserv: dm.dagserv, Maxlinks: help.DefaultLinksPerBlock, + Prefix: &dm.Prefix, RawLeaves: dm.RawLeaves, } return trickle.TrickleAppend(dm.ctx, nd, dbp.New(spl)) diff --git a/unixfs/mod/dagmodifier_test.go b/unixfs/mod/dagmodifier_test.go index 7b15b8532dd..1b1cc52f748 100644 --- a/unixfs/mod/dagmodifier_test.go +++ b/unixfs/mod/dagmodifier_test.go @@ -16,7 +16,7 @@ import ( u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" ) -func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier, rawLeaves testu.UseRawLeaves) []byte { +func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier, opts testu.NodeOpts) []byte { newdata := make([]byte, size) r := u.NewTimeSeededRand() r.Read(newdata) @@ -35,6 +35,12 @@ func testModWrite(t *testing.T, beg, size 
uint64, orig []byte, dm *DagModifier, t.Fatalf("Mod length not correct! %d != %d", nmod, size) } + verifyNode(t, orig, dm, opts) + + return orig +} + +func verifyNode(t *testing.T, orig []byte, dm *DagModifier, opts testu.NodeOpts) { nd, err := dm.GetNode() if err != nil { t.Fatal(err) @@ -44,10 +50,11 @@ func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier, Getter: dm.dagserv, Direct: h.DefaultLinksPerBlock, LayerRepeat: 4, - RawLeaves: bool(rawLeaves), + Prefix: &opts.Prefix, + RawLeaves: opts.RawLeavesUsed, }) if err != nil { - t.Error(err) + t.Fatal(err) } rd, err := uio.NewDagReader(context.Background(), nd, dm.dagserv) @@ -64,20 +71,20 @@ func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier, if err != nil { t.Fatal(err) } - return orig } -func runBothSubtests(t *testing.T, tfunc func(*testing.T, testu.UseRawLeaves)) { - t.Run("leaves=ProtoBuf", func(t *testing.T) { tfunc(t, testu.ProtoBufLeaves) }) - t.Run("leaves=Raw", func(t *testing.T) { tfunc(t, testu.RawLeaves) }) +func runAllSubtests(t *testing.T, tfunc func(*testing.T, testu.NodeOpts)) { + t.Run("opts=ProtoBufLeaves", func(t *testing.T) { tfunc(t, testu.UseProtoBufLeaves) }) + t.Run("opts=RawLeaves", func(t *testing.T) { tfunc(t, testu.UseRawLeaves) }) + t.Run("opts=CidV1", func(t *testing.T) { tfunc(t, testu.UseCidV1) }) } func TestDagModifierBasic(t *testing.T) { - runBothSubtests(t, testDagModifierBasic) + runAllSubtests(t, testDagModifierBasic) } -func testDagModifierBasic(t *testing.T, rawLeaves testu.UseRawLeaves) { +func testDagModifierBasic(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() - b, n := testu.GetRandomNode(t, dserv, 50000, rawLeaves) + b, n := testu.GetRandomNode(t, dserv, 50000, opts) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -85,33 +92,35 @@ func testDagModifierBasic(t *testing.T, rawLeaves testu.UseRawLeaves) { if err != nil { t.Fatal(err) } - dagmod.RawLeaves = bool(rawLeaves) + 
if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } // Within zero block beg := uint64(15) length := uint64(60) t.Log("Testing mod within zero block") - b = testModWrite(t, beg, length, b, dagmod, rawLeaves) + b = testModWrite(t, beg, length, b, dagmod, opts) // Within bounds of existing file beg = 1000 length = 4000 t.Log("Testing mod within bounds of existing multiblock file.") - b = testModWrite(t, beg, length, b, dagmod, rawLeaves) + b = testModWrite(t, beg, length, b, dagmod, opts) // Extend bounds beg = 49500 length = 4000 t.Log("Testing mod that extends file.") - b = testModWrite(t, beg, length, b, dagmod, rawLeaves) + b = testModWrite(t, beg, length, b, dagmod, opts) // "Append" beg = uint64(len(b)) length = 3000 t.Log("Testing pure append") - _ = testModWrite(t, beg, length, b, dagmod, rawLeaves) + _ = testModWrite(t, beg, length, b, dagmod, opts) // Verify reported length node, err := dagmod.GetNode() @@ -131,11 +140,11 @@ func testDagModifierBasic(t *testing.T, rawLeaves testu.UseRawLeaves) { } func TestMultiWrite(t *testing.T) { - runBothSubtests(t, testMultiWrite) + runAllSubtests(t, testMultiWrite) } -func testMultiWrite(t *testing.T, rawLeaves testu.UseRawLeaves) { +func testMultiWrite(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv, rawLeaves) + n := testu.GetEmptyNode(t, dserv, opts) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -144,7 +153,9 @@ func testMultiWrite(t *testing.T, rawLeaves testu.UseRawLeaves) { if err != nil { t.Fatal(err) } - dagmod.RawLeaves = bool(rawLeaves) + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } data := make([]byte, 4000) u.NewTimeSeededRand().Read(data) @@ -167,32 +178,16 @@ func testMultiWrite(t *testing.T, rawLeaves testu.UseRawLeaves) { t.Fatal("Size was reported incorrectly") } } - nd, err := dagmod.GetNode() - if err != nil { - t.Fatal(err) - } - read, err := uio.NewDagReader(context.Background(), nd, dserv) - if err != nil 
{ - t.Fatal(err) - } - rbuf, err := ioutil.ReadAll(read) - if err != nil { - t.Fatal(err) - } - - err = testu.ArrComp(rbuf, data) - if err != nil { - t.Fatal(err) - } + verifyNode(t, data, dagmod, opts) } func TestMultiWriteAndFlush(t *testing.T) { - runBothSubtests(t, testMultiWriteAndFlush) + runAllSubtests(t, testMultiWriteAndFlush) } -func testMultiWriteAndFlush(t *testing.T, rawLeaves testu.UseRawLeaves) { +func testMultiWriteAndFlush(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv, rawLeaves) + n := testu.GetEmptyNode(t, dserv, opts) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -201,7 +196,9 @@ func testMultiWriteAndFlush(t *testing.T, rawLeaves testu.UseRawLeaves) { if err != nil { t.Fatal(err) } - dagmod.RawLeaves = bool(rawLeaves) + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } data := make([]byte, 20) u.NewTimeSeededRand().Read(data) @@ -219,32 +216,16 @@ func testMultiWriteAndFlush(t *testing.T, rawLeaves testu.UseRawLeaves) { t.Fatal(err) } } - nd, err := dagmod.GetNode() - if err != nil { - t.Fatal(err) - } - read, err := uio.NewDagReader(context.Background(), nd, dserv) - if err != nil { - t.Fatal(err) - } - rbuf, err := ioutil.ReadAll(read) - if err != nil { - t.Fatal(err) - } - - err = testu.ArrComp(rbuf, data) - if err != nil { - t.Fatal(err) - } + verifyNode(t, data, dagmod, opts) } func TestWriteNewFile(t *testing.T) { - runBothSubtests(t, testWriteNewFile) + runAllSubtests(t, testWriteNewFile) } -func testWriteNewFile(t *testing.T, rawLeaves testu.UseRawLeaves) { +func testWriteNewFile(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv, rawLeaves) + n := testu.GetEmptyNode(t, dserv, opts) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -253,7 +234,9 @@ func testWriteNewFile(t *testing.T, rawLeaves testu.UseRawLeaves) { if err != nil { t.Fatal(err) } - dagmod.RawLeaves = 
bool(rawLeaves) + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } towrite := make([]byte, 2000) u.NewTimeSeededRand().Read(towrite) @@ -266,32 +249,15 @@ func testWriteNewFile(t *testing.T, rawLeaves testu.UseRawLeaves) { t.Fatal("Wrote wrong amount") } - nd, err := dagmod.GetNode() - if err != nil { - t.Fatal(err) - } - - read, err := uio.NewDagReader(ctx, nd, dserv) - if err != nil { - t.Fatal(err) - } - - data, err := ioutil.ReadAll(read) - if err != nil { - t.Fatal(err) - } - - if err := testu.ArrComp(data, towrite); err != nil { - t.Fatal(err) - } + verifyNode(t, towrite, dagmod, opts) } func TestMultiWriteCoal(t *testing.T) { - runBothSubtests(t, testMultiWriteCoal) + runAllSubtests(t, testMultiWriteCoal) } -func testMultiWriteCoal(t *testing.T, rawLeaves testu.UseRawLeaves) { +func testMultiWriteCoal(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv, rawLeaves) + n := testu.GetEmptyNode(t, dserv, opts) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -300,7 +266,9 @@ func testMultiWriteCoal(t *testing.T, rawLeaves testu.UseRawLeaves) { if err != nil { t.Fatal(err) } - dagmod.RawLeaves = bool(rawLeaves) + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } data := make([]byte, 1000) u.NewTimeSeededRand().Read(data) @@ -316,34 +284,16 @@ func testMultiWriteCoal(t *testing.T, rawLeaves testu.UseRawLeaves) { } } - nd, err := dagmod.GetNode() - if err != nil { - t.Fatal(err) - } - read, err := uio.NewDagReader(context.Background(), nd, dserv) - if err != nil { - t.Fatal(err) - } - dagmod.RawLeaves = bool(rawLeaves) - - rbuf, err := ioutil.ReadAll(read) - if err != nil { - t.Fatal(err) - } - - err = testu.ArrComp(rbuf, data) - if err != nil { - t.Fatal(err) - } + verifyNode(t, data, dagmod, opts) } func TestLargeWriteChunks(t *testing.T) { - runBothSubtests(t, testLargeWriteChunks) + runAllSubtests(t, testLargeWriteChunks) } -func testLargeWriteChunks(t *testing.T, rawLeaves 
testu.UseRawLeaves) { +func testLargeWriteChunks(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv, rawLeaves) + n := testu.GetEmptyNode(t, dserv, opts) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -352,7 +302,9 @@ func testLargeWriteChunks(t *testing.T, rawLeaves testu.UseRawLeaves) { if err != nil { t.Fatal(err) } - dagmod.RawLeaves = bool(rawLeaves) + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } wrsize := 1000 datasize := 10000000 @@ -378,15 +330,14 @@ func testLargeWriteChunks(t *testing.T, rawLeaves testu.UseRawLeaves) { if err = testu.ArrComp(out, data); err != nil { t.Fatal(err) } - } func TestDagTruncate(t *testing.T) { - runBothSubtests(t, testDagTruncate) + runAllSubtests(t, testDagTruncate) } -func testDagTruncate(t *testing.T, rawLeaves testu.UseRawLeaves) { +func testDagTruncate(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() - b, n := testu.GetRandomNode(t, dserv, 50000, rawLeaves) + b, n := testu.GetRandomNode(t, dserv, 50000, opts) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -394,7 +345,9 @@ func testDagTruncate(t *testing.T, rawLeaves testu.UseRawLeaves) { if err != nil { t.Fatal(err) } - dagmod.RawLeaves = bool(rawLeaves) + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } err = dagmod.Truncate(12345) if err != nil { @@ -453,11 +406,11 @@ func testDagTruncate(t *testing.T, rawLeaves testu.UseRawLeaves) { } func TestSparseWrite(t *testing.T) { - runBothSubtests(t, testSparseWrite) + runAllSubtests(t, testSparseWrite) } -func testSparseWrite(t *testing.T, rawLeaves testu.UseRawLeaves) { +func testSparseWrite(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv, rawLeaves) + n := testu.GetEmptyNode(t, dserv, opts) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -465,7 +418,9 @@ func testSparseWrite(t *testing.T, rawLeaves 
testu.UseRawLeaves) { if err != nil { t.Fatal(err) } - dagmod.RawLeaves = bool(rawLeaves) + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } buf := make([]byte, 5000) u.NewTimeSeededRand().Read(buf[2500:]) @@ -495,11 +450,11 @@ func testSparseWrite(t *testing.T, rawLeaves testu.UseRawLeaves) { } func TestSeekPastEndWrite(t *testing.T) { - runBothSubtests(t, testSeekPastEndWrite) + runAllSubtests(t, testSeekPastEndWrite) } -func testSeekPastEndWrite(t *testing.T, rawLeaves testu.UseRawLeaves) { +func testSeekPastEndWrite(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv, rawLeaves) + n := testu.GetEmptyNode(t, dserv, opts) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -507,7 +462,9 @@ func testSeekPastEndWrite(t *testing.T, rawLeaves testu.UseRawLeaves) { if err != nil { t.Fatal(err) } - dagmod.RawLeaves = bool(rawLeaves) + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } buf := make([]byte, 5000) u.NewTimeSeededRand().Read(buf[2500:]) @@ -546,11 +503,11 @@ func testSeekPastEndWrite(t *testing.T, rawLeaves testu.UseRawLeaves) { } func TestRelativeSeek(t *testing.T) { - runBothSubtests(t, testRelativeSeek) + runAllSubtests(t, testRelativeSeek) } -func testRelativeSeek(t *testing.T, rawLeaves testu.UseRawLeaves) { +func testRelativeSeek(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv, rawLeaves) + n := testu.GetEmptyNode(t, dserv, opts) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -558,7 +515,9 @@ func testRelativeSeek(t *testing.T, rawLeaves testu.UseRawLeaves) { if err != nil { t.Fatal(err) } - dagmod.RawLeaves = bool(rawLeaves) + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } for i := 0; i < 64; i++ { dagmod.Write([]byte{byte(i)}) @@ -580,11 +539,11 @@ func testRelativeSeek(t *testing.T, rawLeaves testu.UseRawLeaves) { } func TestInvalidSeek(t *testing.T) { - runBothSubtests(t, 
testInvalidSeek) + runAllSubtests(t, testInvalidSeek) } -func testInvalidSeek(t *testing.T, rawLeaves testu.UseRawLeaves) { +func testInvalidSeek(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv, rawLeaves) + n := testu.GetEmptyNode(t, dserv, opts) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -592,7 +551,9 @@ func testInvalidSeek(t *testing.T, rawLeaves testu.UseRawLeaves) { if err != nil { t.Fatal(err) } - dagmod.RawLeaves = bool(rawLeaves) + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } _, err = dagmod.Seek(10, -10) @@ -602,12 +563,12 @@ func testInvalidSeek(t *testing.T, rawLeaves testu.UseRawLeaves) { } func TestEndSeek(t *testing.T) { - runBothSubtests(t, testEndSeek) + runAllSubtests(t, testEndSeek) } -func testEndSeek(t *testing.T, rawLeaves testu.UseRawLeaves) { +func testEndSeek(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv, rawLeaves) + n := testu.GetEmptyNode(t, dserv, opts) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -615,7 +576,9 @@ func testEndSeek(t *testing.T, rawLeaves testu.UseRawLeaves) { if err != nil { t.Fatal(err) } - dagmod.RawLeaves = bool(rawLeaves) + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } _, err = dagmod.Write(make([]byte, 100)) if err != nil { @@ -648,12 +611,12 @@ func testEndSeek(t *testing.T, rawLeaves testu.UseRawLeaves) { } func TestReadAndSeek(t *testing.T) { - runBothSubtests(t, testReadAndSeek) + runAllSubtests(t, testReadAndSeek) } -func testReadAndSeek(t *testing.T, rawLeaves testu.UseRawLeaves) { +func testReadAndSeek(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv, rawLeaves) + n := testu.GetEmptyNode(t, dserv, opts) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -661,7 +624,9 @@ func testReadAndSeek(t *testing.T, rawLeaves testu.UseRawLeaves) { if err != nil { 
t.Fatal(err) } - dagmod.RawLeaves = bool(rawLeaves) + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } writeBuf := []byte{0, 1, 2, 3, 4, 5, 6, 7} dagmod.Write(writeBuf) @@ -720,12 +685,12 @@ func testReadAndSeek(t *testing.T, rawLeaves testu.UseRawLeaves) { } func TestCtxRead(t *testing.T) { - runBothSubtests(t, testCtxRead) + runAllSubtests(t, testCtxRead) } -func testCtxRead(t *testing.T, rawLeaves testu.UseRawLeaves) { +func testCtxRead(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(t, dserv, rawLeaves) + n := testu.GetEmptyNode(t, dserv, opts) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -733,7 +698,9 @@ func testCtxRead(t *testing.T, rawLeaves testu.UseRawLeaves) { if err != nil { t.Fatal(err) } - dagmod.RawLeaves = bool(rawLeaves) + if opts.ForceRawLeaves { + dagmod.RawLeaves = true + } _, err = dagmod.Write([]byte{0, 1, 2, 3, 4, 5, 6, 7}) if err != nil { @@ -757,7 +724,7 @@ func testCtxRead(t *testing.T, rawLeaves testu.UseRawLeaves) { func BenchmarkDagmodWrite(b *testing.B) { b.StopTimer() dserv := testu.GetDAGServ() - n := testu.GetEmptyNode(b, dserv, testu.ProtoBufLeaves) + n := testu.GetEmptyNode(b, dserv, testu.UseProtoBufLeaves) ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/unixfs/test/utils.go b/unixfs/test/utils.go index 933493f3678..fc9a04be366 100644 --- a/unixfs/test/utils.go +++ b/unixfs/test/utils.go @@ -15,6 +15,7 @@ import ( mdagmock "github.com/ipfs/go-ipfs/merkledag/test" ft "github.com/ipfs/go-ipfs/unixfs" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" node "gx/ipfs/QmPN7cwmpcc4DWXb4KTB9dNAJgjuPY69h3npsMfhRrQL9c/go-ipld-format" u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" ) @@ -29,20 +30,26 @@ func GetDAGServ() mdag.DAGService { return mdagmock.Mock() } -type UseRawLeaves bool +type NodeOpts struct { + Prefix cid.Prefix + // ForceRawLeaves if true will force the use of raw 
leaves + ForceRawLeaves bool + // RawLeavesUsed is true if raw leaves or either implicitly or explicitly enabled + RawLeavesUsed bool +} -const ( - ProtoBufLeaves UseRawLeaves = false - RawLeaves UseRawLeaves = true -) +var UseProtoBufLeaves = NodeOpts{Prefix: mdag.V0CidPrefix()} +var UseRawLeaves = NodeOpts{Prefix: mdag.V0CidPrefix(), ForceRawLeaves: true, RawLeavesUsed: true} +var UseCidV1 = NodeOpts{Prefix: mdag.V1CidPrefix(), RawLeavesUsed: true} -func GetNode(t testing.TB, dserv mdag.DAGService, data []byte, rawLeaves UseRawLeaves) node.Node { +func GetNode(t testing.TB, dserv mdag.DAGService, data []byte, opts NodeOpts) node.Node { in := bytes.NewReader(data) dbp := h.DagBuilderParams{ Dagserv: dserv, Maxlinks: h.DefaultLinksPerBlock, - RawLeaves: bool(rawLeaves), + Prefix: &opts.Prefix, + RawLeaves: opts.RawLeavesUsed, } node, err := trickle.TrickleLayout(dbp.New(SizeSplitterGen(500)(in))) @@ -53,18 +60,18 @@ func GetNode(t testing.TB, dserv mdag.DAGService, data []byte, rawLeaves UseRawL return node } -func GetEmptyNode(t testing.TB, dserv mdag.DAGService, rawLeaves UseRawLeaves) node.Node { - return GetNode(t, dserv, []byte{}, rawLeaves) +func GetEmptyNode(t testing.TB, dserv mdag.DAGService, opts NodeOpts) node.Node { + return GetNode(t, dserv, []byte{}, opts) } -func GetRandomNode(t testing.TB, dserv mdag.DAGService, size int64, rawLeaves UseRawLeaves) ([]byte, node.Node) { +func GetRandomNode(t testing.TB, dserv mdag.DAGService, size int64, opts NodeOpts) ([]byte, node.Node) { in := io.LimitReader(u.NewTimeSeededRand(), size) buf, err := ioutil.ReadAll(in) if err != nil { t.Fatal(err) } - node := GetNode(t, dserv, buf, rawLeaves) + node := GetNode(t, dserv, buf, opts) return buf, node } From 77e9b8ddf9e7e6e945305b8456f934885d22e836 Mon Sep 17 00:00:00 2001 From: Kevin Atkinson Date: Tue, 8 Aug 2017 18:20:56 -0400 Subject: [PATCH 05/16] Test for alternative hash function in Dag Modifier. 
License: MIT Signed-off-by: Kevin Atkinson --- unixfs/mod/dagmodifier_test.go | 1 + unixfs/test/utils.go | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/unixfs/mod/dagmodifier_test.go b/unixfs/mod/dagmodifier_test.go index 1b1cc52f748..473d34294c6 100644 --- a/unixfs/mod/dagmodifier_test.go +++ b/unixfs/mod/dagmodifier_test.go @@ -77,6 +77,7 @@ func runAllSubtests(t *testing.T, tfunc func(*testing.T, testu.NodeOpts)) { t.Run("opts=ProtoBufLeaves", func(t *testing.T) { tfunc(t, testu.UseProtoBufLeaves) }) t.Run("opts=RawLeaves", func(t *testing.T) { tfunc(t, testu.UseRawLeaves) }) t.Run("opts=CidV1", func(t *testing.T) { tfunc(t, testu.UseCidV1) }) + t.Run("opts=Blake2b256", func(t *testing.T) { tfunc(t, testu.UseBlake2b256) }) } func TestDagModifierBasic(t *testing.T) { diff --git a/unixfs/test/utils.go b/unixfs/test/utils.go index fc9a04be366..8b18ad9cd07 100644 --- a/unixfs/test/utils.go +++ b/unixfs/test/utils.go @@ -18,6 +18,7 @@ import ( cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" node "gx/ipfs/QmPN7cwmpcc4DWXb4KTB9dNAJgjuPY69h3npsMfhRrQL9c/go-ipld-format" u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util" + mh "gx/ipfs/QmU9a9NV9RdPNwZQDYd5uKsm6N6LJLSvLbywDDYFbaaC6P/go-multihash" ) func SizeSplitterGen(size int64) chunk.SplitterGen { @@ -41,6 +42,13 @@ type NodeOpts struct { var UseProtoBufLeaves = NodeOpts{Prefix: mdag.V0CidPrefix()} var UseRawLeaves = NodeOpts{Prefix: mdag.V0CidPrefix(), ForceRawLeaves: true, RawLeavesUsed: true} var UseCidV1 = NodeOpts{Prefix: mdag.V1CidPrefix(), RawLeavesUsed: true} +var UseBlake2b256 NodeOpts + +func init() { + UseBlake2b256 = UseCidV1 + UseBlake2b256.Prefix.MhType = mh.Names["blake2b-256"] + UseBlake2b256.Prefix.MhLength = -1 +} func GetNode(t testing.TB, dserv mdag.DAGService, data []byte, opts NodeOpts) node.Node { in := bytes.NewReader(data) From f2fbfdf2917af25f61085f55c731a66e46ea09e2 Mon Sep 17 00:00:00 2001 From: Kevin Atkinson Date: Wed, 16 Aug 2017 19:18:05 
-0400 Subject: [PATCH 06/16] mfs: inherit CID prefix from from parent directory License: MIT Signed-off-by: Kevin Atkinson --- core/commands/files/files.go | 2 ++ mfs/dir.go | 6 ++++++ mfs/file.go | 11 ++++++++--- mfs/ops.go | 8 ++++++-- test/sharness/t0260-sharding-flag.sh | 4 ++-- unixfs/hamt/hamt.go | 7 +++++++ unixfs/io/dirbuilder.go | 9 +++++++++ 7 files changed, 40 insertions(+), 7 deletions(-) diff --git a/core/commands/files/files.go b/core/commands/files/files.go index f159ea88ad8..ace17609a30 100644 --- a/core/commands/files/files.go +++ b/core/commands/files/files.go @@ -890,8 +890,10 @@ func getFileHandle(r *mfs.Root, path string, create bool) (*mfs.File, error) { if !ok { return nil, fmt.Errorf("%s was not a directory", dirname) } + prefix := pdir.GetPrefix() nd := dag.NodeWithData(ft.FilePBData(nil, 0)) + nd.SetPrefix(prefix) err = pdir.AddChild(fname, nd) if err != nil { return nil, err diff --git a/mfs/dir.go b/mfs/dir.go index a489336d612..219dc4cceb0 100644 --- a/mfs/dir.go +++ b/mfs/dir.go @@ -58,6 +58,11 @@ func NewDirectory(ctx context.Context, name string, node node.Node, parent child }, nil } +// GetPrefix gets the CID prefix of the root node +func (d *Directory) GetPrefix() *cid.Prefix { + return d.dirbuilder.GetPrefix() +} + // SetPrefix sets the CID prefix func (d *Directory) SetPrefix(prefix *cid.Prefix) { d.dirbuilder.SetPrefix(prefix) @@ -299,6 +304,7 @@ func (d *Directory) Mkdir(name string) (*Directory, error) { } ndir := ft.EmptyDirNode() + ndir.SetPrefix(d.GetPrefix()) _, err = d.dserv.Add(ndir) if err != nil { diff --git a/mfs/file.go b/mfs/file.go index 85c9e59bc25..0ff8b41decb 100644 --- a/mfs/file.go +++ b/mfs/file.go @@ -27,14 +27,19 @@ type File struct { RawLeaves bool } -// NewFile returns a NewFile object with the given parameters +// NewFile returns a NewFile object with the given parameters. If the +// Cid version is non-zero RawLeaves will be enabled. 
func NewFile(name string, node node.Node, parent childCloser, dserv dag.DAGService) (*File, error) { - return &File{ + fi := &File{ dserv: dserv, parent: parent, name: name, node: node, - }, nil + } + if node.Cid().Prefix().Version > 0 { + fi.RawLeaves = true + } + return fi, nil } const ( diff --git a/mfs/ops.go b/mfs/ops.go index a086e86025e..5b72adcadc3 100644 --- a/mfs/ops.go +++ b/mfs/ops.go @@ -129,7 +129,9 @@ func Mkdir(r *Root, pth string, mkparents bool, flush bool) error { if err != nil { return err } - mkd.SetPrefix(r.Prefix) + if r.Prefix != nil { + mkd.SetPrefix(r.Prefix) + } fsn = mkd } else if err != nil { return err @@ -148,7 +150,9 @@ func Mkdir(r *Root, pth string, mkparents bool, flush bool) error { return err } } - final.SetPrefix(r.Prefix) + if r.Prefix != nil { + final.SetPrefix(r.Prefix) + } if flush { err := final.Flush() diff --git a/test/sharness/t0260-sharding-flag.sh b/test/sharness/t0260-sharding-flag.sh index 8be494be605..18dd6a05e3f 100755 --- a/test/sharness/t0260-sharding-flag.sh +++ b/test/sharness/t0260-sharding-flag.sh @@ -75,8 +75,8 @@ test_add_large_dir_v1() { ' } -# this hash implies both the directory and the leaf entries are CIDv1 -SHARDEDV1="zdj7WX91spg4DsnNpvoBLjyjXUGgcTTWavygBbSifpmJdgPUA" +# this hash implies the directory is CIDv1 and leaf entries are CIDv1 and raw +SHARDEDV1="zdj7WY8aNcxF49q1ZpFXfchNmbswnUxiVDVjmrHb53xRM8W4C" test_add_large_dir_v1 "$SHARDEDV1" test_launch_ipfs_daemon diff --git a/unixfs/hamt/hamt.go b/unixfs/hamt/hamt.go index a360c37c2d1..bd280930180 100644 --- a/unixfs/hamt/hamt.go +++ b/unixfs/hamt/hamt.go @@ -121,6 +121,7 @@ func NewHamtFromDag(dserv dag.DAGService, nd node.Node) (*HamtShard, error) { ds.children = make([]child, len(pbnd.Links())) ds.bitfield = new(big.Int).SetBytes(pbd.GetData()) ds.hashFunc = pbd.GetHashType() + ds.prefix = &ds.nd.Prefix return ds, nil } @@ -130,6 +131,11 @@ func (ds *HamtShard) SetPrefix(prefix *cid.Prefix) { ds.prefix = prefix } +// GetPrefix gets the CID 
Prefix, may be nil if unset +func (ds *HamtShard) Prefix() *cid.Prefix { + return ds.prefix +} + // Node serializes the HAMT structure into a merkledag node with unixfs formatting func (ds *HamtShard) Node() (node.Node, error) { out := new(dag.ProtoNode) @@ -500,6 +506,7 @@ func (ds *HamtShard) modifyValue(ctx context.Context, hv *hashBits, key string, if err != nil { return err } + ns.prefix = ds.prefix chhv := &hashBits{ b: hash([]byte(child.key)), consumed: hv.consumed, diff --git a/unixfs/io/dirbuilder.go b/unixfs/io/dirbuilder.go index 76ec34faab9..9ca587e2c21 100644 --- a/unixfs/io/dirbuilder.go +++ b/unixfs/io/dirbuilder.go @@ -115,6 +115,7 @@ func (d *Directory) switchToSharding(ctx context.Context) error { if err != nil { return err } + s.SetPrefix(&d.dirnode.Prefix) d.shard = s for _, lnk := range d.dirnode.Links() { @@ -192,3 +193,11 @@ func (d *Directory) GetNode() (node.Node, error) { return d.shard.Node() } + +func (d *Directory) GetPrefix() *cid.Prefix { + if d.shard == nil { + return &d.dirnode.Prefix + } + + return d.shard.Prefix() +} From 18a6344bddec304e739601d3e27bfeaec0dac08a Mon Sep 17 00:00:00 2001 From: Kevin Atkinson Date: Sun, 13 Aug 2017 14:12:39 -0400 Subject: [PATCH 07/16] Add --cid-version and --hash-fun option to files API License: MIT Signed-off-by: Kevin Atkinson --- core/commands/files/files.go | 76 +++++++++++++++++++++++++++++++++--- 1 file changed, 70 insertions(+), 6 deletions(-) diff --git a/core/commands/files/files.go b/core/commands/files/files.go index ace17609a30..9c2fc2d9197 100644 --- a/core/commands/files/files.go +++ b/core/commands/files/files.go @@ -18,8 +18,10 @@ import ( ft "github.com/ipfs/go-ipfs/unixfs" uio "github.com/ipfs/go-ipfs/unixfs/io" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" node "gx/ipfs/QmPN7cwmpcc4DWXb4KTB9dNAJgjuPY69h3npsMfhRrQL9c/go-ipld-format" logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" + mh 
"gx/ipfs/QmU9a9NV9RdPNwZQDYd5uKsm6N6LJLSvLbywDDYFbaaC6P/go-multihash" ) var log = logging.Logger("cmds/files") @@ -39,11 +41,21 @@ of consistency guarantees. If the daemon is unexpectedly killed before running 'ipfs files flush' on the files in question, then data may be lost. This also applies to running 'ipfs repo gc' concurrently with '--flush=false' operations. + +The --cid-version and --hash-fun option only apply to newly created files +and directories. If not specified these proprieties are inhertied +from the parent directory. `, }, Options: []cmds.Option{ cmds.BoolOption("f", "flush", "Flush target and ancestors after write.").Default(true), cmds.BoolOption("raw-leaves", "Use raw blocks for newly created leaf nodes. (experimental)"), + cmds.IntOption("cid-version", "cid-ver", "Cid version. Non-zero value will change default of 'raw-leaves' to true. (experimental)"), + cmds.StringOption("hash-fun", "Hash function to use. Will set Cid version to 1 if used. (experimental)"), + // ^^fixme: can't use just "hash" as the option name as the + // conflicts with "--hash" usage by the stat command, this is + // unfortunate as it creates an inconsistency with the "add" + // that uses "hash" }, Subcommands: map[string]*cmds.Command{ "read": FilesReadCmd, @@ -599,7 +611,13 @@ stat' on the file or any of its ancestors. create, _, _ := req.Option("create").Bool() trunc, _, _ := req.Option("truncate").Bool() flush, _, _ := req.Option("flush").Bool() - rawLeaves, _, _ := req.Option("raw-leaves").Bool() + rawLeaves, rawLeavesDef, _ := req.Option("raw-leaves").Bool() + + prefix, err := getPrefix(req) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } nd, err := req.InvocContext().GetNode() if err != nil { @@ -617,12 +635,14 @@ stat' on the file or any of its ancestors. 
return } - fi, err := getFileHandle(nd.FilesRoot, path, create) + fi, err := getFileHandle(nd.FilesRoot, path, create, prefix) if err != nil { res.SetError(err, cmds.ErrNormal) return } - fi.RawLeaves = rawLeaves + if rawLeavesDef { + fi.RawLeaves = rawLeaves + } wfd, err := fi.Open(mfs.OpenWriteOnly, flush) if err != nil { @@ -719,7 +739,21 @@ Examples: flush, _, _ := req.Option("flush").Bool() - err = mfs.Mkdir(n.FilesRoot, dirtomake, dashp, flush) + prefix, err := getPrefix(req) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + root := n.FilesRoot + if prefix != nil { + // FIXME: This is ugly and may not be correct either + // -- kevina + newRoot := *root + root = &newRoot + root.Prefix = prefix + } + + err = mfs.Mkdir(root, dirtomake, dashp, flush) if err != nil { res.SetError(err, cmds.ErrNormal) return @@ -863,8 +897,36 @@ Remove files or directories. }, } -func getFileHandle(r *mfs.Root, path string, create bool) (*mfs.File, error) { +func getPrefix(req cmds.Request) (*cid.Prefix, error) { + cidVer, cidVerSet, _ := req.Option("cid-version").Int() + hashFunStr, hashFunSet, _ := req.Option("hash-fun").String() + + if !cidVerSet && !hashFunSet { + return nil, nil + } + + if hashFunSet && cidVer == 0 { + cidVer = 1 + } + + prefix, err := dag.PrefixForCidVersion(cidVer) + if err != nil { + return nil, err + } + if hashFunSet { + hashFunCode, ok := mh.Names[strings.ToLower(hashFunStr)] + if !ok { + return nil, fmt.Errorf("unrecognized hash function: %s", strings.ToLower(hashFunStr)) + } + prefix.MhType = hashFunCode + prefix.MhLength = -1 + } + + return &prefix, nil +} + +func getFileHandle(r *mfs.Root, path string, create bool, prefix *cid.Prefix) (*mfs.File, error) { target, err := mfs.Lookup(r, path) switch err { case nil: @@ -890,7 +952,9 @@ func getFileHandle(r *mfs.Root, path string, create bool) (*mfs.File, error) { if !ok { return nil, fmt.Errorf("%s was not a directory", dirname) } - prefix := pdir.GetPrefix() + if prefix == nil { + 
prefix = pdir.GetPrefix() + } nd := dag.NodeWithData(ft.FilePBData(nil, 0)) nd.SetPrefix(prefix) From 63ddffdb5d1d070b48765cdad4239130cde9f816 Mon Sep 17 00:00:00 2001 From: Kevin Atkinson Date: Sun, 13 Aug 2017 04:57:48 -0400 Subject: [PATCH 08/16] Add "files update" command. License: MIT Signed-off-by: Kevin Atkinson --- core/commands/files/files.go | 82 ++++++++++++++++++++++++++++++++---- 1 file changed, 73 insertions(+), 9 deletions(-) diff --git a/core/commands/files/files.go b/core/commands/files/files.go index 9c2fc2d9197..752c2c46c85 100644 --- a/core/commands/files/files.go +++ b/core/commands/files/files.go @@ -58,15 +58,16 @@ from the parent directory. // that uses "hash" }, Subcommands: map[string]*cmds.Command{ - "read": FilesReadCmd, - "write": FilesWriteCmd, - "mv": FilesMvCmd, - "cp": FilesCpCmd, - "ls": FilesLsCmd, - "mkdir": FilesMkdirCmd, - "stat": FilesStatCmd, - "rm": FilesRmCmd, - "flush": FilesFlushCmd, + "read": FilesReadCmd, + "write": FilesWriteCmd, + "mv": FilesMvCmd, + "cp": FilesCpCmd, + "ls": FilesLsCmd, + "mkdir": FilesMkdirCmd, + "stat": FilesStatCmd, + "rm": FilesRmCmd, + "flush": FilesFlushCmd, + "update": FilesUpdateCmd, }, } @@ -793,6 +794,69 @@ are run with the '--flush=false'. }, } +var FilesUpdateCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Change the cid version of hash function of the root node of a given path.", + ShortDescription: ` +Flush a given path to disk. This is only useful when other commands +are run with the '--flush=false'. +`, + }, + Arguments: []cmds.Argument{ + cmds.StringArg("path", false, false, "Path to flush. 
Default: '/'."), + }, + Run: func(req cmds.Request, res cmds.Response) { + nd, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + path := "/" + if len(req.Arguments()) > 0 { + path = req.Arguments()[0] + } + + flush, _, _ := req.Option("flush").Bool() + + prefix, err := getPrefix(req) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + err = updatePath(nd.FilesRoot, path, prefix, flush) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + }, +} + +func updatePath(rt *mfs.Root, pth string, prefix *cid.Prefix, flush bool) error { + if prefix == nil { + return nil + } + + nd, err := mfs.Lookup(rt, pth) + if err != nil { + return err + } + + switch n := nd.(type) { + case *mfs.Directory: + n.SetPrefix(prefix) + default: + return fmt.Errorf("can only update directories") + } + + if flush { + nd.Flush() + } + + return nil +} + var FilesRmCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Remove a file.", From 47c198ceb826f7b49b0681880b8e3554a1f6ee0c Mon Sep 17 00:00:00 2001 From: Kevin Atkinson Date: Wed, 16 Aug 2017 17:24:59 -0400 Subject: [PATCH 09/16] Enhance sharness tests. 
License: MIT Signed-off-by: Kevin Atkinson --- test/sharness/t0250-files-api.sh | 101 ++++++++++++++++++++++++++----- 1 file changed, 86 insertions(+), 15 deletions(-) diff --git a/test/sharness/t0250-files-api.sh b/test/sharness/t0250-files-api.sh index be993b9ee53..70e3bf71f52 100755 --- a/test/sharness/t0250-files-api.sh +++ b/test/sharness/t0250-files-api.sh @@ -46,11 +46,15 @@ verify_dir_contents() { } test_sharding() { - test_expect_success "make a directory" ' - ipfs files mkdir /foo + local EXTRA ARGS + EXTRA=$1 + ARGS=$2 # only applied to the initial directory + + test_expect_success "make a directory $EXTRA" ' + ipfs files $ARGS mkdir /foo ' - test_expect_success "can make 100 files in a directory" ' + test_expect_success "can make 100 files in a directory $EXTRA" ' printf "" > list_exp_raw for i in `seq 100` do @@ -59,33 +63,37 @@ test_sharding() { done ' - test_expect_success "listing works" ' + test_expect_success "listing works $EXTRA" ' ipfs files ls /foo |sort > list_out && sort list_exp_raw > list_exp && test_cmp list_exp list_out ' - test_expect_success "can read a file from sharded directory" ' + test_expect_success "can read a file from sharded directory $EXTRA" ' ipfs files read /foo/file65 > file_out && echo "65" > file_exp && test_cmp file_out file_exp ' - test_expect_success "can pin a file from sharded directory" ' + test_expect_success "can pin a file from sharded directory $EXTRA" ' ipfs files stat --hash /foo/file42 > pin_file_hash && ipfs pin add < pin_file_hash > pin_hash ' - test_expect_success "can unpin a file from sharded directory" ' + test_expect_success "can unpin a file from sharded directory $EXTRA" ' read -r _ HASH _ < pin_hash && ipfs pin rm $HASH ' - test_expect_success "output object was really sharded" ' + test_expect_success "output object was really sharded and has correct hash $EXTRA" ' ipfs files stat --hash /foo > expected_foo_hash && - echo QmPkwLJTYZRGPJ8Lazr9qPdrLmswPtUjaDbEpmR9jEh1se > actual_foo_hash && + echo 
$SHARD_HASH > actual_foo_hash && test_cmp expected_foo_hash actual_foo_hash ' + + test_expect_success "clean up $EXTRA" ' + ipfs files rm -r /foo + ' } test_files_api() { @@ -429,6 +437,13 @@ test_files_api() { test_cmp root_hash_exp root_hash ' + test_expect_success "/cats hash looks good $EXTRA" ' + export EXP_CATS_HASH="$CATS_HASH" && + echo $EXP_CATS_HASH > cats_hash_exp && + ipfs files stat --hash /cats > cats_hash + test_cmp cats_hash_exp cats_hash + ' + test_expect_success "flush root succeeds $EXTRA" ' ipfs files $ARGS flush / ' @@ -466,6 +481,12 @@ test_files_api() { test_cmp file_out file_exp ' + test_expect_success "file hash correct $EXTRA" ' + echo $TRUNC_HASH > filehash_expected && + ipfs files $ARGS stat --hash /cats > filehash && + test_cmp filehash_expected filehash + ' + test_expect_success "cleanup $EXTRA" ' ipfs files $ARGS rm /cats ' @@ -555,21 +576,65 @@ tests_for_files_api() { create_files ' ROOT_HASH=QmcwKfTMCT7AaeiD92hWjnZn9b6eh9NxnhfSzN5x2vnDpt + CATS_HASH=Qma88m8ErTGkZHbBWGqy1C7VmEmX8wwNDWNpGyCaNmEgwC FILE_HASH=QmQdQt9qooenjeaNhiKHF3hBvmNteB4MQBtgu3jxgf9c7i + TRUNC_HASH=QmdaQZbLwK5ykweGdCVovNnvBom7QhikovDUVqTPHQG4L8 test_files_api "($EXTRA)" test_expect_success "can create some files for testing with raw-leaves ($extra)" ' create_files --raw-leaves ' - ROOT_HASH=QmTpKiKcAj4sbeesN6vrs5w3QeVmd4QmGpxRL81hHut4dZ - test_files_api "($EXTRA, partial raw-leaves)" - test_expect_success "can create some files for testing with raw-leaves ($extra)" ' - create_files --raw-leaves - ' + if [ "$EXTRA" = "offline" ]; then + ROOT_HASH=QmTpKiKcAj4sbeesN6vrs5w3QeVmd4QmGpxRL81hHut4dZ + CATS_HASH=QmPhPkmtUGGi8ySPHoPu1qbfryLJKKq1GYxpgLyyCruvGe + test_files_api "($EXTRA, partial raw-leaves)" + fi + ROOT_HASH=QmW3dMSU6VNd1mEdpk9S3ZYRuR1YwwoXjGaZhkyK6ru9YU + CATS_HASH=QmPqWDEg7NoWRX8Y4vvYjZtmdg5umbfsTQ9zwNr12JoLmt FILE_HASH=QmRCgHeoKxCqK2Es6M6nPUDVWz19yNQPnsXGsXeuTkSKpN + TRUNC_HASH=QmRFJEKWF5A5FyFYZgNhusLw2UziW9zBKYr4huyHjzcB6o test_files_api "($EXTRA, 
raw-leaves)" --raw-leaves + + ROOT_HASH=QmageRWxC7wWjPv5p36NeAgBAiFdBHaNfxAehBSwzNech2 + CATS_HASH=zdj7WkEzPLNAr5TYJSQC8CFcBjLvWFfGdx6kaBrJXnBguwWeX + FILE_HASH=zdj7WYHvf5sBRgSBjYnq64QFr449CCbgupXfBvoYL3aHC1DzJ + TRUNC_HASH=zdj7WYLYbka6Ydg8gZUJRLKnFBVehCADhQKBsFbNiMxZSB5Gj + if [ "$EXTRA" = "offline" ]; then + test_files_api "($EXTRA, cidv1)" --cid-version=1 + fi + + test_expect_success "can update root hash to cidv1" ' + ipfs files --cid-version=1 update / && + echo zdj7WbTaiJT1fgatdet9Ei9iDB5hdCxkbVyhyh8YTUnXMiwYi > hash_expect && + ipfs files stat --hash / > hash_actual && + test_cmp hash_expect hash_actual + ' + + ROOT_HASH=zdj7Whmtnx23bR7c7E1Yn3zWYWjnvT4tpzWYGaBMyqcopDWrx + test_files_api "($EXTRA, cidv1 root)" + + if [ "$EXTRA" = "offline" ]; then + test_expect_success "can update root hash to blake2b-256" ' + ipfs files --hash-fun=blake2b-256 update / && + echo zDMZof1kvswQMT8txrmnb3JGBuna6qXCTry6hSifrkZEd6VmHbBm > hash_expect && + ipfs files stat --hash / > hash_actual && + test_cmp hash_expect hash_actual + ' + ROOT_HASH=zDMZof1kxEsAwSgCZsGQRVcHCMtHLjkUQoiZUbZ87erpPQJGUeW8 + CATS_HASH=zDMZof1kuAhr3zBkxq48V7o9HJZCTVyu1Wd9wnZtVcPJLW8xnGft + FILE_HASH=zDMZof1kxbB9CvxgRioBzESbGnZUxtSCsZ18H1EUkxDdWt1DYEkK + TRUNC_HASH=zDMZof1kxXqKdVsVo231qVdN3hCTF5a34UuQZpzmm5K7CbRJ4u2S + test_files_api "($EXTRA, blake2b-256 root)" + fi + + test_expect_success "can update root hash back to cidv0" ' + ipfs files --cid-version=0 update / && + echo QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn > hash_expect && + ipfs files stat --hash / > hash_actual && + test_cmp hash_expect hash_actual + ' } tests_for_files_api "online" @@ -587,7 +652,13 @@ test_expect_success "enable sharding in config" ' ' test_launch_ipfs_daemon --offline -test_sharding + +SHARD_HASH=QmPkwLJTYZRGPJ8Lazr9qPdrLmswPtUjaDbEpmR9jEh1se +test_sharding "(cidv0)" + +SHARD_HASH=zdj7WZXr6vG2Ne7ZLHGEKrGyF3pHBfAViEnmH9CoyvjrFQM8E +test_sharding "(cidv1 root)" "--cid-version=1" + test_kill_ipfs_daemon test_done From 
fd7b72fa169dc905741de0b5d51f820610a26d35 Mon Sep 17 00:00:00 2001 From: Kevin Atkinson Date: Fri, 8 Sep 2017 20:09:43 -0400 Subject: [PATCH 10/16] Fix help text for "update" command. License: MIT Signed-off-by: Kevin Atkinson --- core/commands/files/files.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/core/commands/files/files.go b/core/commands/files/files.go index 752c2c46c85..645f24aa8a7 100644 --- a/core/commands/files/files.go +++ b/core/commands/files/files.go @@ -796,14 +796,13 @@ are run with the '--flush=false'. var FilesUpdateCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "Change the cid version of hash function of the root node of a given path.", + Tagline: "Change the cid version or hash function of the root node of a given path.", ShortDescription: ` -Flush a given path to disk. This is only useful when other commands -are run with the '--flush=false'. +Change the cid version or hash function of the root node of a given path. `, }, Arguments: []cmds.Argument{ - cmds.StringArg("path", false, false, "Path to flush. Default: '/'."), + cmds.StringArg("path", false, false, "Path to change. Default: '/'."), }, Run: func(req cmds.Request, res cmds.Response) { nd, err := req.InvocContext().GetNode() From d29930c03193291a51fcae56f8cfde404018ff76 Mon Sep 17 00:00:00 2001 From: Kevin Atkinson Date: Fri, 8 Sep 2017 21:00:55 -0400 Subject: [PATCH 11/16] Eliminate Prefix field from MFS root, use MkdirOpts. 
License: MIT Signed-off-by: Kevin Atkinson --- core/commands/files/files.go | 13 +++++-------- core/coreunix/add.go | 14 +++++++++++--- mfs/mfs_test.go | 4 ++-- mfs/ops.go | 26 +++++++++++++++++--------- mfs/system.go | 3 --- 5 files changed, 35 insertions(+), 25 deletions(-) diff --git a/core/commands/files/files.go b/core/commands/files/files.go index 645f24aa8a7..a39a0767b0e 100644 --- a/core/commands/files/files.go +++ b/core/commands/files/files.go @@ -746,15 +746,12 @@ Examples: return } root := n.FilesRoot - if prefix != nil { - // FIXME: This is ugly and may not be correct either - // -- kevina - newRoot := *root - root = &newRoot - root.Prefix = prefix - } - err = mfs.Mkdir(root, dirtomake, dashp, flush) + err = mfs.Mkdir(root, dirtomake, mfs.MkdirOpts{ + Mkparents: dashp, + Flush: flush, + Prefix: prefix, + }) if err != nil { res.SetError(err, cmds.ErrNormal) return diff --git a/core/coreunix/add.go b/core/coreunix/add.go index 9617fc46c2b..eca42b0a838 100644 --- a/core/coreunix/add.go +++ b/core/coreunix/add.go @@ -119,7 +119,6 @@ func (adder *Adder) mfsRoot() (*mfs.Root, error) { rnode := unixfs.EmptyDirNode() rnode.SetPrefix(adder.Prefix) mr, err := mfs.NewRoot(adder.ctx, adder.dagService, rnode, nil) - mr.Prefix = adder.Prefix if err != nil { return nil, err } @@ -398,7 +397,12 @@ func (adder *Adder) addNode(node node.Node, path string) error { } dir := gopath.Dir(path) if dir != "." 
{ - if err := mfs.Mkdir(mr, dir, true, false); err != nil { + opts := mfs.MkdirOpts{ + Mkparents: true, + Flush: false, + Prefix: adder.Prefix, + } + if err := mfs.Mkdir(mr, dir, opts); err != nil { return err } } @@ -496,7 +500,11 @@ func (adder *Adder) addDir(dir files.File) error { if err != nil { return err } - err = mfs.Mkdir(mr, dir.FileName(), true, false) + err = mfs.Mkdir(mr, dir.FileName(), mfs.MkdirOpts{ + Mkparents: true, + Flush: false, + Prefix: adder.Prefix, + }) if err != nil { return err } diff --git a/mfs/mfs_test.go b/mfs/mfs_test.go index 09e9de00dd5..bebfa8d308d 100644 --- a/mfs/mfs_test.go +++ b/mfs/mfs_test.go @@ -735,7 +735,7 @@ func TestMfsHugeDir(t *testing.T) { _, rt := setupRoot(ctx, t) for i := 0; i < 10000; i++ { - err := Mkdir(rt, fmt.Sprintf("/dir%d", i), false, false) + err := Mkdir(rt, fmt.Sprintf("/dir%d", i), MkdirOpts{Mkparents: false, Flush: false}) if err != nil { t.Fatal(err) } @@ -747,7 +747,7 @@ func TestMkdirP(t *testing.T) { defer cancel() _, rt := setupRoot(ctx, t) - err := Mkdir(rt, "/a/b/c/d/e/f", true, true) + err := Mkdir(rt, "/a/b/c/d/e/f", MkdirOpts{Mkparents: true, Flush: true}) if err != nil { t.Fatal(err) } diff --git a/mfs/ops.go b/mfs/ops.go index 5b72adcadc3..49ce398d44f 100644 --- a/mfs/ops.go +++ b/mfs/ops.go @@ -9,6 +9,7 @@ import ( path "github.com/ipfs/go-ipfs/path" + cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid" node "gx/ipfs/QmPN7cwmpcc4DWXb4KTB9dNAJgjuPY69h3npsMfhRrQL9c/go-ipld-format" ) @@ -97,9 +98,16 @@ func PutNode(r *Root, path string, nd node.Node) error { return pdir.AddChild(filename, nd) } +// MkdirOpts is used by Mkdir +type MkdirOpts struct { + Mkparents bool + Flush bool + Prefix *cid.Prefix +} + // Mkdir creates a directory at 'path' under the directory 'd', creating // intermediary directories as needed if 'mkparents' is set to true -func Mkdir(r *Root, pth string, mkparents bool, flush bool) error { +func Mkdir(r *Root, pth string, opts MkdirOpts) error { if pth == 
"" { return fmt.Errorf("no path given to Mkdir") } @@ -115,7 +123,7 @@ func Mkdir(r *Root, pth string, mkparents bool, flush bool) error { if len(parts) == 0 { // this will only happen on 'mkdir /' - if mkparents { + if opts.Mkparents { return nil } return fmt.Errorf("cannot create directory '/': Already exists") @@ -124,13 +132,13 @@ func Mkdir(r *Root, pth string, mkparents bool, flush bool) error { cur := r.GetValue().(*Directory) for i, d := range parts[:len(parts)-1] { fsn, err := cur.Child(d) - if err == os.ErrNotExist && mkparents { + if err == os.ErrNotExist && opts.Mkparents { mkd, err := cur.Mkdir(d) if err != nil { return err } - if r.Prefix != nil { - mkd.SetPrefix(r.Prefix) + if opts.Prefix != nil { + mkd.SetPrefix(opts.Prefix) } fsn = mkd } else if err != nil { @@ -146,15 +154,15 @@ func Mkdir(r *Root, pth string, mkparents bool, flush bool) error { final, err := cur.Mkdir(parts[len(parts)-1]) if err != nil { - if !mkparents || err != os.ErrExist || final == nil { + if !opts.Mkparents || err != os.ErrExist || final == nil { return err } } - if r.Prefix != nil { - final.SetPrefix(r.Prefix) + if opts.Prefix != nil { + final.SetPrefix(opts.Prefix) } - if flush { + if opts.Flush { err := final.Flush() if err != nil { return err diff --git a/mfs/system.go b/mfs/system.go index 0641704cf38..fc5be0f6e2f 100644 --- a/mfs/system.go +++ b/mfs/system.go @@ -61,9 +61,6 @@ type Root struct { dserv dag.DAGService Type string - - // Prefix to use for any children created - Prefix *cid.Prefix } type PubFunc func(context.Context, *cid.Cid) error From b5b61f7527c5e35a948b43b6d1b29cfa0d8d9bee Mon Sep 17 00:00:00 2001 From: Kevin Atkinson Date: Sun, 10 Sep 2017 19:46:35 -0400 Subject: [PATCH 12/16] Rename "files update" to "files chcid". 
License: MIT Signed-off-by: Kevin Atkinson --- core/commands/files/files.go | 22 +++++++++++----------- test/sharness/t0250-files-api.sh | 6 +++--- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/core/commands/files/files.go b/core/commands/files/files.go index a39a0767b0e..d6a2d65cf11 100644 --- a/core/commands/files/files.go +++ b/core/commands/files/files.go @@ -58,16 +58,16 @@ from the parent directory. // that uses "hash" }, Subcommands: map[string]*cmds.Command{ - "read": FilesReadCmd, - "write": FilesWriteCmd, - "mv": FilesMvCmd, - "cp": FilesCpCmd, - "ls": FilesLsCmd, - "mkdir": FilesMkdirCmd, - "stat": FilesStatCmd, - "rm": FilesRmCmd, - "flush": FilesFlushCmd, - "update": FilesUpdateCmd, + "read": FilesReadCmd, + "write": FilesWriteCmd, + "mv": FilesMvCmd, + "cp": FilesCpCmd, + "ls": FilesLsCmd, + "mkdir": FilesMkdirCmd, + "stat": FilesStatCmd, + "rm": FilesRmCmd, + "flush": FilesFlushCmd, + "chcid": FilesChcidCmd, }, } @@ -791,7 +791,7 @@ are run with the '--flush=false'. 
}, } -var FilesUpdateCmd = &cmds.Command{ +var FilesChcidCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Change the cid version or hash function of the root node of a given path.", ShortDescription: ` diff --git a/test/sharness/t0250-files-api.sh b/test/sharness/t0250-files-api.sh index 70e3bf71f52..cd9981c96d0 100755 --- a/test/sharness/t0250-files-api.sh +++ b/test/sharness/t0250-files-api.sh @@ -606,7 +606,7 @@ tests_for_files_api() { fi test_expect_success "can update root hash to cidv1" ' - ipfs files --cid-version=1 update / && + ipfs files --cid-version=1 chcid / && echo zdj7WbTaiJT1fgatdet9Ei9iDB5hdCxkbVyhyh8YTUnXMiwYi > hash_expect && ipfs files stat --hash / > hash_actual && test_cmp hash_expect hash_actual @@ -617,7 +617,7 @@ tests_for_files_api() { if [ "$EXTRA" = "offline" ]; then test_expect_success "can update root hash to blake2b-256" ' - ipfs files --hash-fun=blake2b-256 update / && + ipfs files --hash-fun=blake2b-256 chcid / && echo zDMZof1kvswQMT8txrmnb3JGBuna6qXCTry6hSifrkZEd6VmHbBm > hash_expect && ipfs files stat --hash / > hash_actual && test_cmp hash_expect hash_actual @@ -630,7 +630,7 @@ tests_for_files_api() { fi test_expect_success "can update root hash back to cidv0" ' - ipfs files --cid-version=0 update / && + ipfs files --cid-version=0 chcid / && echo QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn > hash_expect && ipfs files stat --hash / > hash_actual && test_cmp hash_expect hash_actual From 00a3b1df213dd4f09931b7cf974668505f1657a2 Mon Sep 17 00:00:00 2001 From: Kevin Atkinson Date: Wed, 18 Oct 2017 13:35:00 -0400 Subject: [PATCH 13/16] Address C.R. in t0250-files-api.sh. 
License: MIT Signed-off-by: Kevin Atkinson --- test/sharness/t0250-files-api.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/sharness/t0250-files-api.sh b/test/sharness/t0250-files-api.sh index cd9981c96d0..338facf5636 100755 --- a/test/sharness/t0250-files-api.sh +++ b/test/sharness/t0250-files-api.sh @@ -572,7 +572,7 @@ tests_for_files_api() { local EXTRA EXTRA=$1 - test_expect_success "can create some files for testing ($extra)" ' + test_expect_success "can create some files for testing ($EXTRA)" ' create_files ' ROOT_HASH=QmcwKfTMCT7AaeiD92hWjnZn9b6eh9NxnhfSzN5x2vnDpt @@ -581,7 +581,7 @@ tests_for_files_api() { TRUNC_HASH=QmdaQZbLwK5ykweGdCVovNnvBom7QhikovDUVqTPHQG4L8 test_files_api "($EXTRA)" - test_expect_success "can create some files for testing with raw-leaves ($extra)" ' + test_expect_success "can create some files for testing with raw-leaves ($EXTRA)" ' create_files --raw-leaves ' From 798afedf8d11af78baeb5bfd50e028d8bd33d04f Mon Sep 17 00:00:00 2001 From: Kevin Atkinson Date: Thu, 19 Oct 2017 11:53:37 -0400 Subject: [PATCH 14/16] Make --hash and related option local to sub-commands that use them. License: MIT Signed-off-by: Kevin Atkinson --- core/commands/files/files.go | 35 ++++--- test/sharness/t0250-files-api.sh | 163 ++++++++++++++++--------------- 2 files changed, 105 insertions(+), 93 deletions(-) diff --git a/core/commands/files/files.go b/core/commands/files/files.go index d6a2d65cf11..ef7903e7de4 100644 --- a/core/commands/files/files.go +++ b/core/commands/files/files.go @@ -41,21 +41,10 @@ of consistency guarantees. If the daemon is unexpectedly killed before running 'ipfs files flush' on the files in question, then data may be lost. This also applies to running 'ipfs repo gc' concurrently with '--flush=false' operations. - -The --cid-version and --hash-fun option only apply to newly created files -and directories. If not specified these proprieties are inhertied -from the parent directory. 
`, }, Options: []cmds.Option{ cmds.BoolOption("f", "flush", "Flush target and ancestors after write.").Default(true), - cmds.BoolOption("raw-leaves", "Use raw blocks for newly created leaf nodes. (experimental)"), - cmds.IntOption("cid-version", "cid-ver", "Cid version. Non-zero value will change default of 'raw-leaves' to true. (experimental)"), - cmds.StringOption("hash-fun", "Hash function to use. Will set Cid version to 1 if used. (experimental)"), - // ^^fixme: can't use just "hash" as the option name as the - // conflicts with "--hash" usage by the stat command, this is - // unfortunate as it creates an inconsistency with the "add" - // that uses "hash" }, Subcommands: map[string]*cmds.Command{ "read": FilesReadCmd, @@ -71,6 +60,9 @@ from the parent directory. }, } +var cidVersionOption = cmds.IntOption("cid-version", "cid-ver", "Cid version to use. (experimental)") +var hashOption = cmds.StringOption("hash", "Hash function to use. Will set Cid version to 1 if used. (experimental)") + var formatError = errors.New("Format was set by multiple options. Only one format option is allowed") var FilesStatCmd = &cmds.Command{ @@ -576,6 +568,13 @@ a beginning offset to write to. The entire length of the input will be written. If the '--create' option is specified, the file will be created if it does not exist. Nonexistant intermediate directories will not be created. +Newly created files will have the same CID version and hash function of the +parent directory unless the --cid-version and --hash options are used. + +Newly created leaves will be in the legacy format (Protobuf) if the +CID version is 0, or raw if the CID version is non-zero. Use of the +--raw-leaves option will override this behavior. + If the '--flush' option is set to false, changes will not be propogated to the merkledag root. This can make operations much faster when doing a large number of writes to a deeper directory structure. @@ -601,6 +600,9 @@ stat' on the file or any of its ancestors. 
cmds.BoolOption("create", "e", "Create the file if it does not exist."), cmds.BoolOption("truncate", "t", "Truncate the file to size zero before writing."), cmds.IntOption("count", "n", "Maximum number of bytes to read."), + cmds.BoolOption("raw-leaves", "Use raw blocks for newly created leaf nodes. (experimental)"), + cidVersionOption, + hashOption, }, Run: func(req cmds.Request, res cmds.Response) { path, err := checkPath(req.Arguments()[0]) @@ -709,6 +711,9 @@ var FilesMkdirCmd = &cmds.Command{ ShortDescription: ` Create the directory if it does not already exist. +The directory will have the same CID version and hash function of the +parent directory unless the --cid-version and --hash options are used. + NOTE: All paths must be absolute. Examples: @@ -723,6 +728,8 @@ Examples: }, Options: []cmds.Option{ cmds.BoolOption("parents", "p", "No error if existing, make parent directories as needed."), + cidVersionOption, + hashOption, }, Run: func(req cmds.Request, res cmds.Response) { n, err := req.InvocContext().GetNode() @@ -801,6 +808,10 @@ Change the cid version or hash function of the root node of a given path. Arguments: []cmds.Argument{ cmds.StringArg("path", false, false, "Path to change. Default: '/'."), }, + Options: []cmds.Option{ + cidVersionOption, + hashOption, + }, Run: func(req cmds.Request, res cmds.Response) { nd, err := req.InvocContext().GetNode() if err != nil { @@ -959,7 +970,7 @@ Remove files or directories. 
func getPrefix(req cmds.Request) (*cid.Prefix, error) { cidVer, cidVerSet, _ := req.Option("cid-version").Int() - hashFunStr, hashFunSet, _ := req.Option("hash-fun").String() + hashFunStr, hashFunSet, _ := req.Option("hash").String() if !cidVerSet && !hashFunSet { return nil, nil diff --git a/test/sharness/t0250-files-api.sh b/test/sharness/t0250-files-api.sh index 338facf5636..7f1f08a68f4 100755 --- a/test/sharness/t0250-files-api.sh +++ b/test/sharness/t0250-files-api.sh @@ -51,7 +51,7 @@ test_sharding() { ARGS=$2 # only applied to the initial directory test_expect_success "make a directory $EXTRA" ' - ipfs files $ARGS mkdir /foo + ipfs files mkdir $ARGS /foo ' test_expect_success "can make 100 files in a directory $EXTRA" ' @@ -97,16 +97,17 @@ test_sharding() { } test_files_api() { - local EXTRA ARGS + local EXTRA ARGS RAW_LEAVES EXTRA=$1 ARGS=$2 + RAW_LEAVES=$3 test_expect_success "can mkdir in root $EXTRA" ' - ipfs files $ARGS mkdir /cats + ipfs files mkdir $ARGS /cats ' test_expect_success "'files ls' lists root by default $EXTRA" ' - ipfs files $ARGS ls >actual && + ipfs files ls >actual && echo "cats" >expected && test_cmp expected actual ' @@ -121,7 +122,7 @@ test_files_api() { # we do verification of stat formatting now as we depend on it test_expect_success "stat works $EXTRA" ' - ipfs files $ARGS stat / >stat + ipfs files stat / >stat ' test_expect_success "hash is first line of stat $EXTRA" ' @@ -129,41 +130,41 @@ test_files_api() { ' test_expect_success "stat --hash gives only hash $EXTRA" ' - ipfs files $ARGS stat --hash / >actual && + ipfs files stat --hash / >actual && head -n1 stat >expected && test_cmp expected actual ' test_expect_success "stat with multiple format options should fail $EXTRA" ' - test_must_fail ipfs files $ARGS stat --hash --size / + test_must_fail ipfs files stat --hash --size / ' test_expect_success "compare hash option with format $EXTRA" ' - ipfs files $ARGS stat --hash / >expected && - ipfs files $ARGS stat 
--format='"'"''"'"' / >actual && + ipfs files stat --hash / >expected && + ipfs files stat --format='"'"''"'"' / >actual && test_cmp expected actual ' test_expect_success "compare size option with format $EXTRA" ' - ipfs files $ARGS stat --size / >expected && - ipfs files $ARGS stat --format='"'"''"'"' / >actual && + ipfs files stat --size / >expected && + ipfs files stat --format='"'"''"'"' / >actual && test_cmp expected actual ' test_expect_success "check root hash $EXTRA" ' - ipfs files $ARGS stat --hash / > roothash + ipfs files stat --hash / > roothash ' test_expect_success "cannot mkdir / $EXTRA" ' - test_expect_code 1 ipfs files $ARGS mkdir / + test_expect_code 1 ipfs files mkdir $ARGS / ' test_expect_success "check root hash was not changed $EXTRA" ' - ipfs files $ARGS stat --hash / > roothashafter && + ipfs files stat --hash / > roothashafter && test_cmp roothash roothashafter ' test_expect_success "can put files into directory $EXTRA" ' - ipfs files $ARGS cp /ipfs/$FILE1 /cats/file1 + ipfs files cp /ipfs/$FILE1 /cats/file1 ' test_expect_success "file shows up in directory $EXTRA" ' @@ -172,12 +173,12 @@ test_files_api() { test_expect_success "file has correct hash and size in directory $EXTRA" ' echo "file1 $FILE1 4" > ls_l_expected && - ipfs files $ARGS ls -l /cats > ls_l_actual && + ipfs files ls -l /cats > ls_l_actual && test_cmp ls_l_expected ls_l_actual ' test_expect_success "can read file $EXTRA" ' - ipfs files $ARGS read /cats/file1 > file1out + ipfs files read /cats/file1 > file1out ' test_expect_success "output looks good $EXTRA" ' @@ -186,7 +187,7 @@ test_files_api() { ' test_expect_success "can put another file into root $EXTRA" ' - ipfs files $ARGS cp /ipfs/$FILE2 /file2 + ipfs files cp /ipfs/$FILE2 /file2 ' test_expect_success "file shows up in root $EXTRA" ' @@ -194,7 +195,7 @@ test_files_api() { ' test_expect_success "can read file $EXTRA" ' - ipfs files $ARGS read /file2 > file2out + ipfs files read /file2 > file2out ' test_expect_success 
"output looks good $EXTRA" ' @@ -203,7 +204,7 @@ test_files_api() { ' test_expect_success "can make deep directory $EXTRA" ' - ipfs files $ARGS mkdir -p /cats/this/is/a/dir + ipfs files mkdir $ARGS -p /cats/this/is/a/dir ' test_expect_success "directory was created correctly $EXTRA" ' @@ -216,11 +217,11 @@ test_files_api() { ' test_expect_success "can copy file into new dir $EXTRA" ' - ipfs files $ARGS cp /ipfs/$FILE3 /cats/this/is/a/dir/file3 + ipfs files cp /ipfs/$FILE3 /cats/this/is/a/dir/file3 ' test_expect_success "can read file $EXTRA" ' - ipfs files $ARGS read /cats/this/is/a/dir/file3 > output + ipfs files read /cats/this/is/a/dir/file3 > output ' test_expect_success "output looks good $EXTRA" ' @@ -233,7 +234,7 @@ test_files_api() { ' test_expect_success "can remove file $EXTRA" ' - ipfs files $ARGS rm /cats/this/is/a/dir/file3 + ipfs files rm /cats/this/is/a/dir/file3 ' test_expect_success "file no longer appears $EXTRA" ' @@ -241,7 +242,7 @@ test_files_api() { ' test_expect_success "can remove dir $EXTRA" ' - ipfs files $ARGS rm -r /cats/this/is/a/dir + ipfs files rm -r /cats/this/is/a/dir ' test_expect_success "dir no longer appears $EXTRA" ' @@ -249,7 +250,7 @@ test_files_api() { ' test_expect_success "can remove file from root $EXTRA" ' - ipfs files $ARGS rm /file2 + ipfs files rm /file2 ' test_expect_success "file no longer appears $EXTRA" ' @@ -257,22 +258,22 @@ test_files_api() { ' test_expect_success "check root hash $EXTRA" ' - ipfs files $ARGS stat --hash / > roothash + ipfs files stat --hash / > roothash ' test_expect_success "cannot remove root $EXTRA" ' - test_expect_code 1 ipfs files $ARGS rm -r / + test_expect_code 1 ipfs files rm -r / ' test_expect_success "check root hash was not changed $EXTRA" ' - ipfs files $ARGS stat --hash / > roothashafter && + ipfs files stat --hash / > roothashafter && test_cmp roothash roothashafter ' # test read options test_expect_success "read from offset works $EXTRA" ' - ipfs files $ARGS read -o 1 
/cats/file1 > output + ipfs files read -o 1 /cats/file1 > output ' test_expect_success "output looks good $EXTRA" ' @@ -281,7 +282,7 @@ test_files_api() { ' test_expect_success "read with size works $EXTRA" ' - ipfs files $ARGS read -n 2 /cats/file1 > output + ipfs files read -n 2 /cats/file1 > output ' test_expect_success "output looks good $EXTRA" ' @@ -290,11 +291,11 @@ test_files_api() { ' test_expect_success "cannot read from negative offset $EXTRA" ' - test_expect_code 1 ipfs files $ARGS read --offset -3 /cats/file1 + test_expect_code 1 ipfs files read --offset -3 /cats/file1 ' test_expect_success "read from offset 0 works $EXTRA" ' - ipfs files $ARGS read --offset 0 /cats/file1 > output + ipfs files read --offset 0 /cats/file1 > output ' test_expect_success "output looks good $EXTRA" ' @@ -303,7 +304,7 @@ test_files_api() { ' test_expect_success "read last byte works $EXTRA" ' - ipfs files $ARGS read --offset 2 /cats/file1 > output + ipfs files read --offset 2 /cats/file1 > output ' test_expect_success "output looks good $EXTRA" ' @@ -312,7 +313,7 @@ test_files_api() { ' test_expect_success "offset past end of file fails $EXTRA" ' - test_expect_code 1 ipfs files $ARGS read --offset 5 /cats/file1 + test_expect_code 1 ipfs files read --offset 5 /cats/file1 ' test_expect_success "cannot read negative count bytes $EXTRA" ' @@ -320,7 +321,7 @@ test_files_api() { ' test_expect_success "reading zero bytes prints nothing $EXTRA" ' - ipfs files $ARGS read --count 0 /cats/file1 > output + ipfs files read --count 0 /cats/file1 > output ' test_expect_success "output looks good $EXTRA" ' @@ -329,7 +330,7 @@ test_files_api() { ' test_expect_success "count > len(file) prints entire file $EXTRA" ' - ipfs files $ARGS read --count 200 /cats/file1 > output + ipfs files read --count 200 /cats/file1 > output ' test_expect_success "output looks good $EXTRA" ' @@ -341,7 +342,7 @@ test_files_api() { test_expect_success "can write file $EXTRA" ' echo "ipfs rocks" > tmpfile && - cat 
tmpfile | ipfs files $ARGS write --create /cats/ipfs + cat tmpfile | ipfs files write $ARGS $RAW_LEAVES --create /cats/ipfs ' test_expect_success "file was created $EXTRA" ' @@ -349,44 +350,44 @@ test_files_api() { ' test_expect_success "can read file we just wrote $EXTRA" ' - ipfs files $ARGS read /cats/ipfs > output + ipfs files read /cats/ipfs > output ' test_expect_success "can write to offset $EXTRA" ' - echo "is super cool" | ipfs files $ARGS write -o 5 /cats/ipfs + echo "is super cool" | ipfs files write $ARGS $RAW_LEAVES -o 5 /cats/ipfs ' test_expect_success "file looks correct $EXTRA" ' echo "ipfs is super cool" > expected && - ipfs files $ARGS read /cats/ipfs > output && + ipfs files read /cats/ipfs > output && test_cmp expected output ' test_expect_success "file hash correct $EXTRA" ' echo $FILE_HASH > filehash_expected && - ipfs files $ARGS stat --hash /cats/ipfs > filehash && + ipfs files stat --hash /cats/ipfs > filehash && test_cmp filehash_expected filehash ' test_expect_success "cant write to negative offset $EXTRA" ' - test_expect_code 1 ipfs files $ARGS write --offset -1 /cats/ipfs < output + test_expect_code 1 ipfs files write $ARGS $RAW_LEAVES --offset -1 /cats/ipfs < output ' test_expect_success "verify file was not changed $EXTRA" ' - ipfs files $ARGS stat --hash /cats/ipfs > afterhash && + ipfs files stat --hash /cats/ipfs > afterhash && test_cmp filehash afterhash ' test_expect_success "write new file for testing $EXTRA" ' - echo foobar | ipfs files $ARGS write --create /fun + echo foobar | ipfs files write $ARGS $RAW_LEAVES --create /fun ' test_expect_success "write to offset past end works $EXTRA" ' - echo blah | ipfs files $ARGS write --offset 50 /fun + echo blah | ipfs files write $ARGS $RAW_LEAVES --offset 50 /fun ' test_expect_success "can read file $EXTRA" ' - ipfs files $ARGS read /fun > sparse_output + ipfs files read /fun > sparse_output ' test_expect_success "output looks good $EXTRA" ' @@ -396,21 +397,21 @@ test_files_api() { ' 
test_expect_success "cleanup $EXTRA" ' - ipfs files $ARGS rm /fun + ipfs files rm /fun ' test_expect_success "cannot write to directory $EXTRA" ' - ipfs files $ARGS stat --hash /cats > dirhash && - test_expect_code 1 ipfs files $ARGS write /cats < output + ipfs files stat --hash /cats > dirhash && + test_expect_code 1 ipfs files write $ARGS $RAW_LEAVES /cats < output ' test_expect_success "verify dir was not changed $EXTRA" ' - ipfs files $ARGS stat --hash /cats > afterdirhash && + ipfs files stat --hash /cats > afterdirhash && test_cmp dirhash afterdirhash ' test_expect_success "cannot write to nonexistant path $EXTRA" ' - test_expect_code 1 ipfs files $ARGS write /cats/bar/ < output + test_expect_code 1 ipfs files write $ARGS $RAW_LEAVES /cats/bar/ < output ' test_expect_success "no new paths were created $EXTRA" ' @@ -418,7 +419,7 @@ test_files_api() { ' test_expect_success "write 'no-flush' succeeds $EXTRA" ' - echo "testing" | ipfs files $ARGS write -f=false -e /cats/walrus + echo "testing" | ipfs files write $ARGS $RAW_LEAVES -f=false -e /cats/walrus ' test_expect_success "root hash not bubbled up yet $EXTRA" ' @@ -428,7 +429,7 @@ test_files_api() { ' test_expect_success "changes bubbled up to root on inspection $EXTRA" ' - ipfs files $ARGS stat --hash / > root_hash + ipfs files stat --hash / > root_hash ' test_expect_success "root hash looks good $EXTRA" ' @@ -445,12 +446,12 @@ test_files_api() { ' test_expect_success "flush root succeeds $EXTRA" ' - ipfs files $ARGS flush / + ipfs files flush / ' # test mv test_expect_success "can mv dir $EXTRA" ' - ipfs files $ARGS mv /cats/this/is /cats/ + ipfs files mv /cats/this/is /cats/ ' test_expect_success "mv worked $EXTRA" ' @@ -459,7 +460,7 @@ test_files_api() { ' test_expect_success "cleanup, remove 'cats' $EXTRA" ' - ipfs files $ARGS rm -r /cats + ipfs files rm -r /cats ' test_expect_success "cleanup looks good $EXTRA" ' @@ -468,36 +469,36 @@ test_files_api() { # test truncating test_expect_success "create a 
new file $EXTRA" ' - echo "some content" | ipfs files $ARGS write --create /cats + echo "some content" | ipfs files write $ARGS $RAW_LEAVES --create /cats ' test_expect_success "truncate and write over that file $EXTRA" ' - echo "fish" | ipfs files $ARGS write --truncate /cats + echo "fish" | ipfs files write $ARGS $RAW_LEAVES --truncate /cats ' test_expect_success "output looks good $EXTRA" ' - ipfs files $ARGS read /cats > file_out && + ipfs files read /cats > file_out && echo "fish" > file_exp && test_cmp file_out file_exp ' test_expect_success "file hash correct $EXTRA" ' echo $TRUNC_HASH > filehash_expected && - ipfs files $ARGS stat --hash /cats > filehash && + ipfs files stat --hash /cats > filehash && test_cmp filehash_expected filehash ' test_expect_success "cleanup $EXTRA" ' - ipfs files $ARGS rm /cats + ipfs files rm /cats ' # test flush flags test_expect_success "mkdir --flush works $EXTRA" ' - ipfs files $ARGS mkdir --flush --parents /flushed/deep + ipfs files mkdir $ARGS --flush --parents /flushed/deep ' test_expect_success "mkdir --flush works a second time $EXTRA" ' - ipfs files $ARGS mkdir --flush --parents /flushed/deep + ipfs files mkdir $ARGS --flush --parents /flushed/deep ' test_expect_success "dir looks right $EXTRA" ' @@ -509,7 +510,7 @@ test_files_api() { ' test_expect_success "cleanup $EXTRA" ' - ipfs files $ARGS rm -r /flushed + ipfs files rm -r /flushed ' test_expect_success "child dir looks right $EXTRA" ' @@ -518,43 +519,43 @@ test_files_api() { # test for https://github.com/ipfs/go-ipfs/issues/2654 test_expect_success "create and remove dir $EXTRA" ' - ipfs files $ARGS mkdir /test_dir && - ipfs files $ARGS rm -r "/test_dir" + ipfs files mkdir $ARGS /test_dir && + ipfs files rm -r "/test_dir" ' test_expect_success "create test file $EXTRA" ' - echo "content" | ipfs files $ARGS write -e "/test_file" + echo "content" | ipfs files write $ARGS $RAW_LEAVES -e "/test_file" ' test_expect_success "copy test file onto test dir $EXTRA" ' - ipfs 
files $ARGS cp "/test_file" "/test_dir" + ipfs files cp "/test_file" "/test_dir" ' test_expect_success "test /test_dir $EXTRA" ' - ipfs files $ARGS stat "/test_dir" | grep -q "^Type: file" + ipfs files stat "/test_dir" | grep -q "^Type: file" ' test_expect_success "clean up /test_dir and /test_file $EXTRA" ' - ipfs files $ARGS rm -r /test_dir && - ipfs files $ARGS rm -r /test_file + ipfs files rm -r /test_dir && + ipfs files rm -r /test_file ' test_expect_success "make a directory and a file $EXTRA" ' - ipfs files $ARGS mkdir /adir && - echo "blah" | ipfs files $ARGS write --create /foobar + ipfs files mkdir $ARGS /adir && + echo "blah" | ipfs files write $ARGS $RAW_LEAVES --create /foobar ' test_expect_success "copy a file into a directory $EXTRA" ' - ipfs files $ARGS cp /foobar /adir/ + ipfs files cp /foobar /adir/ ' test_expect_success "file made it into directory $EXTRA" ' - ipfs files $ARGS ls /adir | grep foobar + ipfs files ls /adir | grep foobar ' test_expect_success "clean up $EXTRA" ' - ipfs files $ARGS rm -r /foobar && - ipfs files $ARGS rm -r /adir + ipfs files rm -r /foobar && + ipfs files rm -r /adir ' test_expect_success "root mfs entry is empty $EXTRA" ' @@ -595,7 +596,7 @@ tests_for_files_api() { CATS_HASH=QmPqWDEg7NoWRX8Y4vvYjZtmdg5umbfsTQ9zwNr12JoLmt FILE_HASH=QmRCgHeoKxCqK2Es6M6nPUDVWz19yNQPnsXGsXeuTkSKpN TRUNC_HASH=QmRFJEKWF5A5FyFYZgNhusLw2UziW9zBKYr4huyHjzcB6o - test_files_api "($EXTRA, raw-leaves)" --raw-leaves + test_files_api "($EXTRA, raw-leaves)" '' --raw-leaves ROOT_HASH=QmageRWxC7wWjPv5p36NeAgBAiFdBHaNfxAehBSwzNech2 CATS_HASH=zdj7WkEzPLNAr5TYJSQC8CFcBjLvWFfGdx6kaBrJXnBguwWeX @@ -606,7 +607,7 @@ tests_for_files_api() { fi test_expect_success "can update root hash to cidv1" ' - ipfs files --cid-version=1 chcid / && + ipfs files chcid --cid-version=1 / && echo zdj7WbTaiJT1fgatdet9Ei9iDB5hdCxkbVyhyh8YTUnXMiwYi > hash_expect && ipfs files stat --hash / > hash_actual && test_cmp hash_expect hash_actual @@ -617,7 +618,7 @@ 
tests_for_files_api() { if [ "$EXTRA" = "offline" ]; then test_expect_success "can update root hash to blake2b-256" ' - ipfs files --hash-fun=blake2b-256 chcid / && + ipfs files chcid --hash=blake2b-256 / && echo zDMZof1kvswQMT8txrmnb3JGBuna6qXCTry6hSifrkZEd6VmHbBm > hash_expect && ipfs files stat --hash / > hash_actual && test_cmp hash_expect hash_actual @@ -630,7 +631,7 @@ tests_for_files_api() { fi test_expect_success "can update root hash back to cidv0" ' - ipfs files --cid-version=0 chcid / && + ipfs files chcid / --cid-version=0 && echo QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn > hash_expect && ipfs files stat --hash / > hash_actual && test_cmp hash_expect hash_actual From 0182e5977c2c26f044d9555aa3a19f3e7985ee95 Mon Sep 17 00:00:00 2001 From: Kevin Atkinson Date: Thu, 19 Oct 2017 12:09:56 -0400 Subject: [PATCH 15/16] Documentation. License: MIT Signed-off-by: Kevin Atkinson --- merkledag/node.go | 3 +++ unixfs/hamt/hamt.go | 2 +- unixfs/io/dirbuilder.go | 1 + unixfs/test/utils.go | 1 + 4 files changed, 6 insertions(+), 1 deletion(-) diff --git a/merkledag/node.go b/merkledag/node.go index fae3fa7fc14..ad021fa308d 100644 --- a/merkledag/node.go +++ b/merkledag/node.go @@ -42,7 +42,10 @@ var v1CidPrefix = cid.Prefix{ Version: 1, } +// V0CidPrefix returns a prefix for CIDv0 func V0CidPrefix() cid.Prefix { return v0CidPrefix } + +// V1CidPrefix returns a prefix for CIDv1 with the default settings func V1CidPrefix() cid.Prefix { return v1CidPrefix } // PrefixForCidVersion returns the Protobuf prefix for a given CID version diff --git a/unixfs/hamt/hamt.go b/unixfs/hamt/hamt.go index bd280930180..fecf23b46a7 100644 --- a/unixfs/hamt/hamt.go +++ b/unixfs/hamt/hamt.go @@ -131,7 +131,7 @@ func (ds *HamtShard) SetPrefix(prefix *cid.Prefix) { ds.prefix = prefix } -// GetPrefix gets the CID Prefix, may be nil if unset +// Prefix gets the CID Prefix, may be nil if unset func (ds *HamtShard) Prefix() *cid.Prefix { return ds.prefix } diff --git 
a/unixfs/io/dirbuilder.go b/unixfs/io/dirbuilder.go index 9ca587e2c21..f86d23fb775 100644 --- a/unixfs/io/dirbuilder.go +++ b/unixfs/io/dirbuilder.go @@ -194,6 +194,7 @@ func (d *Directory) GetNode() (node.Node, error) { return d.shard.Node() } +// GetPrefix returns the CID Prefix used func (d *Directory) GetPrefix() *cid.Prefix { if d.shard == nil { return &d.dirnode.Prefix diff --git a/unixfs/test/utils.go b/unixfs/test/utils.go index 8b18ad9cd07..24359d377b8 100644 --- a/unixfs/test/utils.go +++ b/unixfs/test/utils.go @@ -31,6 +31,7 @@ func GetDAGServ() mdag.DAGService { return mdagmock.Mock() } +// NodeOpts is used by GetNode, GetEmptyNode and GetRandomNode type NodeOpts struct { Prefix cid.Prefix // ForceRawLeaves if true will force the use of raw leaves From 3003f93854681ba4296cffaf542f1786da8c0dca Mon Sep 17 00:00:00 2001 From: Kevin Atkinson Date: Thu, 19 Oct 2017 16:08:10 -0400 Subject: [PATCH 16/16] Fix "files stat" to work on raw nodes. License: MIT Signed-off-by: Kevin Atkinson --- core/commands/files/files.go | 58 ++++++++++++++++++-------------- test/sharness/t0250-files-api.sh | 13 +++++++ 2 files changed, 46 insertions(+), 25 deletions(-) diff --git a/core/commands/files/files.go b/core/commands/files/files.go index ef7903e7de4..bd39b1f2d5f 100644 --- a/core/commands/files/files.go +++ b/core/commands/files/files.go @@ -168,38 +168,46 @@ func statNode(ds dag.DAGService, fsn mfs.FSNode) (*Object, error) { c := nd.Cid() - pbnd, ok := nd.(*dag.ProtoNode) - if !ok { - return nil, dag.ErrNotProtobuf - } - - d, err := ft.FromBytes(pbnd.Data()) - if err != nil { - return nil, err - } - cumulsize, err := nd.Size() if err != nil { return nil, err } - var ndtype string - switch fsn.Type() { - case mfs.TDir: - ndtype = "directory" - case mfs.TFile: - ndtype = "file" + switch n := nd.(type) { + case *dag.ProtoNode: + d, err := ft.FromBytes(n.Data()) + if err != nil { + return nil, err + } + + var ndtype string + switch fsn.Type() { + case mfs.TDir: + ndtype = 
"directory" + case mfs.TFile: + ndtype = "file" + default: + return nil, fmt.Errorf("unrecognized node type: %s", fsn.Type()) + } + + return &Object{ + Hash: c.String(), + Blocks: len(nd.Links()), + Size: d.GetFilesize(), + CumulativeSize: cumulsize, + Type: ndtype, + }, nil + case *dag.RawNode: + return &Object{ + Hash: c.String(), + Blocks: 0, + Size: cumulsize, + CumulativeSize: cumulsize, + Type: "file", + }, nil default: - return nil, fmt.Errorf("Unrecognized node type: %s", fsn.Type()) + return nil, fmt.Errorf("not unixfs node (proto or raw)") } - - return &Object{ - Hash: c.String(), - Blocks: len(nd.Links()), - Size: d.GetFilesize(), - CumulativeSize: cumulsize, - Type: ndtype, - }, nil } var FilesCpCmd = &cmds.Command{ diff --git a/test/sharness/t0250-files-api.sh b/test/sharness/t0250-files-api.sh index 7f1f08a68f4..ecdf97925a8 100755 --- a/test/sharness/t0250-files-api.sh +++ b/test/sharness/t0250-files-api.sh @@ -177,6 +177,19 @@ test_files_api() { test_cmp ls_l_expected ls_l_actual ' + test_expect_success "can stat file $EXTRA" ' + ipfs files stat /cats/file1 > file1stat_orig + ' + + test_expect_success "stat output looks good" ' + grep -v CumulativeSize: file1stat_orig > file1stat_actual && + echo "$FILE1" > file1stat_expect && + echo "Size: 4" >> file1stat_expect && + echo "ChildBlocks: 0" >> file1stat_expect && + echo "Type: file" >> file1stat_expect && + test_cmp file1stat_expect file1stat_actual + ' + test_expect_success "can read file $EXTRA" ' ipfs files read /cats/file1 > file1out '