From 4aceac54615c64a0e3b441ba80de948f97f7e039 Mon Sep 17 00:00:00 2001
From: Dennis Trautwein
Date: Wed, 13 Sep 2023 17:06:55 +0200
Subject: [PATCH] use mock clock for IPNS record generation

---
 v2/backend_provider_test.go | 29 ++++++++++---------
 v2/handlers_test.go         | 57 ++++++++++++++++++++++++-------------
 2 files changed, 54 insertions(+), 32 deletions(-)

diff --git a/v2/backend_provider_test.go b/v2/backend_provider_test.go
index b87cf488..470c6556 100644
--- a/v2/backend_provider_test.go
+++ b/v2/backend_provider_test.go
@@ -41,17 +41,19 @@ func newBackendProvider(t testing.TB, cfg *ProvidersBackendConfig) *ProvidersBac
 }
 
 func TestProvidersBackend_GarbageCollection(t *testing.T) {
-	mockClock := clock.NewMock()
+	clk := clock.NewMock()
+
 	cfg, err := DefaultProviderBackendConfig()
 	require.NoError(t, err)
 
-	cfg.clk = mockClock
+	cfg.clk = clk
 	cfg.Logger = devnull
 
 	b := newBackendProvider(t, cfg)
 
 	// start the garbage collection process
 	b.StartGarbageCollection()
+	t.Cleanup(func() { b.StopGarbageCollection() })
 
 	// write random record to datastore and peerstore
 	ctx := context.Background()
@@ -59,7 +61,7 @@ func TestProvidersBackend_GarbageCollection(t *testing.T) {
 
 	// write to datastore
 	dsKey := newDatastoreKey(namespaceProviders, "random-key", string(p.ID))
-	rec := expiryRecord{expiry: mockClock.Now()}
+	rec := expiryRecord{expiry: clk.Now()}
 	err = b.datastore.Put(ctx, dsKey, rec.MarshalBinary())
 	require.NoError(t, err)
 
@@ -67,10 +69,10 @@ func TestProvidersBackend_GarbageCollection(t *testing.T) {
 	b.addrBook.AddAddrs(p.ID, p.Addrs, time.Hour)
 
 	// advance clock half the validity time and check if record is still there
-	mockClock.Add(cfg.ProvideValidity / 2)
+	clk.Add(cfg.ProvideValidity / 2)
 
-	// sync autobatching datastore to have all put/deletes visible
-	err = b.datastore.Sync(ctx, ds.NewKey(""))
+	// sync datastore to have all put/deletes visible
+	err = b.datastore.Sync(ctx, ds.NewKey("/"))
 	require.NoError(t, err)
 
 	// we expect the record to still be there after half the ProvideValidity
@@ -78,17 +80,18 @@ func TestProvidersBackend_GarbageCollection(t *testing.T) {
 	require.NoError(t, err)
 
 	// advance clock another time and check if the record was GC'd now
-	mockClock.Add(cfg.ProvideValidity + cfg.GCInterval)
+	clk.Add(cfg.ProvideValidity + cfg.GCInterval)
 
-	// sync autobatching datastore to have all put/deletes visible
-	err = b.datastore.Sync(ctx, ds.NewKey(""))
+	// sync datastore to have all put/deletes visible
+	err = b.datastore.Sync(ctx, ds.NewKey("/"))
 	require.NoError(t, err)
 
-	// we expect the record to be GC'd now
-	_, err = b.datastore.Get(ctx, dsKey)
-	require.ErrorIs(t, err, ds.ErrNotFound)
+	clk.Add(time.Minute)
 
-	b.StopGarbageCollection()
+	// we expect the record to be GC'd now
+	val, err := b.datastore.Get(ctx, dsKey)
+	assert.ErrorIs(t, err, ds.ErrNotFound)
+	assert.Nil(t, val)
 }
 
 func TestProvidersBackend_GarbageCollection_lifecycle_thread_safe(t *testing.T) {
diff --git a/v2/handlers_test.go b/v2/handlers_test.go
index ec320fee..79806473 100644
--- a/v2/handlers_test.go
+++ b/v2/handlers_test.go
@@ -12,8 +12,7 @@ import (
 	"testing"
 	"time"
 
-	"google.golang.org/protobuf/proto"
-
+	"github.com/benbjohnson/clock"
 	"github.com/ipfs/boxo/ipns"
 	"github.com/ipfs/boxo/path"
 	ds "github.com/ipfs/go-datastore"
@@ -25,6 +24,7 @@ import (
 	ma "github.com/multiformats/go-multiaddr"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"google.golang.org/protobuf/proto"
 
 	"github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
 	"github.com/libp2p/go-libp2p-kad-dht/v2/pb"
@@ -432,12 +432,12 @@ func BenchmarkDHT_handlePing(b *testing.B) {
 	}
 }
 
-func newPutIPNSRequest(t testing.TB, priv crypto.PrivKey, seq uint64, eol time.Time, ttl time.Duration) *pb.Message {
+func newPutIPNSRequest(t testing.TB, clk clock.Clock, priv crypto.PrivKey, seq uint64, ttl time.Duration) *pb.Message {
 	t.Helper()
 
 	testPath := path.Path("/ipfs/bafkqac3jobxhgidsn5rww4yk")
 
-	rec, err := ipns.NewRecord(priv, testPath, seq, eol, ttl)
+	rec, err := ipns.NewRecord(priv, testPath, seq, clk.Now().Add(ttl), ttl)
 	require.NoError(t, err)
 
 	remote, err := peer.IDFromPublicKey(priv.GetPublic())
@@ -469,7 +469,7 @@ func BenchmarkDHT_handlePutValue_unique_peers(b *testing.B) {
 	for i := 0; i < b.N; i++ {
 		remote, priv := newIdentity(b)
 		peers[i] = remote
-		reqs[i] = newPutIPNSRequest(b, priv, uint64(i), time.Now().Add(time.Hour), time.Hour)
+		reqs[i] = newPutIPNSRequest(b, d.cfg.Clock, priv, uint64(i), time.Hour)
 	}
 
 	ctx := context.Background()
@@ -491,7 +491,7 @@ func BenchmarkDHT_handlePutValue_single_peer(b *testing.B) {
 	remote, priv := newIdentity(b)
 	reqs := make([]*pb.Message, b.N)
 	for i := 0; i < b.N; i++ {
-		reqs[i] = newPutIPNSRequest(b, priv, uint64(i), time.Now().Add(time.Hour), time.Hour)
+		reqs[i] = newPutIPNSRequest(b, d.cfg.Clock, priv, uint64(i), time.Hour)
 	}
 
 	ctx := context.Background()
@@ -510,19 +510,28 @@ func TestDHT_handlePutValue_happy_path_ipns_record(t *testing.T) {
 	ctx := context.Background()
 
 	// init new DHT
-	d := newTestDHT(t)
+	clk := clock.NewMock()
+	clk.Set(time.Now()) // needed because record validators don't use mock clocks
+
+	cfg := DefaultConfig()
+	cfg.Clock = clk
+
+	d := newTestDHTWithConfig(t, cfg)
 
 	// generate new identity for the peer that issues the request
 	remote, priv := newIdentity(t)
 
 	// expired record
-	req := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour)
+	req := newPutIPNSRequest(t, clk, priv, 0, time.Hour)
 
 	ns, suffix, err := record.SplitKey(string(req.Key))
 	require.NoError(t, err)
 
 	_, err = d.backends[ns].Fetch(ctx, suffix)
 	require.ErrorIs(t, err, ds.ErrNotFound)
 
+	// advance the clock a bit so that the TimeReceived values will definitely differ
+	clk.Add(time.Minute)
+
 	cloned := proto.Clone(req).(*pb.Message)
 	_, err = d.handlePutValue(ctx, remote, cloned)
 	require.NoError(t, err)
@@ -588,7 +597,7 @@ func TestDHT_handlePutValue_bad_ipns_record(t *testing.T) {
 	remote, priv := newIdentity(t)
 
 	// expired record
-	req := newPutIPNSRequest(t, priv, 10, time.Now().Add(-time.Hour), -time.Hour)
+	req := newPutIPNSRequest(t, d.cfg.Clock, priv, 10, -time.Hour)
 
 	resp, err := d.handlePutValue(context.Background(), remote, req)
 	assert.Error(t, err)
@@ -601,8 +610,8 @@ func TestDHT_handlePutValue_worse_ipns_record_after_first_put(t *testing.T) {
 
 	remote, priv := newIdentity(t)
 
-	goodReq := newPutIPNSRequest(t, priv, 10, time.Now().Add(time.Hour), time.Hour)
-	worseReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour)
+	goodReq := newPutIPNSRequest(t, d.cfg.Clock, priv, 10, time.Hour)
+	worseReq := newPutIPNSRequest(t, d.cfg.Clock, priv, 0, time.Hour)
 
 	for i, req := range []*pb.Message{goodReq, worseReq} {
 		resp, err := d.handlePutValue(context.Background(), remote, req)
@@ -630,8 +639,8 @@ func TestDHT_handlePutValue_probe_race_condition(t *testing.T) {
 
 	for i := 0; i < 100; i++ {
 
-		req1 := newPutIPNSRequest(t, priv, uint64(2*i), time.Now().Add(time.Hour), time.Hour)
-		req2 := newPutIPNSRequest(t, priv, uint64(2*i+1), time.Now().Add(time.Hour), time.Hour)
+		req1 := newPutIPNSRequest(t, d.cfg.Clock, priv, uint64(2*i), time.Hour)
+		req2 := newPutIPNSRequest(t, d.cfg.Clock, priv, uint64(2*i+1), time.Hour)
 
 		var wg sync.WaitGroup
 		wg.Add(1)
@@ -673,7 +682,7 @@ func TestDHT_handlePutValue_overwrites_corrupt_stored_ipns_record(t *testing.T)
 
 	remote, priv := newIdentity(t)
 
-	req := newPutIPNSRequest(t, priv, 10, time.Now().Add(time.Hour), time.Hour)
+	req := newPutIPNSRequest(t, d.cfg.Clock, priv, 10, time.Hour)
 
 	dsKey := newDatastoreKey(namespaceIPNS, string(remote)) // string(remote) is the key suffix
 
@@ -878,7 +887,7 @@ func BenchmarkDHT_handleGetValue(b *testing.B) {
 	for i := 0; i < b.N; i++ {
 		pid, priv := newIdentity(b)
 
-		putReq := newPutIPNSRequest(b, priv, 0, time.Now().Add(time.Hour), time.Hour)
+		putReq := newPutIPNSRequest(b, d.cfg.Clock, priv, 0, time.Hour)
 
 		data, err := putReq.Record.Marshal()
 		require.NoError(b, err)
@@ -915,7 +924,7 @@ func TestDHT_handleGetValue_happy_path_ipns_record(t *testing.T) {
 
 	remote, priv := newIdentity(t)
 
-	putReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour)
+	putReq := newPutIPNSRequest(t, d.cfg.Clock, priv, 0, time.Hour)
 
 	rbe, err := typedBackend[*RecordBackend](d, namespaceIPNS)
 	require.NoError(t, err)
@@ -1006,13 +1015,19 @@ func TestDHT_handleGetValue_corrupt_record_in_datastore(t *testing.T) {
 }
 
 func TestDHT_handleGetValue_ipns_max_age_exceeded_in_datastore(t *testing.T) {
-	d := newTestDHT(t)
+	clk := clock.NewMock()
+	clk.Set(time.Now()) // needed because record validators don't use mock clocks
+
+	cfg := DefaultConfig()
+	cfg.Clock = clk
+
+	d := newTestDHTWithConfig(t, cfg)
 
 	fillRoutingTable(t, d, 250)
 
 	remote, priv := newIdentity(t)
 
-	putReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour)
+	putReq := newPutIPNSRequest(t, clk, priv, 0, time.Hour)
 
 	rbe, err := typedBackend[*RecordBackend](d, namespaceIPNS)
 	require.NoError(t, err)
@@ -1030,6 +1045,10 @@ func TestDHT_handleGetValue_ipns_max_age_exceeded_in_datastore(t *testing.T) {
 		Key: putReq.GetKey(),
 	}
 
+	// The following line is not strictly necessary because we set
+	// MaxRecordAge to 0 below. However, it fixes a time granularity bug on Windows.
+	clk.Add(time.Minute)
+
 	rbe.cfg.MaxRecordAge = 0
 
 	resp, err := d.handleGetValue(context.Background(), remote, req)
@@ -1058,7 +1077,7 @@ func TestDHT_handleGetValue_does_not_validate_stored_record(t *testing.T) {
 	remote, priv := newIdentity(t)
 
 	// generate expired record (doesn't pass validation)
-	putReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(-time.Hour), -time.Hour)
+	putReq := newPutIPNSRequest(t, d.cfg.Clock, priv, 0, -time.Hour)
 
 	data, err := putReq.Record.Marshal()
 	require.NoError(t, err)
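
Below is a minimal, self-contained sketch (not part of the patch) of the mock-clock testing pattern this change adopts with github.com/benbjohnson/clock: production code depends on the clock.Clock interface, the test injects a *clock.Mock, pins it to wall time, and advances it deterministically instead of sleeping. The expiringStore type and its methods are hypothetical illustrations, not code from this repository.

package example

import (
	"testing"
	"time"

	"github.com/benbjohnson/clock"
	"github.com/stretchr/testify/require"
)

// expiringStore is a hypothetical component that records when a value was
// stored and reports it as expired once `validity` has elapsed.
type expiringStore struct {
	clk      clock.Clock
	validity time.Duration
	storedAt time.Time
}

func (s *expiringStore) Put()          { s.storedAt = s.clk.Now() }
func (s *expiringStore) Expired() bool { return s.clk.Now().Sub(s.storedAt) > s.validity }

func TestExpiringStore_mockClock(t *testing.T) {
	clk := clock.NewMock()
	clk.Set(time.Now()) // mirror the patch: anchor the mock clock to wall time

	s := &expiringStore{clk: clk, validity: time.Hour}
	s.Put()

	// advance half the validity window: the value must still be valid
	clk.Add(30 * time.Minute)
	require.False(t, s.Expired())

	// advance past the validity window: the value must now be expired
	clk.Add(31 * time.Minute)
	require.True(t, s.Expired())
}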