use mock clock for IPNS record generation #894

Merged · 1 commit · Sep 13, 2023
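For context, this change replaces wall-clock calls (time.Now()) in the tests with a github.com/benbjohnson/clock mock that is injected through the config and advanced explicitly, so IPNS record EOLs and garbage-collection timing become deterministic. Below is a minimal sketch of that pattern, assuming a simplified config struct and test; the names are illustrative stand-ins, not this repo's actual API.

package example

import (
	"testing"
	"time"

	"github.com/benbjohnson/clock"
)

// cfg is a simplified stand-in for the config structs in the diff (cfg.Clock / cfg.clk).
type cfg struct {
	Clock clock.Clock
}

func TestMockClockPattern(t *testing.T) {
	clk := clock.NewMock()
	// The mock starts at the Unix epoch; set it to real time when records
	// are validated against wall-clock EOLs by code that ignores the mock.
	clk.Set(time.Now())

	c := cfg{Clock: clk}

	// Derive deadlines from the injected clock instead of calling time.Now() directly.
	eol := c.Clock.Now().Add(time.Hour)

	// Advance virtual time deterministically, e.g. past a validity window.
	clk.Add(2 * time.Hour)

	if !clk.Now().After(eol) {
		t.Fatal("expected the mock clock to have advanced past the EOL")
	}
}

With this shape, a test like TestProvidersBackend_GarbageCollection below can advance clk past cfg.ProvideValidity instead of sleeping or relying on the real clock.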
29 changes: 16 additions & 13 deletions v2/backend_provider_test.go
@@ -41,54 +41,57 @@ func newBackendProvider(t testing.TB, cfg *ProvidersBackendConfig) *ProvidersBac
}

func TestProvidersBackend_GarbageCollection(t *testing.T) {
mockClock := clock.NewMock()
clk := clock.NewMock()

cfg, err := DefaultProviderBackendConfig()
require.NoError(t, err)

cfg.clk = mockClock
cfg.clk = clk
cfg.Logger = devnull

b := newBackendProvider(t, cfg)

// start the garbage collection process
b.StartGarbageCollection()
t.Cleanup(func() { b.StopGarbageCollection() })

// write random record to datastore and peerstore
ctx := context.Background()
p := newAddrInfo(t)

// write to datastore
dsKey := newDatastoreKey(namespaceProviders, "random-key", string(p.ID))
rec := expiryRecord{expiry: mockClock.Now()}
rec := expiryRecord{expiry: clk.Now()}
err = b.datastore.Put(ctx, dsKey, rec.MarshalBinary())
require.NoError(t, err)

// write to peerstore
b.addrBook.AddAddrs(p.ID, p.Addrs, time.Hour)

// advance clock half the validity time and check if record is still there
mockClock.Add(cfg.ProvideValidity / 2)
clk.Add(cfg.ProvideValidity / 2)

// sync autobatching datastore to have all put/deletes visible
err = b.datastore.Sync(ctx, ds.NewKey(""))
// sync datastore to have all put/deletes visible
err = b.datastore.Sync(ctx, ds.NewKey("/"))
require.NoError(t, err)

// we expect the record to still be there after half the ProvideValidity
_, err = b.datastore.Get(ctx, dsKey)
require.NoError(t, err)

// advance clock another time and check if the record was GC'd now
mockClock.Add(cfg.ProvideValidity + cfg.GCInterval)
clk.Add(cfg.ProvideValidity + cfg.GCInterval)

// sync autobatching datastore to have all put/deletes visible
err = b.datastore.Sync(ctx, ds.NewKey(""))
// sync datastore to have all put/deletes visible
err = b.datastore.Sync(ctx, ds.NewKey("/"))
require.NoError(t, err)

// we expect the record to be GC'd now
_, err = b.datastore.Get(ctx, dsKey)
require.ErrorIs(t, err, ds.ErrNotFound)
clk.Add(time.Minute)

b.StopGarbageCollection()
// we expect the record to be GC'd now
val, err := b.datastore.Get(ctx, dsKey)
assert.ErrorIs(t, err, ds.ErrNotFound)
assert.Nil(t, val)
}

func TestProvidersBackend_GarbageCollection_lifecycle_thread_safe(t *testing.T) {
57 changes: 38 additions & 19 deletions v2/handlers_test.go
@@ -12,8 +12,7 @@ import (
"testing"
"time"

"google.golang.org/protobuf/proto"

"github.com/benbjohnson/clock"
"github.com/ipfs/boxo/ipns"
"github.com/ipfs/boxo/path"
ds "github.com/ipfs/go-datastore"
@@ -25,6 +24,7 @@ import (
ma "github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"

"github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
"github.com/libp2p/go-libp2p-kad-dht/v2/pb"
@@ -432,12 +432,12 @@ func BenchmarkDHT_handlePing(b *testing.B) {
}
}

func newPutIPNSRequest(t testing.TB, priv crypto.PrivKey, seq uint64, eol time.Time, ttl time.Duration) *pb.Message {
func newPutIPNSRequest(t testing.TB, clk clock.Clock, priv crypto.PrivKey, seq uint64, ttl time.Duration) *pb.Message {
t.Helper()

testPath := path.Path("/ipfs/bafkqac3jobxhgidsn5rww4yk")

rec, err := ipns.NewRecord(priv, testPath, seq, eol, ttl)
rec, err := ipns.NewRecord(priv, testPath, seq, clk.Now().Add(ttl), ttl)
require.NoError(t, err)

remote, err := peer.IDFromPublicKey(priv.GetPublic())
@@ -469,7 +469,7 @@ func BenchmarkDHT_handlePutValue_unique_peers(b *testing.B) {
for i := 0; i < b.N; i++ {
remote, priv := newIdentity(b)
peers[i] = remote
reqs[i] = newPutIPNSRequest(b, priv, uint64(i), time.Now().Add(time.Hour), time.Hour)
reqs[i] = newPutIPNSRequest(b, d.cfg.Clock, priv, uint64(i), time.Hour)
}

ctx := context.Background()
@@ -491,7 +491,7 @@ func BenchmarkDHT_handlePutValue_single_peer(b *testing.B) {
remote, priv := newIdentity(b)
reqs := make([]*pb.Message, b.N)
for i := 0; i < b.N; i++ {
reqs[i] = newPutIPNSRequest(b, priv, uint64(i), time.Now().Add(time.Hour), time.Hour)
reqs[i] = newPutIPNSRequest(b, d.cfg.Clock, priv, uint64(i), time.Hour)
}

ctx := context.Background()
@@ -510,19 +510,28 @@ func TestDHT_handlePutValue_happy_path_ipns_record(t *testing.T) {
ctx := context.Background()

// init new DHT
d := newTestDHT(t)
clk := clock.NewMock()
clk.Set(time.Now()) // needed because record validators don't use mock clocks

cfg := DefaultConfig()
cfg.Clock = clk

d := newTestDHTWithConfig(t, cfg)

// generate new identity for the peer that issues the request
remote, priv := newIdentity(t)

// expired record
req := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour)
req := newPutIPNSRequest(t, clk, priv, 0, time.Hour)
ns, suffix, err := record.SplitKey(string(req.Key))
require.NoError(t, err)

_, err = d.backends[ns].Fetch(ctx, suffix)
require.ErrorIs(t, err, ds.ErrNotFound)

// advance the clock a bit so that TimeReceived values will be definitely different
clk.Add(time.Minute)

cloned := proto.Clone(req).(*pb.Message)
_, err = d.handlePutValue(ctx, remote, cloned)
require.NoError(t, err)
@@ -588,7 +597,7 @@ func TestDHT_handlePutValue_bad_ipns_record(t *testing.T) {
remote, priv := newIdentity(t)

// expired record
req := newPutIPNSRequest(t, priv, 10, time.Now().Add(-time.Hour), -time.Hour)
req := newPutIPNSRequest(t, d.cfg.Clock, priv, 10, -time.Hour)

resp, err := d.handlePutValue(context.Background(), remote, req)
assert.Error(t, err)
@@ -601,8 +610,8 @@ func TestDHT_handlePutValue_worse_ipns_record_after_first_put(t *testing.T) {

remote, priv := newIdentity(t)

goodReq := newPutIPNSRequest(t, priv, 10, time.Now().Add(time.Hour), time.Hour)
worseReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour)
goodReq := newPutIPNSRequest(t, d.cfg.Clock, priv, 10, time.Hour)
worseReq := newPutIPNSRequest(t, d.cfg.Clock, priv, 0, time.Hour)

for i, req := range []*pb.Message{goodReq, worseReq} {
resp, err := d.handlePutValue(context.Background(), remote, req)
@@ -630,8 +639,8 @@ func TestDHT_handlePutValue_probe_race_condition(t *testing.T) {

for i := 0; i < 100; i++ {

req1 := newPutIPNSRequest(t, priv, uint64(2*i), time.Now().Add(time.Hour), time.Hour)
req2 := newPutIPNSRequest(t, priv, uint64(2*i+1), time.Now().Add(time.Hour), time.Hour)
req1 := newPutIPNSRequest(t, d.cfg.Clock, priv, uint64(2*i), time.Hour)
req2 := newPutIPNSRequest(t, d.cfg.Clock, priv, uint64(2*i+1), time.Hour)

var wg sync.WaitGroup
wg.Add(1)
@@ -673,7 +682,7 @@ func TestDHT_handlePutValue_overwrites_corrupt_stored_ipns_record(t *testing.T)

remote, priv := newIdentity(t)

req := newPutIPNSRequest(t, priv, 10, time.Now().Add(time.Hour), time.Hour)
req := newPutIPNSRequest(t, d.cfg.Clock, priv, 10, time.Hour)

dsKey := newDatastoreKey(namespaceIPNS, string(remote)) // string(remote) is the key suffix

@@ -878,7 +887,7 @@ func BenchmarkDHT_handleGetValue(b *testing.B) {
for i := 0; i < b.N; i++ {
pid, priv := newIdentity(b)

putReq := newPutIPNSRequest(b, priv, 0, time.Now().Add(time.Hour), time.Hour)
putReq := newPutIPNSRequest(b, d.cfg.Clock, priv, 0, time.Hour)

data, err := putReq.Record.Marshal()
require.NoError(b, err)
@@ -915,7 +924,7 @@ func TestDHT_handleGetValue_happy_path_ipns_record(t *testing.T) {

remote, priv := newIdentity(t)

putReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour)
putReq := newPutIPNSRequest(t, d.cfg.Clock, priv, 0, time.Hour)

rbe, err := typedBackend[*RecordBackend](d, namespaceIPNS)
require.NoError(t, err)
@@ -1006,13 +1015,19 @@ func TestDHT_handleGetValue_corrupt_record_in_datastore(t *testing.T) {
}

func TestDHT_handleGetValue_ipns_max_age_exceeded_in_datastore(t *testing.T) {
d := newTestDHT(t)
clk := clock.NewMock()
clk.Set(time.Now()) // needed because record validators don't use mock clocks

cfg := DefaultConfig()
cfg.Clock = clk

d := newTestDHTWithConfig(t, cfg)

fillRoutingTable(t, d, 250)

remote, priv := newIdentity(t)

putReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour)
putReq := newPutIPNSRequest(t, clk, priv, 0, time.Hour)

rbe, err := typedBackend[*RecordBackend](d, namespaceIPNS)
require.NoError(t, err)
@@ -1030,6 +1045,10 @@ func TestDHT_handleGetValue_ipns_max_age_exceeded_in_datastore(t *testing.T) {
Key: putReq.GetKey(),
}

// The following line is actually not necessary because we set the
// MaxRecordAge to 0. However, it fixes a time granularity bug on Windows
clk.Add(time.Minute)

rbe.cfg.MaxRecordAge = 0

resp, err := d.handleGetValue(context.Background(), remote, req)
@@ -1058,7 +1077,7 @@ func TestDHT_handleGetValue_does_not_validate_stored_record(t *testing.T) {
remote, priv := newIdentity(t)

// generate expired record (doesn't pass validation)
putReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(-time.Hour), -time.Hour)
putReq := newPutIPNSRequest(t, d.cfg.Clock, priv, 0, -time.Hour)

data, err := putReq.Record.Marshal()
require.NoError(t, err)