diff --git a/assigner/server/server_test.go b/assigner/server/server_test.go
index 3a42f043f..9a9f4122e 100644
--- a/assigner/server/server_test.go
+++ b/assigner/server/server_test.go
@@ -41,17 +41,13 @@ const pubsubTopic = "/indexer/ingest/mainnet"
 
 func setupServer(t *testing.T, assigner *core.Assigner) *server.Server {
 	s, err := server.New("127.0.0.1:0", assigner)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	return s
 }
 
 func setupClient(t *testing.T, host string) *client.Client {
 	c, err := client.New(host)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	return c
 }
 
@@ -176,9 +172,8 @@ func TestAssignOnAnnounce(t *testing.T) {
 	assignChan, cancel := assigner.OnAssignment(peerID)
 	defer cancel()
 
-	if err := cl.Announce(context.Background(), ai, cid.NewCidV1(22, mhs[0])); err != nil {
-		t.Fatalf("Failed to announce to %s: %s", s.URL(), err)
-	}
+	err = cl.Announce(context.Background(), ai, cid.NewCidV1(22, mhs[0]))
+	require.NoErrorf(t, err, "Failed to announce to %s", s.URL())
 
 	select {
 	case indexerNum := <-assignChan:
@@ -231,9 +226,7 @@ func initAssigner(t *testing.T, trustedID string) (*core.Assigner, config.Assign
 		PubSubTopic: pubsubTopic,
 	}
 	assigner, err := core.NewAssigner(context.Background(), cfg, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	return assigner, cfg
 }
 
diff --git a/carstore/carwriter_test.go b/carstore/carwriter_test.go
index 7d5e69e0e..89c0f2f1f 100644
--- a/carstore/carwriter_test.go
+++ b/carstore/carwriter_test.go
@@ -262,11 +262,8 @@ func TestWriteExistingAdsInStore(t *testing.T) {
 	var carFound bool
 	fc, ec := fileStore.List(ctx, "", false)
 	for fileInfo := range fc {
-		if fileInfo.Path == carName {
-			carFound = true
-		} else {
-			t.Fatal("unexpected file")
-		}
+		require.Equal(t, carName, fileInfo.Path, "unexpected file")
+		carFound = true
 	}
 	err = <-ec
 	require.NoError(t, err)
diff --git a/command/init_test.go b/command/init_test.go
index bc8f75042..b80629ae7 100644
--- a/command/init_test.go
+++ b/command/init_test.go
@@ -6,6 +6,7 @@ import (
 	"testing"
 
 	"github.com/ipni/storetheindex/config"
+	"github.com/stretchr/testify/require"
 	"github.com/urfave/cli/v2"
 )
 
@@ -34,19 +35,13 @@ func TestInit(t *testing.T) {
 	)
 
 	err := app.RunContext(ctx, []string{"storetheindex", "init", "-listen-admin", badAddr})
-	if err == nil {
-		t.Fatal("expected error")
-	}
+	require.Error(t, err)
 
 	err = app.RunContext(ctx, []string{"storetheindex", "init", "-listen-finder", badAddr})
-	if err == nil {
-		t.Fatal("expected error")
-	}
+	require.Error(t, err)
 
 	err = app.RunContext(ctx, []string{"storetheindex", "init", "-listen-ingest", badAddr})
-	if err == nil {
-		t.Fatal("expected error")
-	}
+	require.Error(t, err)
 
 	args := []string{
 		"storetheindex", "init",
@@ -57,33 +52,17 @@ func TestInit(t *testing.T) {
 		"-store", storeType,
 	}
 	err = app.RunContext(ctx, args)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	cfg, err := config.Load("")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
-	if cfg.Addresses.Finder != goodAddr {
-		t.Error("finder listen address was not configured")
-	}
-	if cfg.Addresses.Ingest != goodAddr2 {
-		t.Error("ingest listen address was not configured")
-	}
-	if cfg.Indexer.CacheSize != cacheSize {
-		t.Error("cache size was tno configured")
-	}
-	if cfg.Indexer.ValueStoreType != storeType {
-		t.Error("value store type was not configured")
-	}
-	if cfg.Ingest.PubSubTopic != topicName {
-		t.Errorf("expected %s for pubsub topic, got %s", topicName, cfg.Ingest.PubSubTopic)
-	}
-	if cfg.Version != config.Version {
-		t.Error("did not init config with correct version")
-	}
+	require.Equal(t, goodAddr, cfg.Addresses.Finder, "finder listen address was not configured")
+	require.Equal(t, goodAddr2, cfg.Addresses.Ingest, "ingest listen address was not configured")
+	require.Equal(t, cacheSize, cfg.Indexer.CacheSize, "cache size was not configured")
+	require.Equal(t, storeType, cfg.Indexer.ValueStoreType, "value store type was not configured")
+	require.Equal(t, topicName, cfg.Ingest.PubSubTopic)
+	require.Equal(t, config.Version, cfg.Version)
 
 	t.Log(cfg.String())
 }
diff --git a/config/bootstrap_test.go b/config/bootstrap_test.go
index 772d49e11..f16fa8c4b 100644
--- a/config/bootstrap_test.go
+++ b/config/bootstrap_test.go
@@ -3,6 +3,8 @@ package config
 import (
 	"sort"
 	"testing"
+
+	"github.com/stretchr/testify/require"
 )
 
 var testBootstrapAddresses = []string{
@@ -19,9 +21,7 @@ func TestBoostrapPeers(t *testing.T) {
 		Peers: testBootstrapAddresses,
 	}
 	addrs, err := b.PeerAddrs()
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	var b2 Bootstrap
 	b2.SetPeers(addrs)
@@ -29,8 +29,6 @@ func TestBoostrapPeers(t *testing.T) {
 	sort.Strings(b.Peers)
 
 	for i := range b2.Peers {
-		if b2.Peers[i] != b.Peers[i] {
-			t.Fatalf("expected %s, %s", b.Peers[i], b2.Peers[i])
-		}
+		require.Equal(t, b.Peers[i], b2.Peers[i])
 	}
 }
diff --git a/config/config_test.go b/config/config_test.go
index b1896a36c..b14b57773 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -4,6 +4,8 @@ import (
 	"path/filepath"
 	"runtime"
 	"testing"
+
+	"github.com/stretchr/testify/require"
 )
 
 func TestPath(t *testing.T) {
@@ -17,32 +19,18 @@ func TestPath(t *testing.T) {
 	}
 
 	path, err := Path("", dir)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	configRoot, err := PathRoot()
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
-	if path != filepath.Join(configRoot, dir) {
-		t.Fatalf("wrong path %s:", path)
-	}
+	require.Equal(t, filepath.Join(configRoot, dir), path)
 
 	path, err = Path("altroot", dir)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if path != filepath.Join("altroot", dir) {
-		t.Fatalf("wrong path %s:", path)
-	}
+	require.NoError(t, err)
+	require.Equal(t, filepath.Join("altroot", dir), path)
 
 	path, err = Path("altroot", absdir)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if path != filepath.Clean(absdir) {
-		t.Fatalf("wrong path %s:", path)
-	}
+	require.NoError(t, err)
+	require.Equal(t, filepath.Clean(absdir), path)
 }
diff --git a/config/init_test.go b/config/init_test.go
index 326f34dcf..e15ed85fa 100644
--- a/config/init_test.go
+++ b/config/init_test.go
@@ -8,83 +8,53 @@ import (
 	"testing"
 
 	crypto_pb "github.com/libp2p/go-libp2p/core/crypto/pb"
+	"github.com/stretchr/testify/require"
 )
 
 func TestCreateIdentity(t *testing.T) {
 	id, err := CreateIdentity(io.Discard)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	pk, err := id.DecodePrivateKey("")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if pk.Type() != crypto_pb.KeyType_Ed25519 {
-		t.Fatal("unexpected type:", pk.Type())
-	}
+	require.NoError(t, err)
+	require.Equal(t, crypto_pb.KeyType_Ed25519, pk.Type())
 }
 
func TestMarshalUnmarshal(t *testing.T) {
 	cfg, err := Init(io.Discard)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	b, err := json.MarshalIndent(&cfg, " ", " ")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	t.Log("default config:\n", string(b))
 
 	cfg2 := Config{}
-	if err = json.Unmarshal(b, &cfg2); err != nil {
-		t.Fatal(err)
-	}
+	err = json.Unmarshal(b, &cfg2)
+	require.NoError(t, err)
 
-	if cfg.Identity.PeerID != cfg2.Identity.PeerID {
-		t.Fatal("identity no same")
-	}
-	if cfg.Identity.PrivKey != cfg2.Identity.PrivKey {
-		t.Fatal("private key not same")
-	}
+	require.Equal(t, cfg.Identity.PeerID, cfg2.Identity.PeerID)
+	require.Equal(t, cfg.Identity.PrivKey, cfg2.Identity.PrivKey)
 }
 
 func TestSaveLoad(t *testing.T) {
 	tmpDir := t.TempDir()
 
 	cfgFile, err := Filename(tmpDir)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
-	if filepath.Dir(cfgFile) != tmpDir {
-		t.Fatal("wrong root dir", cfgFile)
-	}
+	require.Equal(t, tmpDir, filepath.Dir(cfgFile), "wrong root dir")
 
 	cfg, err := Init(io.Discard)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	cfgBytes, err := Marshal(cfg)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	err = cfg.Save(cfgFile)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	cfg2, err := Load(cfgFile)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	cfg2Bytes, err := Marshal(cfg2)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
-	if !bytes.Equal(cfgBytes, cfg2Bytes) {
-		t.Fatal("config data different after being loaded")
-	}
+	require.True(t, bytes.Equal(cfgBytes, cfg2Bytes), "config data different after being loaded")
 }
diff --git a/dagsync/announce_test.go b/dagsync/announce_test.go
index 7e0e95eb7..a669d9731 100644
--- a/dagsync/announce_test.go
+++ b/dagsync/announce_test.go
@@ -65,9 +65,7 @@ func TestAnnounceReplace(t *testing.T) {
 	// Have the subscriber receive an announce. This is the same as if it was
 	// published by the publisher without having to wait for it to arrive.
 	err = sub.Announce(context.Background(), firstCid, srcHost.ID(), srcHost.Addrs())
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	t.Log("Sent announce for first CID", firstCid)
 
 	// This first announce should start the handler goroutine and clear the
@@ -113,15 +111,10 @@ func TestAnnounceReplace(t *testing.T) {
 	case <-time.After(updateTimeout):
 		t.Fatal("timed out waiting for sync to propagate")
 	case downstream, open := <-watcher:
-		if !open {
-			t.Fatal("event channle closed without receiving event")
-		}
-		if !downstream.Cid.Equals(firstCid) {
-			t.Fatalf("sync returned unexpected first cid %s, expected %s", downstream.Cid, firstCid)
-		}
-		if _, err = dstStore.Get(context.Background(), datastore.NewKey(downstream.Cid.String())); err != nil {
-			t.Fatalf("data not in receiver store: %s", err)
-		}
+		require.True(t, open, "event channel closed without receiving event")
+		require.Equal(t, firstCid, downstream.Cid, "sync returned unexpected first cid")
+		_, err = dstStore.Get(context.Background(), datastore.NewKey(downstream.Cid.String()))
+		require.NoError(t, err, "data not in receiver store")
 		t.Log("Received sync notification for first CID:", firstCid)
 	}
 
@@ -130,15 +123,10 @@ func TestAnnounceReplace(t *testing.T) {
 	case <-time.After(updateTimeout):
 		t.Fatal("timed out waiting for sync to propagate")
 	case downstream, open := <-watcher:
-		if !open {
-			t.Fatal("event channle closed without receiving event")
-		}
-		if !downstream.Cid.Equals(lastCid) {
-			t.Fatalf("sync returned unexpected last cid %s, expected %s", downstream.Cid, lastCid)
-		}
-		if _, err = dstStore.Get(context.Background(), datastore.NewKey(downstream.Cid.String())); err != nil {
-			t.Fatalf("data not in receiver store: %s", err)
-		}
+		require.True(t, open, "event channel closed without receiving event")
+		require.Equal(t, lastCid, downstream.Cid, "sync returned unexpected last cid")
+		_, err = dstStore.Get(context.Background(), datastore.NewKey(downstream.Cid.String()))
+		require.NoError(t, err, "data not in receiver store")
 		t.Log("Received sync notification for last CID:", lastCid)
 	}
 
@@ -146,10 +134,9 @@ func TestAnnounceReplace(t *testing.T) {
 	select {
 	case <-time.After(3 * time.Second):
 	case changeEvent, open := <-watcher:
-		if open {
-			t.Fatalf("no exchange should have been performed, but got change from peer %s for cid %s",
-				changeEvent.PeerID, changeEvent.Cid)
-		}
+		require.Falsef(t, open,
+			"no exchange should have been performed, but got change from peer %s for cid %s",
+			changeEvent.PeerID, changeEvent.Cid)
 	}
 }
diff --git a/dagsync/http_test.go b/dagsync/http_test.go
index f3914abc4..82a0d8efc 100644
--- a/dagsync/http_test.go
+++ b/dagsync/http_test.go
@@ -3,7 +3,6 @@ package dagsync_test
 import (
 	"context"
 	"crypto/rand"
-	"strings"
 	"testing"
 	"time"
 
@@ -21,6 +20,7 @@ import (
 	"github.com/libp2p/go-libp2p/core/host"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/multiformats/go-multiaddr"
+	"github.com/stretchr/testify/require"
 )
 
 type httpTestEnv struct {
@@ -36,17 +36,13 @@ type httpTestEnv struct {
 
 func setupPublisherSubscriber(t *testing.T, subscriberOptions []dagsync.Option) httpTestEnv {
 	srcPrivKey, _, err := ic.GenerateECDSAKeyPair(rand.Reader)
-	if err != nil {
-		t.Fatal("Err generating private key", err)
-	}
+	require.NoError(t, err, "Err generating private key")
 	srcHost = test.MkTestHost(libp2p.Identity(srcPrivKey))
 	srcStore := dssync.MutexWrap(datastore.NewMapDatastore())
 	srcLinkSys := test.MkLinkSystem(srcStore)
 	httpPub, err := httpsync.NewPublisher("127.0.0.1:0", srcLinkSys, srcPrivKey)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	t.Cleanup(func() {
 		httpPub.Close()
 	})
@@ -57,9 +53,7 @@ func setupPublisherSubscriber(t *testing.T, subscriberOptions []dagsync.Option)
 	dstHost := test.MkTestHost()
 
 	sub, err := dagsync.NewSubscriber(dstHost, dstStore, dstLinkSys, testTopic, nil, subscriberOptions...)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	t.Cleanup(func() {
 		sub.Close()
 	})
@@ -86,62 +80,41 @@ func TestManualSync(t *testing.T) {
 	te := setupPublisherSubscriber(t, []dagsync.Option{dagsync.BlockHook(blockHook)})
 
 	rootLnk, err := test.Store(te.srcStore, basicnode.NewString("hello world"))
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := te.pub.UpdateRoot(context.Background(), rootLnk.(cidlink.Link).Cid); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
+	err = te.pub.UpdateRoot(context.Background(), rootLnk.(cidlink.Link).Cid)
+	require.NoError(t, err)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()
 
 	syncCid, err := te.sub.Sync(ctx, te.srcHost.ID(), cid.Undef, nil, te.pubAddr)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
-	if !syncCid.Equals(rootLnk.(cidlink.Link).Cid) {
-		t.Fatalf("didn't get expected cid. expected %s, got %s", rootLnk, syncCid)
-	}
+	require.Equal(t, rootLnk.(cidlink.Link).Cid, syncCid)
 
 	_, ok := blocksSeenByHook[syncCid]
-	if !ok {
-		t.Fatal("hook did not get", syncCid)
-	}
+	require.True(t, ok, "hook did not get", syncCid)
 }
 
 func TestSyncHttpFailsUnexpectedPeer(t *testing.T) {
 	te := setupPublisherSubscriber(t, nil)
 	rootLnk, err := test.Store(te.srcStore, basicnode.NewString("hello world"))
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := te.pub.UpdateRoot(context.Background(), rootLnk.(cidlink.Link).Cid); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
+	err = te.pub.UpdateRoot(context.Background(), rootLnk.(cidlink.Link).Cid)
+	require.NoError(t, err)
 
 	ctx, cancel := context.WithTimeout(context.Background(), updateTimeout)
 	defer cancel()
 	_, otherPubKey, err := ic.GenerateECDSAKeyPair(rand.Reader)
-	if err != nil {
-		t.Fatal("failed to make another peerid")
-	}
+	require.NoError(t, err, "failed to make another peerid")
 	otherPeerID, err := peer.IDFromPublicKey(otherPubKey)
-	if err != nil {
-		t.Fatal("failed to make another peerid")
-	}
+	require.NoError(t, err, "failed to make another peerid")
 
 	// This fails because the head msg is signed by srcHost.ID(), but we are asking this to check if it's signed by otherPeerID.
 	_, err = te.sub.Sync(ctx, otherPeerID, cid.Undef, nil, te.pubAddr)
-	if err == nil {
-		t.Fatalf("expected error, got nil")
-	}
-	if !strings.Contains(err.Error(), "unexpected peer") {
-		t.Fatalf("expected error to contain the string 'unexpected peer', got %s", err.Error())
-	}
+	require.ErrorContains(t, err, "unexpected peer")
 }
 
 func TestSyncFnHttp(t *testing.T) {
@@ -165,13 +138,8 @@ func TestSyncFnHttp(t *testing.T) {
 	ctx, syncncl := context.WithTimeout(context.Background(), time.Second)
 	defer syncncl()
 
-	var err error
-	if _, err = te.sub.Sync(ctx, te.srcHost.ID(), cids[0], nil, te.pubAddr); err == nil {
-		t.Fatal("expected error when no content to sync")
-	}
-	if !strings.Contains(err.Error(), "failed to traverse requested dag") {
-		t.Fatalf("expected error to contain the string 'failed to traverse requested dag', got %s", err.Error())
-	}
+	_, err := te.sub.Sync(ctx, te.srcHost.ID(), cids[0], nil, te.pubAddr)
+	require.ErrorContains(t, err, "failed to traverse requested dag")
 	syncncl()
 
 	select {
@@ -182,9 +150,8 @@ func TestSyncFnHttp(t *testing.T) {
 
 	// Assert the latestSync is updated by explicit sync when cid and selector are unset.
 	newHead := chainLnks[0].(cidlink.Link).Cid
-	if err = te.pub.UpdateRoot(context.Background(), newHead); err != nil {
-		t.Fatal(err)
-	}
+	err = te.pub.UpdateRoot(context.Background(), newHead)
+	require.NoError(t, err)
 
 	lnk := chainLnks[1]
 
@@ -194,59 +161,41 @@ func TestSyncFnHttp(t *testing.T) {
 	ctx, syncncl = context.WithTimeout(context.Background(), updateTimeout)
 	defer syncncl()
 	syncCid, err := te.sub.Sync(ctx, te.srcHost.ID(), lnk.(cidlink.Link).Cid, nil, te.pubAddr)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
-	if !syncCid.Equals(lnk.(cidlink.Link).Cid) {
-		t.Fatalf("sync'd cid unexpected %s vs %s", syncCid, lnk)
-	}
-	if _, err = te.dstStore.Get(context.Background(), datastore.NewKey(syncCid.String())); err != nil {
-		t.Fatalf("data not in receiver store: %v", err)
-	}
+	require.Equal(t, lnk.(cidlink.Link).Cid, syncCid, "sync'd cid unexpected")
+	_, err = te.dstStore.Get(context.Background(), datastore.NewKey(syncCid.String()))
+	require.NoError(t, err, "data not in receiver store")
 	syncncl()
 
 	_, ok := blocksSeenByHook[lnk.(cidlink.Link).Cid]
-	if !ok {
-		t.Fatal("block hook did not see link cid")
-	}
-	if blockHookCalls != 11 {
-		t.Fatalf("expected 11 block hook calls, got %d", blockHookCalls)
-	}
+	require.True(t, ok, "block hook did not see link cid")
+	require.Equal(t, 11, blockHookCalls)
 
 	// Assert the latestSync is not updated by explicit sync when cid is set
-	if te.sub.GetLatestSync(te.srcHost.ID()) != nil && assertLatestSyncEquals(te.sub, te.srcHost.ID(), curLatestSync.(cidlink.Link).Cid) != nil {
-		t.Fatal("Sync should not update latestSync")
+	if te.sub.GetLatestSync(te.srcHost.ID()) != nil {
+		err = assertLatestSyncEquals(te.sub, te.srcHost.ID(), curLatestSync.(cidlink.Link).Cid)
+		require.NoError(t, err, "Sync should not update latestSync")
 	}
 
 	ctx, syncncl = context.WithTimeout(context.Background(), updateTimeout)
 	defer syncncl()
 	syncCid, err = te.sub.Sync(ctx, te.srcHost.ID(), cid.Undef, nil, te.pubAddr)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if !syncCid.Equals(newHead) {
-		t.Fatalf("sync'd cid unexpected %s vs %s", syncCid, lnk)
-	}
-	if _, err = te.dstStore.Get(context.Background(), datastore.NewKey(syncCid.String())); err != nil {
-		t.Fatalf("data not in receiver store: %v", err)
-	}
+	require.NoError(t, err)
+	require.Equal(t, newHead, syncCid, "sync'd cid unexpected")
+	_, err = te.dstStore.Get(context.Background(), datastore.NewKey(syncCid.String()))
+	require.NoError(t, err, "data not in receiver store")
 	syncncl()
 
 	select {
 	case <-time.After(updateTimeout):
 		t.Fatal("timed out waiting for sync from published update")
 	case syncFin, open := <-watcher:
-		if !open {
-			t.Fatal("sync finished channel closed with no event")
-		}
-		if syncFin.Cid != newHead {
-			t.Fatalf("Should have been updated to %s, got %s", newHead, syncFin.Cid)
-		}
+		require.True(t, open, "sync finished channel closed with no event")
+		require.Equal(t, newHead, syncFin.Cid)
 	}
 	cancelWatcher()
 
-	if err = assertLatestSyncEquals(te.sub, te.srcHost.ID(), newHead); err != nil {
-		t.Fatal(err)
-	}
+	err = assertLatestSyncEquals(te.sub, te.srcHost.ID(), newHead)
+	require.NoError(t, err)
 }
diff --git a/dagsync/httpsync/maconv/convert_test.go b/dagsync/httpsync/maconv/convert_test.go
index 90dd248cf..3122328d6 100644
--- a/dagsync/httpsync/maconv/convert_test.go
+++ b/dagsync/httpsync/maconv/convert_test.go
@@ -5,6 +5,7 @@ import (
 	"testing"
 
 	"github.com/multiformats/go-multiaddr"
+	"github.com/stretchr/testify/require"
 )
 
 func TestRoundtrip(t *testing.T) {
@@ -19,22 +20,12 @@ func TestRoundtrip(t *testing.T) {
 	for _, s := range samples {
 		u, _ := url.Parse(s)
 		mu, err := ToMultiaddr(u)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 		u2, err := ToURL(mu)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if u2.Scheme != u.Scheme {
-			t.Fatalf("scheme didn't roundtrip. got %s expected %s", u2.Scheme, u.Scheme)
-		}
-		if u2.Host != u.Host {
-			t.Fatalf("host didn't roundtrip. got %s, expected %s", u2.Host, u.Host)
-		}
-		if u2.Path != u.Path {
-			t.Fatalf("path didn't roundtrip. got %s, expected %s", u2.Path, u.Path)
-		}
+		require.NoError(t, err)
+		require.Equal(t, u.Scheme, u2.Scheme, "scheme didn't roundtrip")
+		require.Equal(t, u.Host, u2.Host, "host didn't roundtrip")
+		require.Equal(t, u.Path, u2.Path, "path didn't roundtrip")
 	}
 }
 
@@ -59,16 +50,10 @@ func TestTLSProtos(t *testing.T) {
 
 	for i := range samples {
 		m, err := multiaddr.NewMultiaddr(samples[i])
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 		u, err := ToURL(m)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if u.String() != expect[i] {
-			t.Fatalf("expected %s to convert to url %s, got %s", m.String(), expect[i], u.String())
-		}
+		require.NoError(t, err)
+		require.Equal(t, expect[i], u.String(), "Did not convert to expected URL")
 	}
 }
diff --git a/dagsync/httpsync/message_test.go b/dagsync/httpsync/message_test.go
index f38637e07..8efa975e9 100644
--- a/dagsync/httpsync/message_test.go
+++ b/dagsync/httpsync/message_test.go
@@ -7,71 +7,48 @@ import (
 
 	"github.com/ipfs/go-cid"
 	ic "github.com/libp2p/go-libp2p/core/crypto"
+	"github.com/stretchr/testify/require"
 )
 
 func TestRoundTripSignedHead(t *testing.T) {
 	privKey, pubKey, err := ic.GenerateEd25519Key(rand.Reader)
-	if err != nil {
-		t.Fatal("Err generarting private key", err)
-	}
+	require.NoError(t, err, "Err generating private key")
 
 	testCid, err := cid.Parse("bafybeicyhbhhklw3kdwgrxmf67mhkgjbsjauphsvrzywav63kn7bkpmqfa")
-	if err != nil {
-		t.Fatal("Err parsing cid", err)
-	}
+	require.NoError(t, err, "Err parsing cid")
 
 	signed, err := newEncodedSignedHead(testCid, privKey)
-	if err != nil {
-		t.Fatal("Err creating signed envelope", err)
-	}
+	require.NoError(t, err, "Err creating signed envelope")
 
 	cidRT, err := openSignedHead(pubKey, bytes.NewReader(signed))
-	if err != nil {
-		t.Fatal("Err Opening msg envelope", err)
-	}
+	require.NoError(t, err, "Err opening msg envelope")
 
-	if cidRT != testCid {
-		t.Fatal("CidStr mismatch. Failed round trip")
-	}
+	require.Equal(t, testCid, cidRT, "CidStr mismatch. Failed round trip")
 }
 
 func TestRoundTripSignedHeadWithIncludedPubKey(t *testing.T) {
 	privKey, pubKey, err := ic.GenerateECDSAKeyPair(rand.Reader)
-	if err != nil {
-		t.Fatal("Err generarting private key", err)
-	}
+	require.NoError(t, err, "Err generating private key")
 
 	testCid, err := cid.Parse("bafybeicyhbhhklw3kdwgrxmf67mhkgjbsjauphsvrzywav63kn7bkpmqfa")
-	if err != nil {
-		t.Fatal("Err parsing cid", err)
-	}
+	require.NoError(t, err, "Err parsing cid")
 
 	signed, err := newEncodedSignedHead(testCid, privKey)
-	if err != nil {
-		t.Fatal("Err creating signed envelope", err)
-	}
+	require.NoError(t, err, "Err creating signed envelope")
 
 	includedPubKey, head, err := openSignedHeadWithIncludedPubKey(bytes.NewReader(signed))
-	if err != nil {
-		t.Fatal("Err Opening msg envelope", err)
-	}
+	require.NoError(t, err, "Err opening msg envelope")
 
-	if head != testCid {
-		t.Fatal("CidStr mismatch. Failed round trip")
-	}
+	require.Equal(t, testCid, head, "CidStr mismatch. Failed round trip")
 
-	if !includedPubKey.Equals(pubKey) {
-		t.Fatal("pubkey mismatch. Failed round trip")
-	}
+	require.Equal(t, pubKey, includedPubKey, "pubkey mismatch. Failed round trip")
 
 	// Try with a pubkey that doesn't match
 	_, otherPubKey, err := ic.GenerateECDSAKeyPair(rand.Reader)
-	if err != nil {
-		t.Fatal("Err generarting other key", err)
-	}
+	require.NoError(t, err, "Err generating other key")
 
 	_, err = openSignedHead(otherPubKey, bytes.NewReader(signed))
-	if err == nil || err.Error() != "invalid signature" {
-		t.Fatal("Expected an error when opening envelope with another pubkey. And the error should be 'invalid signature'")
-	}
+	require.Error(t, err)
+	require.ErrorContains(t, err, "invalid signature",
+		"Expected an error when opening envelope with another pubkey. And the error should be 'invalid signature'")
 }
diff --git a/dagsync/legs_test.go b/dagsync/legs_test.go
index eaa0737f7..9a7080508 100644
--- a/dagsync/legs_test.go
+++ b/dagsync/legs_test.go
@@ -78,9 +78,7 @@ func TestAllowPeerReject(t *testing.T) {
 	srcStore := dssync.MutexWrap(datastore.NewMapDatastore())
 	dstStore := dssync.MutexWrap(datastore.NewMapDatastore())
 	srcHost, dstHost, pub, sub, err := initPubSub(t, srcStore, dstStore)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	defer srcHost.Close()
 	defer dstHost.Close()
 	defer pub.Close()
@@ -99,16 +97,12 @@ func TestAllowPeerReject(t *testing.T) {
 
 	// Update root with item
 	err = pub.UpdateRoot(context.Background(), c)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	select {
 	case <-time.After(3 * time.Second):
 	case _, open := <-watcher:
-		if open {
-			t.Fatal("something was exchanged, and that is wrong")
-		}
+		require.False(t, open, "something was exchanged, and that is wrong")
 	}
 }
 
@@ -118,9 +112,7 @@ func TestAllowPeerAllows(t *testing.T) {
 	srcStore := dssync.MutexWrap(datastore.NewMapDatastore())
 	dstStore := dssync.MutexWrap(datastore.NewMapDatastore())
 	srcHost, dstHost, pub, sub, err := initPubSub(t, srcStore, dstStore)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	defer srcHost.Close()
 	defer dstHost.Close()
 	defer pub.Close()
@@ -138,9 +130,7 @@ func TestAllowPeerAllows(t *testing.T) {
 
 	// Update root with item
 	err = pub.UpdateRoot(context.Background(), c)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	select {
 	case <-time.After(updateTimeout):
@@ -239,39 +229,28 @@ func TestIdleHandlerCleaner(t *testing.T) {
 	te := setupPublisherSubscriber(t, []dagsync.Option{dagsync.BlockHook(blockHook), dagsync.IdleHandlerTTL(ttl)})
 
 	rootLnk, err := test.Store(te.srcStore, basicnode.NewString("hello world"))
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := te.pub.UpdateRoot(context.Background(), rootLnk.(cidlink.Link).Cid); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
+	err = te.pub.UpdateRoot(context.Background(), rootLnk.(cidlink.Link).Cid)
+	require.NoError(t, err)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()
 
 	// Do a sync to create the handler.
 	_, err = te.sub.Sync(ctx, te.srcHost.ID(), cid.Undef, nil, te.pubAddr)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// Check that the handler is preeent by seeing if it can be removed.
-	if !te.sub.RemoveHandler(te.srcHost.ID()) {
-		t.Fatal("Expected handler to be present")
-	}
+	require.True(t, te.sub.RemoveHandler(te.srcHost.ID()), "Expected handler to be present")
 
 	// Do another sync to re-create the handler.
 	_, err = te.sub.Sync(ctx, te.srcHost.ID(), cid.Undef, nil, te.pubAddr)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// For long enough for the idle cleaner to remove the handler, and check
 	// that it was removed.
 	time.Sleep(3 * ttl)
-	if te.sub.RemoveHandler(te.srcHost.ID()) {
-		t.Fatal("Expected handler to already be removed")
-	}
+	require.False(t, te.sub.RemoveHandler(te.srcHost.ID()), "Expected handler to already be removed")
 }
 
 func mkLnk(t *testing.T, srcStore datastore.Batching) cid.Cid {
@@ -279,27 +258,15 @@ func mkLnk(t *testing.T, srcStore datastore.Batching) cid.Cid {
 	np := basicnode.Prototype__Any{}
 	nb := np.NewBuilder()
 	ma, _ := nb.BeginMap(2)
-	err := ma.AssembleKey().AssignString("hey")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err = ma.AssembleValue().AssignString("it works!"); err != nil {
-		t.Fatal(err)
-	}
-	if err = ma.AssembleKey().AssignString("yes"); err != nil {
-		t.Fatal(err)
-	}
-	if err = ma.AssembleValue().AssignBool(true); err != nil {
-		t.Fatal(err)
-	}
-	if err = ma.Finish(); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, ma.AssembleKey().AssignString("hey"))
+	require.NoError(t, ma.AssembleValue().AssignString("it works!"))
+	require.NoError(t, ma.AssembleKey().AssignString("yes"))
+	require.NoError(t, ma.AssembleValue().AssignBool(true))
+	require.NoError(t, ma.Finish())
+
 	n := nb.Build()
 	lnk, err := test.Store(srcStore, n)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	return lnk.(cidlink.Link).Cid
 }
diff --git a/dagsync/p2p/protocol/head/head_test.go b/dagsync/p2p/protocol/head/head_test.go
index bbb7bba57..822ccd4a3 100644
--- a/dagsync/p2p/protocol/head/head_test.go
+++ b/dagsync/p2p/protocol/head/head_test.go
@@ -15,6 +15,7 @@ import (
 	"github.com/ipni/storetheindex/dagsync/p2p/protocol/head"
 	"github.com/ipni/storetheindex/dagsync/test"
 	"github.com/multiformats/go-multiaddr"
+	"github.com/stretchr/testify/require"
 )
 
 func TestFetchLatestHead(t *testing.T) {
@@ -30,9 +31,7 @@ func TestFetchLatestHead(t *testing.T) {
 		if strings.HasPrefix(a.String(), ipPrefix) {
 			addrStr := "/dns4/localhost/tcp/" + a.String()[len(ipPrefix):]
 			addr, err := multiaddr.NewMultiaddr(addrStr)
-			if err != nil {
-				t.Fatal(err)
-			}
+			require.NoError(t, err)
 			addrs = append(addrs, addr)
 			break
 		}
@@ -43,9 +42,7 @@ func TestFetchLatestHead(t *testing.T) {
 
 	publisherStore := dssync.MutexWrap(datastore.NewMapDatastore())
 	rootLnk, err := test.Store(publisherStore, basicnode.NewString("hello world"))
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	p := head.NewPublisher()
 	go p.Serve(publisher, "test")
@@ -55,20 +52,14 @@ func TestFetchLatestHead(t *testing.T) {
 	defer cancel()
 
 	c, err := head.QueryRootCid(ctx, client, "test", publisher.ID())
-	if err != nil && c != cid.Undef {
-		t.Fatal("Expected to get a nil error and a cid undef because there is no root")
-	}
+	require.NoError(t, err)
+	require.Equal(t, cid.Undef, c, "Expected cid undef because there is no root")
 
-	if err := p.UpdateRoot(context.Background(), rootLnk.(cidlink.Link).Cid); err != nil {
-		t.Fatal(err)
-	}
+	err = p.UpdateRoot(context.Background(), rootLnk.(cidlink.Link).Cid)
+	require.NoError(t, err)
 
 	c, err = head.QueryRootCid(ctx, client, "test", publisher.ID())
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
-	if !c.Equals(rootLnk.(cidlink.Link).Cid) {
-		t.Fatalf("didn't get expected cid. expected %s, got %s", rootLnk, c)
-	}
+	require.Equal(t, rootLnk.(cidlink.Link).Cid, c)
 }
diff --git a/dagsync/subscriber_test.go b/dagsync/subscriber_test.go
index 4f47182dc..3c13b2fee 100644
--- a/dagsync/subscriber_test.go
+++ b/dagsync/subscriber_test.go
@@ -78,12 +78,11 @@ func TestScopedBlockHook(t *testing.T) {
 		}))
 		require.NoError(t, err)
 
-		if atomic.LoadInt64(&calledGeneralBlockHookTimes) != 0 {
-			t.Fatalf("General block hook should not have been called when scoped block hook is set")
-		}
-		if atomic.LoadInt64(&calledScopedBlockHookTimes) != int64(ll.Length) {
-			t.Fatalf("Didn't call scoped block hook enough times")
-		}
+		require.Zero(t, atomic.LoadInt64(&calledGeneralBlockHookTimes),
+			"General block hook should not have been called when scoped block hook is set")
+
+		require.Equal(t, int64(ll.Length), atomic.LoadInt64(&calledScopedBlockHookTimes),
+			"Didn't call scoped block hook enough times")
 
 		anotherLL := llBuilder{
 			Length: ll.Length,
@@ -96,10 +95,8 @@ func TestScopedBlockHook(t *testing.T) {
 		_, err = sub.Sync(context.Background(), pubHost.ID(), cid.Undef, nil, pubHost.Addrs()[0])
 		require.NoError(t, err)
 
-		if atomic.LoadInt64(&calledGeneralBlockHookTimes) != int64(ll.Length) {
-			t.Fatalf("General hook should have been called only in secod sync")
-		}
-
+		require.Equal(t, int64(ll.Length), atomic.LoadInt64(&calledGeneralBlockHookTimes),
+			"General hook should have been called only in second sync")
 	})
 }, &quick.Config{
 	MaxCount: 3,
@@ -138,13 +135,11 @@ func TestSyncedCidsReturned(t *testing.T) {
 		require.NoError(t, err)
 
 		finishedVal := <-onFinished
-		if len(finishedVal.SyncedCids) != int(ll.Length) {
-			t.Fatalf("The finished value should include %d synced cids, but has %d", ll.Length, len(finishedVal.SyncedCids))
-		}
+		require.Equalf(t, int(ll.Length), len(finishedVal.SyncedCids),
+			"The finished value should include %d synced cids, but has %d", ll.Length, len(finishedVal.SyncedCids))
 
-		if finishedVal.SyncedCids[0] != head.(cidlink.Link).Cid {
-			t.Fatal("The latest synced cid should be the head and first in the list")
-		}
+		require.Equal(t, head.(cidlink.Link).Cid, finishedVal.SyncedCids[0],
+			"The latest synced cid should be the head and first in the list")
 	})
 }, &quick.Config{
 	MaxCount: 3,
@@ -222,9 +217,8 @@ func TestConcurrentSync(t *testing.T) {
 		case <-doneChan:
 		}
 
-		if atomic.LoadInt64(&calledTimes) != int64(ll.Length)*int64(publisherCount) {
-			t.Fatalf("Didn't call block hook for each publisher. Expected %d saw %d", int(ll.Length)*publisherCount, calledTimes)
-		}
+		require.Equalf(t, int64(ll.Length)*int64(publisherCount), atomic.LoadInt64(&calledTimes),
+			"Didn't call block hook for each publisher. Expected %d saw %d", int(ll.Length)*publisherCount, calledTimes)
 	})
 }, &quick.Config{
 	MaxCount: 3,
@@ -255,36 +249,24 @@ func TestSync(t *testing.T) {
 		}
 
 		err := pub.UpdateRoot(context.Background(), head.(cidlink.Link).Cid)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 
 		_, err = sub.Sync(context.Background(), pubSys.host.ID(), cid.Undef, nil, pubAddr)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 		calledTimesFirstSync := calledTimes
 		latestSync := sub.GetLatestSync(pubSys.host.ID())
-		if latestSync != head {
-			t.Fatalf("Subscriber did not persist latest sync")
-		}
+		require.Equal(t, head, latestSync, "Subscriber did not persist latest sync")
 
 		// Now sync again. We shouldn't call the hook.
 		_, err = sub.Sync(context.Background(), pubSys.host.ID(), cid.Undef, nil, pubAddr)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if calledTimesFirstSync != calledTimes {
-			t.Fatalf("Subscriber called the block hook multiple times for the same sync. Expected %d, got %d", calledTimesFirstSync, calledTimes)
-		}
-
+		require.NoError(t, err)
+		require.Equalf(t, calledTimesFirstSync, calledTimes,
+			"Subscriber called the block hook multiple times for the same sync. Expected %d, got %d", calledTimesFirstSync, calledTimes)
 		require.Equal(t, int(ll.Length), calledTimes, "Subscriber did not call the block hook exactly once for each block")
 	})
 }, &quick.Config{
 	MaxCount: 5,
 })
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 }
 
 // TestSyncWithHydratedDataStore tests what happens if we call sync when the
@@ -326,9 +308,7 @@ func TestSyncWithHydratedDataStore(t *testing.T) {
 		}
 
 		err = pub.UpdateRoot(context.Background(), head.(cidlink.Link).Cid)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 
 		// Sync once to hydrate the datastore
 		// Note we set the cid we are syncing to so we don't update the latestSync.
@@ -341,17 +321,13 @@ func TestSyncWithHydratedDataStore(t *testing.T) {
 
 		// Now sync again. We might call the hook because we don't have the latestSync persisted.
 		_, err = sub.Sync(context.Background(), pubSys.host.ID(), cid.Undef, nil, pubAddr)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 		require.GreaterOrEqual(t, calledTimes, calledTimesFirstSync, "Expected to have called block hook twice. Once for each sync.")
 	})
 }, &quick.Config{
 	MaxCount: 5,
 })
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 }
 
 func TestRoundTripSimple(t *testing.T) {
@@ -359,9 +335,7 @@ func TestRoundTripSimple(t *testing.T) {
 	srcStore := dssync.MutexWrap(datastore.NewMapDatastore())
 	dstStore := dssync.MutexWrap(datastore.NewMapDatastore())
 	srcHost, dstHost, pub, sub, err := initPubSub(t, srcStore, dstStore)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	defer srcHost.Close()
 	defer dstHost.Close()
 	defer pub.Close()
@@ -373,24 +347,19 @@ func TestRoundTripSimple(t *testing.T) {
 
 	// Update root with item
 	itm := basicnode.NewString("hello world")
 	lnk, err := test.Store(srcStore, itm)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
-	if err := pub.UpdateRoot(context.Background(), lnk.(cidlink.Link).Cid); err != nil {
-		t.Fatal(err)
-	}
+	err = pub.UpdateRoot(context.Background(), lnk.(cidlink.Link).Cid)
+	require.NoError(t, err)
 
 	select {
 	case <-time.After(updateTimeout):
 		t.Fatal("timed out waiting for sync to propagate")
 	case downstream := <-watcher:
-		if !downstream.Cid.Equals(lnk.(cidlink.Link).Cid) {
-			t.Fatalf("sync'd cid unexpected %s vs %s", downstream.Cid, lnk)
-		}
-		if _, err := dstStore.Get(context.Background(), datastore.NewKey(downstream.Cid.String())); err != nil {
-			t.Fatalf("data not in receiver store: %v", err)
-		}
+		require.Equalf(t, lnk.(cidlink.Link).Cid, downstream.Cid,
+			"sync'd cid unexpected %s vs %s", downstream.Cid, lnk)
+		_, err = dstStore.Get(context.Background(), datastore.NewKey(downstream.Cid.String()))
+		require.NoError(t, err, "data not in receiver store")
 	}
 }
 
@@ -463,9 +432,8 @@ func TestRoundTrip(t *testing.T) {
 	waitForSync(t, "Watcher 1", dstStore, lnk2.(cidlink.Link), watcher1)
 	waitForSync(t, "Watcher 2", dstStore, lnk2.(cidlink.Link), watcher2)
 
-	if len(blocksSeenByHook) != 2 {
-		t.Fatal("expected 2 blocks seen by hook, got", len(blocksSeenByHook))
-	}
+	require.Equal(t, 2, len(blocksSeenByHook))
+
 	_, ok := blocksSeenByHook[lnk1.(cidlink.Link).Cid]
 	require.True(t, ok, "hook did not see link1")
 
@@ -498,26 +466,18 @@ func TestHttpPeerAddrPeerstore(t *testing.T) {
 	head := nextLL
 
 	err := pub.UpdateRoot(context.Background(), prevHead.(cidlink.Link).Cid)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	_, err = sub.Sync(context.Background(), pubHostSys.host.ID(), cid.Undef, nil, pubAddr)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	err = pub.UpdateRoot(context.Background(), head.(cidlink.Link).Cid)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// Now call sync again with no address. The subscriber should re-use the
 	// previous address and succeeed.
 	_, err = sub.Sync(context.Background(), pubHostSys.host.ID(), cid.Undef, nil, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 }
 
 func TestRateLimiter(t *testing.T) {
@@ -561,9 +521,7 @@ func TestRateLimiter(t *testing.T) {
 	ll := llB.Build(t, pubHostSys.lsys)
 
 	err := pub.SetRoot(context.Background(), ll.(cidlink.Link).Cid)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	start := time.Now()
 	_, err = sub.Sync(context.Background(), pubHostSys.host.ID(), cid.Undef, nil, pubAddr)
@@ -605,24 +563,16 @@ func TestBackpressureDoesntDeadlock(t *testing.T) {
 	defer cncl()
 
 	err := pub.UpdateRoot(context.Background(), prevHead.(cidlink.Link).Cid)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	_, err = sub.Sync(context.Background(), pubHostSys.host.ID(), cid.Undef, nil, pubAddr)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	err = pub.UpdateRoot(context.Background(), head.(cidlink.Link).Cid)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	_, err = sub.Sync(context.Background(), pubHostSys.host.ID(), cid.Undef, nil, pubAddr)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	head = llBuilder{
 		Length: 1,
@@ -630,9 +580,7 @@ func TestBackpressureDoesntDeadlock(t *testing.T) {
 	}.BuildWithPrev(t, pubHostSys.lsys, head)
 
 	err = pub.UpdateRoot(context.Background(), head.(cidlink.Link).Cid)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// This is blocked until we read from onSyncFinishedChan
 	syncDoneCh := make(chan error)
@@ -692,12 +640,9 @@ func waitForSync(t *testing.T, logPrefix string, store *dssync.MutexDatastore, e
 	case <-time.After(updateTimeout):
 		t.Fatal("timed out waiting for sync to propogate")
 	case downstream := <-watcher:
-		if !downstream.Cid.Equals(expectedCid.Cid) {
-			t.Fatalf("sync'd cid unexpected %s vs %s", downstream, expectedCid.Cid)
-		}
-		if _, err := store.Get(context.Background(), datastore.NewKey(downstream.Cid.String())); err != nil {
-			t.Fatalf("data not in receiver store: %v", err)
-		}
+		require.Equal(t, expectedCid.Cid, downstream.Cid, "sync'd cid unexpected")
+		_, err := store.Get(context.Background(), datastore.NewKey(downstream.Cid.String()))
+		require.NoError(t, err, "data not in receiver store")
 		t.Log(logPrefix+" got sync:", downstream.Cid)
 	}
 
@@ -709,31 +654,23 @@ func TestCloseSubscriber(t *testing.T) {
 	lsys := test.MkLinkSystem(st)
 
 	sub, err := dagsync.NewSubscriber(sh, st, lsys, testTopic, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	watcher, cncl := sub.OnSyncFinished()
 	defer cncl()
 
 	err = sub.Close()
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	select {
 	case _, open := <-watcher:
-		if open {
-			t.Fatal("Watcher channel should have been closed")
-		}
+		require.False(t, open, "Watcher channel should have been closed")
 	case <-time.After(updateTimeout):
t.Fatal("timed out waiting for watcher to close") } err = sub.Close() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) done := make(chan struct{}) go func() { @@ -822,43 +759,27 @@ func (b llBuilder) BuildWithPrev(t *testing.T, lsys ipld.LinkSystem, prev datamo p := basicnode.Prototype.Map b := p.NewBuilder() ma, err := b.BeginMap(2) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) eb, err := ma.AssembleEntry("Value") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) err = eb.AssignInt(int64(rng.Intn(100))) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) eb, err = ma.AssembleEntry("Next") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if prev != nil { err = eb.AssignLink(prev) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } else { err = eb.AssignNull() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } err = ma.Finish() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) n := b.Build() prev, err = lsys.Store(linking.LinkContext{}, linkproto, n) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } return prev diff --git a/dagsync/sync_test.go b/dagsync/sync_test.go index ea43926f8..473cf59c0 100644 --- a/dagsync/sync_test.go +++ b/dagsync/sync_test.go @@ -44,15 +44,11 @@ func TestLatestSyncSuccess(t *testing.T) { defer pub.Close() sub, err := dagsync.NewSubscriber(dstHost, dstStore, dstLnkS, testTopic, nil, dagsync.Topic(topics[1])) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer sub.Close() err = test.WaitForPublisher(dstHost, topics[0].String(), srcHost.ID()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) watcher, cncl := sub.OnSyncFinished() defer cncl() @@ -61,17 +57,11 @@ func TestLatestSyncSuccess(t *testing.T) { chainLnks := test.MkChain(srcLnkS, true) err = newUpdateTest(pub, sub, dstStore, watcher, srcHost.ID(), chainLnks[2], false, chainLnks[2].(cidlink.Link).Cid) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) err = newUpdateTest(pub, sub, dstStore, watcher, srcHost.ID(), chainLnks[1], false, chainLnks[1].(cidlink.Link).Cid) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) err = newUpdateTest(pub, sub, dstStore, watcher, srcHost.ID(), chainLnks[0], false, chainLnks[0].(cidlink.Link).Cid) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } func TestSyncFn(t *testing.T) { @@ -307,9 +297,7 @@ func TestLatestSyncFailure(t *testing.T) { defer srcHost.Close() srcLnkS := test.MkLinkSystem(srcStore) pub, err := dtsync.NewPublisher(srcHost, srcStore, srcLnkS, testTopic) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer pub.Close() chainLnks := test.MkChain(srcLnkS, true) @@ -324,54 +312,39 @@ func TestLatestSyncFailure(t *testing.T) { t.Log("targer host:", dstHost.ID()) sub, err := dagsync.NewSubscriber(dstHost, dstStore, dstLnkS, testTopic, nil) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer sub.Close() - if err := srcHost.Connect(context.Background(), dstHost.Peerstore().PeerInfo(dstHost.ID())); err != nil { - t.Fatal(err) - } + err = srcHost.Connect(context.Background(), dstHost.Peerstore().PeerInfo(dstHost.ID())) + require.NoError(t, err) err = sub.SetLatestSync(srcHost.ID(), chainLnks[3].(cidlink.Link).Cid) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) err = test.WaitForPublisher(dstHost, testTopic, srcHost.ID()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) watcher, cncl := sub.OnSyncFinished() 
t.Log("Testing sync fail when the other end does not have the data") err = newUpdateTest(pub, sub, dstStore, watcher, srcHost.ID(), cidlink.Link{Cid: cid.Undef}, true, chainLnks[3].(cidlink.Link).Cid) cncl() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) sub.Close() dstStore = dssync.MutexWrap(datastore.NewMapDatastore()) sub2, err := dagsync.NewSubscriber(dstHost, dstStore, dstLnkS, testTopic, nil) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer sub2.Close() err = sub2.SetLatestSync(srcHost.ID(), chainLnks[3].(cidlink.Link).Cid) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) watcher, cncl = sub2.OnSyncFinished() t.Log("Testing sync fail when not able to run the full exchange") err = newUpdateTest(pub, sub2, dstStore, watcher, srcHost.ID(), chainLnks[2], true, chainLnks[3].(cidlink.Link).Cid) cncl() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } func TestAnnounce(t *testing.T) { @@ -388,21 +361,15 @@ func TestAnnounce(t *testing.T) { dstLnkS := test.MkLinkSystem(dstStore) pub, err := dtsync.NewPublisher(srcHost, srcStore, srcLnkS, testTopic) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer pub.Close() sub, err := dagsync.NewSubscriber(dstHost, dstStore, dstLnkS, testTopic, nil) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer sub.Close() err = test.WaitForPublisher(dstHost, testTopic, srcHost.ID()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) watcher, cncl := sub.OnSyncFinished() defer cncl() @@ -411,17 +378,11 @@ func TestAnnounce(t *testing.T) { chainLnks := test.MkChain(srcLnkS, true) err = newAnnounceTest(pub, sub, dstStore, watcher, srcHost.ID(), srcHost.Addrs(), chainLnks[2], chainLnks[2].(cidlink.Link).Cid) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) err = newAnnounceTest(pub, sub, dstStore, watcher, srcHost.ID(), srcHost.Addrs(), chainLnks[1], chainLnks[1].(cidlink.Link).Cid) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) err = newAnnounceTest(pub, sub, dstStore, watcher, srcHost.ID(), srcHost.Addrs(), chainLnks[0], chainLnks[0].(cidlink.Link).Cid) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } func newAnnounceTest(pub dagsync.Publisher, sub *dagsync.Subscriber, dstStore datastore.Batching, watcher <-chan dagsync.SyncFinished, peerID peer.ID, peerAddrs []multiaddr.Multiaddr, lnk ipld.Link, expectedSync cid.Cid) error { diff --git a/dagsync/test/util.go b/dagsync/test/util.go index 8ff8d1ef1..e6b875902 100644 --- a/dagsync/test/util.go +++ b/dagsync/test/util.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "errors" - "fmt" "io" "math/rand" "sync" @@ -23,6 +22,7 @@ import ( "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" "github.com/multiformats/go-multicodec" + "github.com/stretchr/testify/require" ) const ( @@ -39,9 +39,7 @@ func WaitForMeshWithMessage(t *testing.T, topic string, hosts ...host.Host) []*p } retries-- msg := "Mesh failed to startup" - if retries == 0 { - t.Fatalf(msg) - } + require.NotZero(t, retries, msg) t.Log(msg + " retrying") } } @@ -69,9 +67,7 @@ func waitForMeshWithMessage(t *testing.T, topic string, hosts ...host.Host) []*p } h.Peerstore().AddAddrs(addrInfo.ID, addrInfo.Addrs, time.Hour) err := h.Connect(context.Background(), addrInfo) - if err != nil { - t.Fatalf("Failed to connect: %v", err) - } + require.NoError(t, err, "Failed to connect") } } @@ -86,22 +82,17 @@ func waitForMeshWithMessage(t *testing.T, topic string, hosts ...host.Host) []*p } 
 
 		pubsub, err := pubsub.NewGossipSub(context.Background(), h, pubsub.WithDirectPeers(addrInfosWithoutSelf))
-		if err != nil {
-			t.Fatalf("Failed to start gossipsub: %v", err)
-		}
+		require.NoError(t, err, "Failed to start gossipsub")
 
 		tpc, err := pubsub.Join(topic)
-		if err != nil {
-			t.Fatalf("Failed to join topic: %v", err)
-		}
+		require.NoError(t, err, "Failed to join topic")
 
 		pubsubs[i] = pubsub
 		topics[i] = tpc
 	}
 
-	if len(pubsubs) == 1 {
-		t.Fatalf("No point in using this helper if there's only one host. Did you mean to pass in another host?")
-	}
+	require.NotEqual(t, 1, len(pubsubs),
+		"No point in using this helper if there's only one host. Did you mean to pass in another host?")
 
 	restTopics := topics[1:]
 	wg := sync.WaitGroup{}
@@ -110,24 +101,18 @@ func waitForMeshWithMessage(t *testing.T, topic string, hosts ...host.Host) []*p
 		wg.Add(1)
 
 		s, err := restTopics[i].Subscribe()
-		if err != nil {
-			t.Fatalf("Failed to subscribe: %v", err)
-		}
+		require.NoError(t, err, "Failed to subscribe")
 
 		go func(s *pubsub.Subscription) {
 			_, err := s.Next(context.Background())
-			if err != nil {
-				fmt.Println("Failed in waiting for startupCheck msg in goroutine", err)
-			}
+			require.NoError(t, err, "Failed in waiting for startupCheck msg in goroutine")
 			wg.Done()
 
 			// Wait until someone else picks up this topic and sends a message before
 			// we cancel. This way the topic isn't unsubscribed to before we start
 			// the test.
 			_, err = s.Next(context.Background())
-			if err != nil {
-				fmt.Println("error getting next message on subscription:", err)
-			}
+			require.NoError(t, err, "Error getting next message on subscription")
 			s.Cancel()
 		}(s)
 	}
@@ -140,9 +125,7 @@ func waitForMeshWithMessage(t *testing.T, topic string, hosts ...host.Host) []*p
 	tpc := topics[0]
 	err := tpc.Publish(context.Background(), []byte("hi"))
-	if err != nil {
-		t.Fatalf("Failed to publish: %v", err)
-	}
+	require.NoError(t, err, "Failed to publish")
 
 	timeout := time.NewTimer(waitForMeshTimeout)
 	defer timeout.Stop()
@@ -160,9 +143,7 @@ func waitForMeshWithMessage(t *testing.T, topic string, hosts ...host.Host) []*p
 		return nil
 	case <-pubTimeout.C:
 		err := tpc.Publish(context.Background(), []byte("hi"))
-		if err != nil {
-			fmt.Println("Failed to publish:", err)
-		}
+		require.NoError(t, err, "Failed to publish")
 		pubTimeout.Reset(publishTimeout)
 	}
 }
diff --git a/internal/ingest/ingest_test.go b/internal/ingest/ingest_test.go
index 2522d62e4..075225923 100644
--- a/internal/ingest/ingest_test.go
+++ b/internal/ingest/ingest_test.go
@@ -1211,9 +1211,7 @@ func TestRecursionDepthLimitsEntriesSync(t *testing.T) {
 	for err == nil && lcid == cid.Undef {
 		// May not have marked ad as processed yet, retry.
 		time.Sleep(time.Second)
-		if ctx.Err() != nil {
-			t.Fatal("sync timeout")
-		}
+		require.NoError(t, ctx.Err(), "sync timeout")
 		lcid, err = ing.GetLatestSync(pubHost.ID())
 	}
 
@@ -1594,9 +1592,7 @@ func mkRegistry(t *testing.T) *registry.Registry {
 		PollInterval: config.Duration(time.Minute),
 	}
 	reg, err := registry.New(context.Background(), discoveryCfg, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	return reg
 }
 
@@ -1643,9 +1639,8 @@ func mkIngestWithConfig(t *testing.T, h host.Host, cfg config.Ingest) (*Ingester
 func connectHosts(t *testing.T, srcHost, dstHost host.Host) {
 	srcHost.Peerstore().AddAddrs(dstHost.ID(), dstHost.Addrs(), time.Hour)
 	dstHost.Peerstore().AddAddrs(srcHost.ID(), srcHost.Addrs(), time.Hour)
-	if err := srcHost.Connect(context.Background(), dstHost.Peerstore().PeerInfo(dstHost.ID())); err != nil {
-		t.Fatal(err)
-	}
+	err := srcHost.Connect(context.Background(), dstHost.Peerstore().PeerInfo(dstHost.ID()))
+	require.NoError(t, err)
 }
 
 func newRandomLinkedList(t *testing.T, lsys ipld.LinkSystem, size int) (ipld.Link, []multihash.Multihash) {
diff --git a/internal/registry/policy/policy_test.go b/internal/registry/policy/policy_test.go
index eebb8c89c..25641c4ab 100644
--- a/internal/registry/policy/policy_test.go
+++ b/internal/registry/policy/policy_test.go
@@ -5,6 +5,7 @@ import (
 
 	"github.com/ipni/storetheindex/config"
 	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/stretchr/testify/require"
 )
 
 const (
@@ -38,41 +39,29 @@ func TestNewPolicy(t *testing.T) {
 	}
 
 	_, err := New(policyCfg)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	policyCfg.Allow = true
 	_, err = New(policyCfg)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	policyCfg.Allow = false
 	policyCfg.PublishExcept = append(policyCfg.PublishExcept, "bad ID")
 	_, err = New(policyCfg)
-	if err == nil {
-		t.Error("expected error with bad PublishExcept ID")
-	}
+	require.Error(t, err, "expected error with bad PublishExcept ID")
 
 	policyCfg.PublishExcept = nil
 	policyCfg.Except = append(policyCfg.Except, "bad ID")
 	_, err = New(policyCfg)
-	if err == nil {
-		t.Error("expected error with bad except ID")
-	}
+	require.Error(t, err, "expected error with bad except ID")
 
 	policyCfg.Except = nil
 	_, err = New(policyCfg)
-	if err != nil {
-		t.Error(err)
-	}
+	require.NoError(t, err)
 
 	policyCfg.Allow = true
 	_, err = New(policyCfg)
-	if err != nil {
-		t.Error(err)
-	}
+	require.NoError(t, err)
 }
 
 func TestPolicyAccess(t *testing.T) {
@@ -84,95 +73,49 @@ func TestPolicyAccess(t *testing.T) {
 	}
 
 	p, err := New(policyCfg)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
-	if p.Allowed(otherID) {
-		t.Error("peer ID should not be allowed by policy")
-	}
-	if !p.Allowed(exceptID) {
-		t.Error("peer ID should be allowed")
-	}
+	require.False(t, p.Allowed(otherID), "peer ID should not be allowed by policy")
+	require.True(t, p.Allowed(exceptID), "peer ID should be allowed")
 
-	if p.PublishAllowed(otherID, exceptID) {
-		t.Error("peer ID should not be allowed to publish")
-	}
-	if !p.PublishAllowed(otherID, otherID) {
-		t.Error("should be allowed to publish to self")
-	}
-	if p.PublishAllowed(exceptID, otherID) {
-		t.Error("should not be allowed to publish to blocked peer")
-	}
+	require.False(t, p.PublishAllowed(otherID, exceptID), "peer ID should not be allowed to publish")
+	require.True(t, p.PublishAllowed(otherID, otherID), "should be allowed to publish to self")
+	require.False(t, p.PublishAllowed(exceptID, otherID), "should not be allowed to publish to blocked peer")
p.Allow(otherID) - if !p.Allowed(otherID) { - t.Error("peer ID should be allowed by policy") - } + require.True(t, p.Allowed(otherID), "peer ID should be allowed by policy") p.Block(exceptID) - if p.Allowed(exceptID) { - t.Error("peer ID should not be allowed") - } + require.False(t, p.Allowed(exceptID), "peer ID should not be allowed") policyCfg.Allow = true policyCfg.Publish = true newPol, err := New(policyCfg) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) p.Copy(newPol) - if !p.Allowed(otherID) { - t.Error("peer ID should be allowed by policy") - } - if p.Allowed(exceptID) { - t.Error("peer ID should not be allowed") - } + require.True(t, p.Allowed(otherID), "peer ID should be allowed by policy") + require.False(t, p.Allowed(exceptID), "peer ID should not be allowed") - if p.PublishAllowed(otherID, exceptID) { - t.Error("should not be allowed to publish to blocked peer") - } - if p.PublishAllowed(exceptID, otherID) { - t.Error("peer ID should not be allowed to publish") - } - if !p.PublishAllowed(exceptID, exceptID) { - t.Error("peer ID be allowed to publish to self") - } + require.False(t, p.PublishAllowed(otherID, exceptID), "should not be allowed to publish to blocked peer") + require.False(t, p.PublishAllowed(exceptID, otherID), "peer ID should not be allowed to publish") + require.True(t, p.PublishAllowed(exceptID, exceptID), "peer ID be allowed to publish to self") p.Allow(exceptID) - if !p.Allowed(exceptID) { - t.Error("peer ID should be allowed by policy") - } + require.True(t, p.Allowed(exceptID), "peer ID should be allowed by policy") p.Block(otherID) - if p.Allowed(otherID) { - t.Error("peer ID should not be allowed") - } + require.False(t, p.Allowed(otherID), "peer ID should not be allowed") cfg := p.ToConfig() - if cfg.Allow != true { - t.Error("wrong config.Allow") - } - if cfg.Publish != true { - t.Error("wrong config.Publish") - } - if len(cfg.Except) != 1 { - t.Fatal("expected 1 item in cfg.Except") - } - if cfg.Except[0] != otherIDStr { - t.Error("wrong ID in cfg.Except") - } - if len(cfg.PublishExcept) != 1 { - t.Fatal("expected 1 item in cfg.PublishExcept") - } + require.True(t, cfg.Allow) + require.True(t, cfg.Publish) + require.Equal(t, 1, len(cfg.Except)) + require.Equal(t, otherIDStr, cfg.Except[0]) + require.Equal(t, 1, len(cfg.PublishExcept)) p, err = New(config.Policy{}) - if err != nil { - t.Fatal(err) - } - if !p.NoneAllowed() { - t.Error("expected inaccessible policy") - } + require.NoError(t, err) + require.True(t, p.NoneAllowed(), "expected inaccessible policy") } diff --git a/peerutil/policy_test.go b/peerutil/policy_test.go index 9ff500455..64a3cfe15 100644 --- a/peerutil/policy_test.go +++ b/peerutil/policy_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" ) const ( @@ -30,176 +31,100 @@ func init() { func TestNewPolicy(t *testing.T) { _, err := NewPolicyStrings(false, []string{exceptIDStr, "bad ID"}) - if err == nil { - t.Error("expected error with bad except ID") - } + require.Error(t, err, "expected error with bad except ID") except := []string{exceptIDStr} p, err := NewPolicyStrings(false, except) - if err != nil { - t.Fatal(err) - } - if !p.Any(true) { - t.Error("true should be possible") - } + require.NoError(t, err) + require.True(t, p.Any(true), "true should be possible") p, err = NewPolicyStrings(true, except) - if err != nil { - t.Fatal(err) - } - if !p.Any(true) { - t.Error("true should be possible") - } + require.NoError(t, err) + require.True(t, 
 
 	p = NewPolicy(false)
-	if p.Any(true) {
-		t.Error("should not be true for any peers")
-	}
+	require.False(t, p.Any(true), "should not be true for any peers")
 
-	if p.SetPeer(exceptID, false) {
-		t.Fatal("should not have been updated to be false for peer")
-	}
+	require.False(t, p.SetPeer(exceptID, false), "should not have been updated to be false for peer")
 
 	p = NewPolicy(true)
-	if !p.Any(true) {
-		t.Error("should by true for any peers")
-	}
+	require.True(t, p.Any(true), "should be true for any peers")
 
-	if !p.SetPeer(exceptID, false) {
-		t.Fatal("should have been updated to be false for peer")
-	}
-	if p.Eval(exceptID) {
-		t.Fatal("should be false for peer ID")
-	}
+	require.True(t, p.SetPeer(exceptID, false), "should have been updated to be false for peer")
+	require.False(t, p.Eval(exceptID), "should be false for peer ID")
 }
 
 func TestFalseDefault(t *testing.T) {
 	p := NewPolicy(false, exceptID)
-	if p.Default() {
-		t.Fatal("expected false default")
-	}
+	require.False(t, p.Default(), "expected false default")
 
-	if p.Eval(otherID) {
-		t.Error("should evaluate false")
-	}
-	if !p.Eval(exceptID) {
-		t.Error("should evaluate true")
-	}
+	require.False(t, p.Eval(otherID))
+	require.True(t, p.Eval(exceptID))
 
 	// Check that disabling otherID does not update.
-	if p.SetPeer(otherID, false) {
-		t.Fatal("should not have been updated")
-	}
-	if p.Eval(otherID) {
-		t.Error("should not evaluate true")
-	}
+	require.False(t, p.SetPeer(otherID, false), "should not have been updated")
+	require.False(t, p.Eval(otherID))
 
 	// Check that setting exceptID true does not update.
-	if p.SetPeer(exceptID, true) {
-		t.Fatal("should not have been updated")
-	}
-	if !p.Eval(exceptID) {
-		t.Error("should evaluate true")
-	}
+	require.False(t, p.SetPeer(exceptID, true), "should not have been updated")
+	require.True(t, p.Eval(exceptID))
 
 	// Check that setting otherID true does update.
-	if !p.SetPeer(otherID, true) {
-		t.Fatal("should have been updated")
-	}
-	if !p.Eval(otherID) {
-		t.Error("should evaluate true")
-	}
+	require.True(t, p.SetPeer(otherID, true), "should have been updated")
+	require.True(t, p.Eval(otherID))
 
 	// Check that setting exceptID false does update.
-	if !p.SetPeer(exceptID, false) {
-		t.Fatal("should have been updated")
-	}
-	if p.Eval(exceptID) {
-		t.Error("peer ID should evaluate false")
-	}
+	require.True(t, p.SetPeer(exceptID, false), "should have been updated")
+	require.False(t, p.Eval(exceptID))
 }
 
 func TestTrueDefault(t *testing.T) {
 	p := NewPolicy(true, exceptID)
-	if !p.Default() {
-		t.Fatal("expected true default")
-	}
+	require.True(t, p.Default(), "expected true default")
 
-	if !p.Eval(otherID) {
-		t.Error("should evaluate true")
-	}
-	if p.Eval(exceptID) {
-		t.Error("should evaluate false")
-	}
+	require.True(t, p.Eval(otherID))
+	require.False(t, p.Eval(exceptID))
 
 	// Check that setting exceptID false does not update.
-	if p.SetPeer(exceptID, false) {
-		t.Fatal("should not have been update")
-	}
-	if p.Eval(exceptID) {
-		t.Error("should evaluate false")
-	}
+	require.False(t, p.SetPeer(exceptID, false), "should not have been updated")
+	require.False(t, p.Eval(exceptID))
 
 	// Check that setting otherID true does not update.
-	if p.SetPeer(otherID, true) {
-		t.Fatal("should have been update")
-	}
-	if !p.Eval(otherID) {
-		t.Error("should evaluate true")
-	}
+	require.False(t, p.SetPeer(otherID, true), "should not have been updated")
+	require.True(t, p.Eval(otherID))
 
 	// Check that setting exceptID true does updates.
-	if !p.SetPeer(exceptID, true) {
-		t.Fatal("should have been update")
-	}
-	if !p.Eval(exceptID) {
-		t.Error("should evaluate true")
-	}
+	require.True(t, p.SetPeer(exceptID, true), "should have been updated")
+	require.True(t, p.Eval(exceptID))
 
 	// Check that setting otherID false does updates.
-	if !p.SetPeer(otherID, false) {
-		t.Fatal("should have been updated")
-	}
-	if p.Eval(otherID) {
-		t.Error("should evaluate false")
-	}
+	require.True(t, p.SetPeer(otherID, false), "should have been updated")
+	require.False(t, p.Eval(otherID))
 }
 
 func TestExceptStrings(t *testing.T) {
 	p, err := NewPolicyStrings(false, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(p.ExceptStrings()) != 0 {
-		t.Fatal("should not be any except strings")
-	}
+	require.NoError(t, err)
+	require.Zero(t, len(p.ExceptStrings()), "should not be any except strings")
 
 	except := []string{exceptIDStr, otherIDStr}
 
 	p, err = NewPolicyStrings(false, except)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	exStrs := p.ExceptStrings()
-	if len(exStrs) != 2 {
-		t.Fatal("wrong number of except strings")
-	}
+	require.Equal(t, 2, len(exStrs), "wrong number of except strings")
 
 	for _, exStr := range exStrs {
-		if exStr != except[0] && exStr != except[1] {
-			t.Fatal("except strings does not match original")
-		}
+		require.Contains(t, except, exStr, "except strings does not match original")
 	}
 
 	for exID := range p.except {
 		p.SetPeer(exID, false)
 	}
-	if p.ExceptStrings() != nil {
-		t.Fatal("expected nil except strings")
-	}
+	require.Nil(t, p.ExceptStrings())
 }
diff --git a/server/admin/http/server_test.go b/server/admin/http/server_test.go
index f889db96c..892be06a2 100644
--- a/server/admin/http/server_test.go
+++ b/server/admin/http/server_test.go
@@ -291,9 +291,7 @@ func initRegistry(t *testing.T, trustedID string) *registry.Registry {
 		UseAssigner: true,
 	}
 	reg, err := registry.New(context.Background(), discoveryCfg, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	return reg
 }
@@ -305,14 +303,10 @@ func initIngest(t *testing.T, indx indexer.Interface, reg *registry.Registry) *i
 	cfg := config.NewIngest()
 	ds := dssync.MutexWrap(datastore.NewMapDatastore())
 	host, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	ing, err := ingest.NewIngester(cfg, host, indx, reg, ds)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	t.Cleanup(func() {
 		ing.Close()
 	})
diff --git a/server/finder/http/protocol_test.go b/server/finder/http/protocol_test.go
index dab18d5d3..61176158a 100644
--- a/server/finder/http/protocol_test.go
+++ b/server/finder/http/protocol_test.go
@@ -15,21 +15,18 @@ import (
 	"github.com/ipni/storetheindex/internal/registry"
 	httpserver "github.com/ipni/storetheindex/server/finder/http"
 	"github.com/ipni/storetheindex/server/finder/test"
+	"github.com/stretchr/testify/require"
 )
 
 func setupServer(ind indexer.Interface, reg *registry.Registry, idxCts *counter.IndexCounts, t *testing.T) *httpserver.Server {
 	s, err := httpserver.New("127.0.0.1:0", ind, reg, httpserver.WithIndexCounts(idxCts))
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	return s
 }
 
 func setupClient(host string, t *testing.T) *httpclient.Client {
 	c, err := httpclient.New(host)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	return c
 }
@@ -61,14 +58,10 @@ func TestFindIndexData(t *testing.T) {
 		t.Error("shutdown error:", err)
 	}
 	err = <-errChan
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	reg.Close()
-	if err = ind.Close(); err != nil {
-		t.Errorf("Error closing indexer core: %s", err)
-	}
+	require.NoError(t, ind.Close(), "Error closing indexer core")
 }
 
 func TestFindIndexWithExtendedProviders(t *testing.T) {
@@ -101,19 +94,12 @@ func TestFindIndexWithExtendedProviders(t *testing.T) {
 	test.MainProviderChainRecordIsIncludedIfItsMetadataIsDifferentTest(ctx, t, c, ind, reg)
 	test.MainProviderContextRecordIsIncludedIfItsMetadataIsDifferentTest(ctx, t, c, ind, reg)
 
-	err := s.Close()
-	if err != nil {
-		t.Error("shutdown error:", err)
-	}
-	err = <-errChan
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, s.Close(), "shutdown error")
+	err := <-errChan
+	require.NoError(t, err)
 	reg.Close()
-	if err = ind.Close(); err != nil {
-		t.Errorf("Error closing indexer core: %s", err)
-	}
+	require.NoError(t, ind.Close(), "Error closing indexer core")
 }
 
 func TestReframeFindIndexData(t *testing.T) {
@@ -125,13 +111,9 @@ func TestReframeFindIndexData(t *testing.T) {
 
 	// create delegated routing client
 	q, err := proto.New_DelegatedRouting_Client(s.URL() + "/reframe")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	reframeClient, err := client.NewClient(q, nil, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// Start server
 	errChan := make(chan error, 1)
@@ -149,19 +131,12 @@ func TestReframeFindIndexData(t *testing.T) {
 
 	test.ReframeFindIndexTest(ctx, t, c, reframeClient, ind, reg)
 
-	err = s.Close()
-	if err != nil {
-		t.Error("shutdown error:", err)
-	}
+	require.NoError(t, s.Close(), "shutdown error")
 	err = <-errChan
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	reg.Close()
-	if err = ind.Close(); err != nil {
-		t.Errorf("Error closing indexer core: %s", err)
-	}
+	require.NoError(t, ind.Close(), "Error closing indexer core")
 }
 
 func TestProviderInfo(t *testing.T) {
@@ -194,19 +169,12 @@ func TestProviderInfo(t *testing.T) {
 
 	test.ListProvidersTest(t, httpClient, peerID)
 
-	err := s.Close()
-	if err != nil {
-		t.Error("shutdown error:", err)
-	}
-	err = <-errChan
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, s.Close(), "shutdown error")
+	err := <-errChan
+	require.NoError(t, err)
 	reg.Close()
-	if err = ind.Close(); err != nil {
-		t.Errorf("Error closing indexer core: %s", err)
-	}
+	require.NoError(t, ind.Close(), "Error closing indexer core")
 }
 
 func TestGetStats(t *testing.T) {
@@ -233,14 +201,9 @@ func TestGetStats(t *testing.T) {
 
 	test.GetStatsTest(ctx, t, ind, s.RefreshStats, httpClient)
 
-	err := s.Close()
-	if err != nil {
-		t.Error("shutdown error:", err)
-	}
-	err = <-errChan
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, s.Close(), "shutdown error")
+	err := <-errChan
+	require.NoError(t, err)
 }
 
 func TestRemoveProvider(t *testing.T) {
@@ -266,17 +229,10 @@ func TestRemoveProvider(t *testing.T) {
 
 	test.RemoveProviderTest(ctx, t, c, ind, reg)
 
-	err := s.Close()
-	if err != nil {
-		t.Error("shutdown error:", err)
-	}
-	err = <-errChan
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, s.Close(), "shutdown error")
+	err := <-errChan
+	require.NoError(t, err)
 	reg.Close()
-	if err = ind.Close(); err != nil {
-		t.Errorf("Error closing indexer core: %s", err)
-	}
+	require.NoError(t, ind.Close(), "Error closing indexer core")
 }
diff --git a/server/finder/libp2p/protocol_test.go b/server/finder/libp2p/protocol_test.go
index 834233ca8..898672783 100644
--- a/server/finder/libp2p/protocol_test.go
+++ b/server/finder/libp2p/protocol_test.go
@@ -14,22 +14,19 @@ import (
 	"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" ) func setupServer(ctx context.Context, ind indexer.Interface, reg *registry.Registry, idxCts *counter.IndexCounts, t *testing.T) (*p2pserver.FinderServer, host.Host) { h, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/0.0.0.0/tcp/0")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) s := p2pserver.New(ctx, h, ind, reg, idxCts) return s, h } func setupClient(peerID peer.ID, t *testing.T) *p2pclient.Client { c, err := p2pclient.New(nil, peerID) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) return c } @@ -43,15 +40,11 @@ func TestFindIndexData(t *testing.T) { s, sh := setupServer(ctx, ind, reg, nil, t) c := setupClient(s.ID(), t) err := c.ConnectAddrs(ctx, sh.Addrs()...) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test.FindIndexTest(ctx, t, c, ind, reg) reg.Close() - if err = ind.Close(); err != nil { - t.Errorf("Error closing indexer core: %s", err) - } + require.NoError(t, ind.Close(), "Error closing indexer core") } func TestFindIndexWithExtendedProviders(t *testing.T) { @@ -64,9 +57,7 @@ func TestFindIndexWithExtendedProviders(t *testing.T) { s, sh := setupServer(ctx, ind, reg, nil, t) c := setupClient(s.ID(), t) err := c.ConnectAddrs(ctx, sh.Addrs()...) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test.ProvidersShouldBeUnaffectedByExtendedProvidersOfEachOtherTest(ctx, t, c, ind, reg) test.ExtendedProviderShouldHaveOwnMetadataTest(ctx, t, c, ind, reg) test.ExtendedProviderShouldInheritMetadataOfMainProviderTest(ctx, t, c, ind, reg) @@ -76,9 +67,7 @@ func TestFindIndexWithExtendedProviders(t *testing.T) { test.MainProviderContextRecordIsIncludedIfItsMetadataIsDifferentTest(ctx, t, c, ind, reg) reg.Close() - if err = ind.Close(); err != nil { - t.Errorf("Error closing indexer core: %s", err) - } + require.NoError(t, ind.Close(), "Error closing indexer core") } func TestProviderInfo(t *testing.T) { @@ -93,9 +82,7 @@ func TestProviderInfo(t *testing.T) { s, sh := setupServer(ctx, ind, reg, idxCts, t) p2pClient := setupClient(s.ID(), t) err := p2pClient.ConnectAddrs(ctx, sh.Addrs()...) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) peerID := test.Register(ctx, t, reg) @@ -106,9 +93,7 @@ func TestProviderInfo(t *testing.T) { test.ListProvidersTest(t, p2pClient, peerID) reg.Close() - if err = ind.Close(); err != nil { - t.Errorf("Error closing indexer core: %s", err) - } + require.NoError(t, ind.Close(), "Error closing indexer core") } func TestGetStats(t *testing.T) { @@ -123,9 +108,7 @@ func TestGetStats(t *testing.T) { s, sh := setupServer(ctx, ind, reg, nil, t) c := setupClient(s.ID(), t) err := c.ConnectAddrs(ctx, sh.Addrs()...) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test.GetStatsTest(ctx, t, ind, s.RefreshStats, c) } @@ -139,14 +122,10 @@ func TestRemoveProvider(t *testing.T) { s, sh := setupServer(ctx, ind, reg, nil, t) c := setupClient(s.ID(), t) err := c.ConnectAddrs(ctx, sh.Addrs()...) 
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	test.RemoveProviderTest(ctx, t, c, ind, reg)
 
 	reg.Close()
-	if err = ind.Close(); err != nil {
-		t.Errorf("Error closing indexer core: %s", err)
-	}
+	require.NoError(t, ind.Close(), "Error closing indexer core")
 }
diff --git a/server/finder/test/test.go b/server/finder/test/test.go
index 802d4d939..e852202d6 100644
--- a/server/finder/test/test.go
+++ b/server/finder/test/test.go
@@ -6,7 +6,6 @@ import (
 	"encoding/base64"
 	"fmt"
 	"math/rand"
-	"strings"
 	"testing"
 	"time"
@@ -40,9 +39,7 @@ func InitIndex(t *testing.T, withCache bool) indexer.Interface {
 
 // InitPebbleIndex initialize a new indexer engine using pebbel with cache.
 func InitPebbleIndex(t *testing.T, withCache bool) indexer.Interface {
 	valueStore, err := pebble.New(t.TempDir(), nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	if withCache {
 		return engine.New(radixcache.New(1000), valueStore)
 	}
@@ -71,48 +68,32 @@ func InitRegistryWithRestrictivePolicy(t *testing.T, restrictive bool) *registry
 		}
 	}
 	reg, err := registry.New(context.Background(), discoveryCfg, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	return reg
 }
 
 // populateIndex with some multihashes
 func populateIndex(ind indexer.Interface, mhs []multihash.Multihash, v indexer.Value, t *testing.T) {
 	err := ind.Put(v, mhs...)
-	if err != nil {
-		t.Fatal("Error putting multihashes: ", err)
-	}
+	require.NoError(t, err, "Error putting multihashes")
 	vals, ok, err := ind.Get(mhs[0])
-	if err != nil {
-		t.Fatal(err)
-	}
-	if !ok {
-		t.Fatal("index not found")
-	}
-	if len(vals) == 0 {
-		t.Fatal("no values returned")
-	}
-	if !v.Equal(vals[0]) {
-		t.Fatal("stored and retrieved values are different")
-	}
+	require.NoError(t, err)
+	require.True(t, ok, "index not found")
+	require.NotZero(t, len(vals), "no values returned")
+	require.True(t, v.Equal(vals[0]), "stored and retrieved values are different")
}
 
 func ReframeFindIndexTest(ctx context.Context, t *testing.T, c client.Finder, rc *reframeclient.Client, ind indexer.Interface, reg *registry.Registry) {
 	// Generate some multihashes and populate indexer
 	mhs := util.RandomMultihashes(15, rng)
 	p, err := peer.Decode(providerID)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	ctxID := []byte("test-context-id")
 	// Use a sample metadata with multiple protocols that includes BitSwap
 	// among others to make a stronger test.
 	metadata, err := base64.StdEncoding.DecodeString("gBKQEqNoUGllY2VDSUTYKlgoAAGB4gOSICAYVAKmPqL1mpkiiDhd9iBaXoU/3rXorXxzjiyESP4hB2xWZXJpZmllZERlYWz0bUZhc3RSZXRyaWV2YWz1")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	v := indexer.Value{
 		ProviderID: p,
 		ContextID:  ctxID,
@@ -126,31 +107,21 @@ func ReframeFindIndexTest(ctx context.Context, t *testing.T, c client.Finder, rc
 		Addrs: []multiaddr.Multiaddr{a},
 	}
 	err = reg.Update(ctx, provider, peer.AddrInfo{}, cid.Undef, nil, 0)
-	if err != nil {
-		t.Fatal("could not register provider info:", err)
-	}
+	require.NoError(t, err, "could not register provider info")
 
 	// Get single multihash
 	peerAddrs, err := rc.FindProviders(ctx, cid.NewCidV1(cid.Raw, mhs[0]))
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
-	if len(peerAddrs) != 1 {
-		t.Fatalf("expecting one peer addr, got %d", len(peerAddrs))
-	}
-	if peerAddrs[0].ID != p {
-		t.Fatalf("expecting %v, got %v", p, peerAddrs[0].ID)
-	}
+	require.Equal(t, 1, len(peerAddrs), "expecting one peer addr")
+	require.Equal(t, p, peerAddrs[0].ID)
 }
 
 func FindIndexTest(ctx context.Context, t *testing.T, c client.Finder, ind indexer.Interface, reg *registry.Registry) {
 	// Generate some multihashes and populate indexer
 	mhs := util.RandomMultihashes(15, rng)
 	p, err := peer.Decode(providerID)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	ctxID := []byte("test-context-id")
 	metadata := []byte("test-metadata")
 	v := indexer.Value{
@@ -166,15 +137,11 @@ func FindIndexTest(ctx context.Context, t *testing.T, c client.Finder, ind index
 		Addrs: []multiaddr.Multiaddr{a},
 	}
 	err = reg.Update(ctx, provider, peer.AddrInfo{}, cid.Undef, nil, 0)
-	if err != nil {
-		t.Fatal("could not register provider info:", err)
-	}
+	require.NoError(t, err, "could not register provider info")
 
 	// Get single multihash
 	resp, err := c.Find(ctx, mhs[0])
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	t.Log("index values in resp:", len(resp.MultihashResults))
 
 	provResult := model.ProviderResult{
@@ -188,49 +155,31 @@ func FindIndexTest(ctx context.Context, t *testing.T, c client.Finder, ind index
 
 	expectedResults := []model.ProviderResult{provResult}
 	err = checkResponse(resp, mhs[:1], expectedResults)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// Get a batch of multihashes
 	resp, err = c.FindBatch(ctx, mhs[:10])
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	err = checkResponse(resp, mhs[:10], expectedResults)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// Get a batch of multihashes where only a subset is in the index
 	resp, err = c.FindBatch(ctx, mhs)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	err = checkResponse(resp, mhs[:10], expectedResults)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// Get empty batch
 	_, err = c.FindBatch(ctx, []multihash.Multihash{})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	err = checkResponse(&model.FindResponse{}, []multihash.Multihash{}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// Get batch with no multihashes in request
 	_, err = c.FindBatch(ctx, mhs[10:])
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	err = checkResponse(&model.FindResponse{}, []multihash.Multihash{}, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 }
 
 func checkResponse(r *model.FindResponse, mhs []multihash.Multihash, expected []model.ProviderResult) error {
@@ -278,12 +227,8 @@ func ListProvidersTest(t *testing.T, c client.Finder, providerID peer.ID) {
 	defer cancel()
 
 	providers, err := c.ListProviders(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(providers) != 1 {
-		t.Fatalf("should have 1 provider, has %d", len(providers))
-	}
+	require.NoError(t, err)
+	require.Equal(t, 1, len(providers), "should have 1 provider")
 
 	verifyProviderInfo(t, providers[0])
 }
@@ -324,14 +269,10 @@ func RemoveProviderTest(ctx context.Context, t *testing.T, c client.Finder, ind
 	// Generate some multihashes and populate indexer
 	mhs := util.RandomMultihashes(15, rng)
 	p, err := peer.Decode(providerID)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	ctxID := []byte("test-context-id")
 	metadata := []byte("test-metadata")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	v := indexer.Value{
 		ProviderID: p,
 		ContextID:  ctxID,
@@ -345,18 +286,12 @@ func RemoveProviderTest(ctx context.Context, t *testing.T, c client.Finder, ind
 		Addrs: []multiaddr.Multiaddr{a},
 	}
 	err = reg.Update(ctx, provider, peer.AddrInfo{}, cid.Undef, nil, 0)
-	if err != nil {
-		t.Fatal("could not register provider info:", err)
-	}
+	require.NoError(t, err, "could not register provider info")
 
 	// Get single multihash
 	resp, err := c.Find(ctx, mhs[0])
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(resp.MultihashResults) != 1 {
-		t.Fatal("expected 1 value in response")
-	}
+	require.NoError(t, err)
+	require.Equal(t, 1, len(resp.MultihashResults), "expected 1 value in response")
 
 	provResult := model.ProviderResult{
 		ContextID: v.ContextID,
@@ -369,39 +304,27 @@ func RemoveProviderTest(ctx context.Context, t *testing.T, c client.Finder, ind
 
 	expectedResults := []model.ProviderResult{provResult}
 	err = checkResponse(resp, mhs[:1], expectedResults)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	t.Log("removing provider from registry")
 	err = reg.RemoveProvider(ctx, p)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// Get single multihash
 	resp, err = c.Find(ctx, mhs[0])
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	t.Log("index values in resp:", len(resp.MultihashResults))
-	if len(resp.MultihashResults) != 0 {
-		t.Fatal("expected 0 multihashes in response")
-	}
+	require.Zero(t, len(resp.MultihashResults), "expected 0 multihashes in response")
 
 	_, err = c.GetProvider(ctx, p)
-	if err == nil || !strings.HasSuffix(err.Error(), "not found") {
-		t.Fatal("expected 'error not found' from GetProvider")
-	}
+	require.ErrorContains(t, err, "not found")
 }
 
 func GetStatsTest(ctx context.Context, t *testing.T, ind indexer.Interface, refreshStats func(), c client.Finder) {
 	t.Parallel()
 	mhs := util.RandomMultihashes(15, rng)
 	p, err := peer.Decode(providerID)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	ctxID := []byte("test-context-id")
 	metadata := []byte("test-metadata")
 	v := indexer.Value{
@@ -422,14 +345,10 @@ func Register(ctx context.Context, t *testing.T, reg *registry.Registry) peer.ID
 	peerID, err := peer.Decode(providerID)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	maddr, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/9999")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	ep1, _, _ := util.RandomIdentity(t)
 	ep2, _, _ := util.RandomIdentity(t)
@@ -461,9 +380,7 @@ func Register(ctx context.Context, t *testing.T, reg *registry.Registry) peer.ID
 	}
 
 	err = reg.Update(ctx, provider, peer.AddrInfo{}, cid.Undef, extProviders, 0)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	return peerID
 }
diff --git a/server/ingest/http/handler_test.go b/server/ingest/http/handler_test.go
index f62598bed..bc23f11d2 100644
--- a/server/ingest/http/handler_test.go
+++ b/server/ingest/http/handler_test.go
@@ -90,9 +90,7 @@ func TestHandleRegisterProvider(t *testing.T) {
 	addrs := []string{"/ip4/127.0.0.1/tcp/9999"}
 	data, err := model.MakeRegisterRequest(peerID, privKey, addrs)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	reqBody := bytes.NewBuffer(data)
 	req := httptest.NewRequest(http.MethodPost, "http://example.com/providers", reqBody)
@@ -101,15 +99,9 @@ func TestHandleRegisterProvider(t *testing.T) {
 
 	resp := w.Result()
-	if resp.StatusCode != http.StatusOK {
-		t.Fatal("expected response to be", http.StatusOK)
-	}
+	require.Equal(t, http.StatusOK, resp.StatusCode)
 
 	pinfo, allowed := reg.ProviderInfo(peerID)
-	if pinfo == nil {
-		t.Fatal("provider was not registered")
-	}
-	if !allowed {
-		t.Fatal("provider not allowed")
-	}
+	require.NotNil(t, pinfo, "provider was not registered")
+	require.True(t, allowed, "provider not allowed")
 }
diff --git a/server/ingest/http/protocol_test.go b/server/ingest/http/protocol_test.go
index ddf867306..569fe8e03 100644
--- a/server/ingest/http/protocol_test.go
+++ b/server/ingest/http/protocol_test.go
@@ -13,6 +13,7 @@ import (
 	"github.com/ipni/storetheindex/internal/registry"
 	httpserver "github.com/ipni/storetheindex/server/ingest/http"
 	"github.com/ipni/storetheindex/server/ingest/test"
+	"github.com/stretchr/testify/require"
 )
 
 var providerIdent = config.Identity{
@@ -22,35 +23,25 @@ var providerIdent = config.Identity{
 
 func setupServer(ind indexer.Interface, ing *ingest.Ingester, reg *registry.Registry, t *testing.T) *httpserver.Server {
 	s, err := httpserver.New("127.0.0.1:0", ind, ing, reg)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	return s
 }
 
 func setupClient(host string, t *testing.T) *httpclient.Client {
 	c, err := httpclient.New(host)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	return c
 }
 
 func setupSender(t *testing.T, baseURL string) *httpsender.Sender {
 	announceURL, err := url.Parse(baseURL + httpsender.DefaultAnnouncePath)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	peerID, _, err := providerIdent.Decode()
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	httpSender, err := httpsender.New([]*url.URL{announceURL}, peerID)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	return httpSender
 }
@@ -64,9 +55,7 @@ func TestRegisterProvider(t *testing.T) {
 	httpClient := setupClient(s.URL(), t)
 
 	peerID, privKey, err := providerIdent.Decode()
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	// Start server
 	errChan := make(chan error, 1)
@@ -81,9 +70,7 @@ func TestRegisterProvider(t *testing.T) {
 	test.RegisterProviderTest(t, httpClient, peerID, privKey, "/ip4/127.0.0.1/tcp/9999", reg)
 
 	reg.Close()
-	if err = ind.Close(); err != nil {
-		t.Errorf("Error closing indexer core: %s", err)
-	}
+	require.NoError(t, ind.Close(), "Error closing indexer core")
 }
 
 func TestAnnounce(t *testing.T) {
@@ -94,9 +81,7 @@ func TestAnnounce(t *testing.T) {
 	s := setupServer(ind, ing, reg, t)
 	httpSender := setupSender(t, s.URL())
 	peerID, _, err := providerIdent.Decode()
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	errChan := make(chan error, 1)
 	go func() {
 		err := s.Start()
@@ -109,7 +94,5 @@ func TestAnnounce(t *testing.T) {
 	test.AnnounceTest(t, peerID, httpSender)
 
 	reg.Close()
-	if err = ind.Close(); err != nil {
-		t.Errorf("Error closing indexer core: %s", err)
-	}
+	require.NoError(t, ind.Close(), "Error closing indexer core")
 }
diff --git a/server/ingest/test/test.go b/server/ingest/test/test.go
index 1b220183f..0ac1fe36e 100644
--- a/server/ingest/test/test.go
+++ b/server/ingest/test/test.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 	"math/rand"
-	"strings"
 	"testing"
 	"time"
@@ -27,6 +26,7 @@ import (
 	"github.com/libp2p/go-libp2p/core/crypto"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/multiformats/go-multiaddr"
+	"github.com/stretchr/testify/require"
 )
 
 var rng = rand.New(rand.NewSource(1413))
@@ -48,9 +48,7 @@ func InitRegistry(t *testing.T, trustedID string) *registry.Registry {
 		PollInterval: config.Duration(time.Minute),
 	}
 	reg, err := registry.New(context.Background(), discoveryCfg, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	return reg
 }
@@ -58,14 +56,10 @@ func InitIngest(t *testing.T, indx indexer.Interface, reg *registry.Registry) *i
 	cfg := config.NewIngest()
 	ds := dssync.MutexWrap(datastore.NewMapDatastore())
 	host, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	ing, err := ingest.NewIngester(cfg, host, indx, reg, ds)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	t.Cleanup(func() {
 		ing.Close()
 	})
@@ -78,25 +72,17 @@ func RegisterProviderTest(t *testing.T, c client.Ingest, providerID peer.ID, pri
 
 	t.Log("registering provider")
 	err := c.Register(ctx, providerID, privateKey, []string{addr})
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
-	if !reg.IsRegistered(providerID) {
-		t.Fatal("provider not registered")
-	}
+	require.True(t, reg.IsRegistered(providerID), "provider not registered")
 
 	// Test signature fail
 	t.Log("registering provider with bad signature")
 	badPeerID, err := peer.Decode("12D3KooWD1XypSuBmhebQcvq7Sf1XJZ1hKSfYCED4w6eyxhzwqnV")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	err = c.Register(ctx, badPeerID, privateKey, []string{addr})
-	if err == nil {
-		t.Fatal("expected bad signature error")
-	}
+	require.Error(t, err, "expected bad signature error")
 }
 
 func IndexContent(t *testing.T, cl client.Ingest, providerID peer.ID, privateKey crypto.PrivKey, ind indexer.Interface) {
@@ -109,20 +95,12 @@ func IndexContent(t *testing.T, cl client.Ingest, providerID peer.ID, privateKey
 	metadata := []byte("test-metadata")
 
 	err := cl.IndexContent(ctx, providerID, privateKey, mhs[0], contextID, metadata, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	vals, ok, err := ind.Get(mhs[0])
-	if err != nil {
-		t.Fatal(err)
-	}
-	if !ok {
-		t.Fatal("did not find content")
-	}
-	if len(vals) == 0 {
-		t.Fatal("no content values returned")
-	}
+	require.NoError(t, err)
+	require.True(t, ok, "did not find content")
+	require.NotZero(t, len(vals), "no content values returned")
 
 	expectValue := indexer.Value{
 		ProviderID: providerID,
@@ -136,9 +114,7 @@ func IndexContent(t *testing.T, cl client.Ingest, providerID peer.ID, privateKey
 			break
 		}
 	}
-	if !ok {
-		t.Fatal("did not get expected content")
-	}
+	require.True(t, ok, "did not get expected content")
 }
 
 func IndexContentNewAddr(t *testing.T, cl client.Ingest, providerID peer.ID, privateKey crypto.PrivKey, ind indexer.Interface, newAddr string, reg *registry.Registry) {
@@ -152,26 +128,16 @@ func IndexContentNewAddr(t *testing.T, cl client.Ingest, providerID peer.ID, pri
 	addrs := []string{newAddr}
 
 	err := cl.IndexContent(ctx, providerID, privateKey, mhs[0], ctxID, metadata, addrs)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	info, allowed := reg.ProviderInfo(providerID)
-	if info == nil {
-		t.Fatal("did not get infor for provider:", providerID)
-	}
-	if !allowed {
-		t.Fatal("provider not allowed")
-	}
+	require.NotNil(t, info, "did not get info for provider")
+	require.True(t, allowed, "provider not allowed")
 
 	maddr, err := multiaddr.NewMultiaddr(newAddr)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
-	if !info.AddrInfo.Addrs[0].Equal(maddr) {
-		t.Fatalf("Did not update address. Have %q, want %q", info.AddrInfo.Addrs[0].String(), maddr.String())
-	}
+	require.True(t, info.AddrInfo.Addrs[0].Equal(maddr), "Did not update address")
 }
 
 func IndexContentFail(t *testing.T, cl client.Ingest, providerID peer.ID, privateKey crypto.PrivKey, ind indexer.Interface) {
@@ -184,44 +150,28 @@ func IndexContentFail(t *testing.T, cl client.Ingest, providerID peer.ID, privat
 	metadata := []byte("test-metadata")
 
 	err := cl.IndexContent(ctx, providerID, privateKey, mhs[0], contextID, metadata, nil)
-	if err == nil {
-		t.Fatal("expected error")
-	}
-
-	if !strings.HasSuffix(err.Error(), "context id too long") {
-		t.Fatalf("expected error message: \"context id too long\", got %q", err.Error())
-	}
+	require.Error(t, err)
+	require.ErrorContains(t, err, "context id too long")
 
 	contextID = []byte("test-context-id")
 	metadata = make([]byte, schema.MaxMetadataLen+1)
 	err = cl.IndexContent(ctx, providerID, privateKey, mhs[0], contextID, metadata, nil)
-	if err == nil {
-		t.Fatal("expected error")
-	}
-
-	if !strings.HasSuffix(err.Error(), "metadata too long") {
-		t.Fatalf("expected error message: \"metadata too long\", got %q", err.Error())
-	}
+	require.Error(t, err)
+	require.ErrorContains(t, err, "metadata too long")
 
 	apierr, ok := err.(*v0.Error)
 	if ok {
-		if apierr.Status() != 400 {
-			t.Fatalf("expected status 400, got %d", apierr.Status())
-		}
+		require.Equal(t, 400, apierr.Status())
 	}
 }
 
 func AnnounceTest(t *testing.T, peerID peer.ID, sender announce.Sender) {
 	ai, err := peer.AddrInfoFromString(fmt.Sprintf("/ip4/127.0.0.1/tcp/9999/p2p/%s", peerID))
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	ai.ID = peerID
 
 	p2pAddrs, err := peer.AddrInfoToP2pAddrs(ai)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	mhs := util.RandomMultihashes(1, rng)
@@ -230,7 +180,6 @@ func AnnounceTest(t *testing.T, peerID peer.ID, sender announce.Sender) {
 	}
 	msg.SetAddrs(p2pAddrs)
 
-	if err = sender.Send(context.Background(), msg); err != nil {
-		t.Fatalf("Failed to announce: %s", err)
-	}
+	err = sender.Send(context.Background(), msg)
+	require.NoError(t, err, "Failed to announce")
 }