diff --git a/.github/workflows/gateway-conformance.yml b/.github/workflows/gateway-conformance.yml index 281cfe951..61471c015 100644 --- a/.github/workflows/gateway-conformance.yml +++ b/.github/workflows/gateway-conformance.yml @@ -61,3 +61,9 @@ jobs: with: name: gateway-conformance.html path: output.html + - name: Upload JSON report + if: failure() || success() + uses: actions/upload-artifact@v3 + with: + name: gateway-conformance.json + path: output.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 4cbba9436..9ade2c65c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,14 +16,32 @@ The following emojis are used to highlight certain changes: ### Added -* The `routing/http` client and server now support Delegated IPNS as per [IPIP-379](https://specs.ipfs.tech/ipips/ipip-0379/). +* The `routing/http` client and server now support Delegated IPNS at `/routing/v1` + as per [IPIP-379](https://specs.ipfs.tech/ipips/ipip-0379/). +* The `verifcid` package has been updated with the new Allowlist interface as part of + the effort to reduce globals. The existing global accessor functions are kept for + backwards compatibility. +* The `blockservice` and `provider` packages have been updated to accommodate the + changes in `verifcid`. ### Changed +* 🛠 `blockservice.New` now accepts variadic functional options following the [Functional + Options pattern](https://www.sohamkamani.com/golang/options-pattern/); see the usage sketch below. + ### Removed ### Fixed +- HTTP Gateway API: Not having a block now results in a 5xx error rather than a 404 +- HTTP Gateway API: CAR requests now return a 200 and a CAR file proving that a requested path does not exist, rather than returning an error +- 🛠 `MultiFileReader` has been updated with a new header that carries the encoded file name instead of the plain filename, due to a regression found in [`net/textproto`](https://github.com/golang/go/issues/60674). This only affects files with binary characters in their names. By keeping the old header, we maximize backwards compatibility. + | | New Client | Old Client | + |------------|------------|-------------| + | New Server | ✅ | 🟡* | + | Old Server | ✅ | ✅ | + *Old clients can only send Unicode file paths to the server.
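A minimal usage sketch of the `blockservice.New` options and the `verifcid` allowlist described above, assuming the import paths used elsewhere in this diff; a nil exchange keeps the service in local (offline) mode:

```go
package main

import (
	"github.com/ipfs/boxo/blockservice"
	"github.com/ipfs/boxo/blockstore"
	"github.com/ipfs/boxo/verifcid"
	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	"github.com/multiformats/go-multihash"
)

func main() {
	bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))

	// Equivalent to the deprecated NewWriteThrough constructor: writes skip the
	// "do we already have it" cache check and go straight to the blockstore.
	_ = blockservice.New(bs, nil, blockservice.WriteThrough())

	// Restrict the service to BLAKE3 via a custom allowlist; AddBlock/GetBlock
	// reject CIDs whose multihash is not on the list.
	allowlist := verifcid.NewAllowlist(map[uint64]bool{multihash.BLAKE3: true})
	_ = blockservice.New(bs, nil, blockservice.WithAllowlist(allowlist))
}
```

When no option is given, `verifcid.DefaultAllowlist` is used, matching the previous behaviour.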
+ ### Security ## [v0.11.0] diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index 0878614c5..80eb373ab 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -114,7 +114,7 @@ func BenchmarkFixedDelay(b *testing.B) { } out, _ := json.MarshalIndent(benchmarkLog, "", " ") - _ = os.WriteFile("tmp/benchmark.json", out, 0666) + _ = os.WriteFile("tmp/benchmark.json", out, 0o666) printResults(benchmarkLog) } @@ -182,28 +182,30 @@ func BenchmarkFetchFromOldBitswap(b *testing.B) { } out, _ := json.MarshalIndent(benchmarkLog, "", " ") - _ = os.WriteFile("tmp/benchmark.json", out, 0666) + _ = os.WriteFile("tmp/benchmark.json", out, 0o666) printResults(benchmarkLog) } -const datacenterSpeed = 5 * time.Millisecond -const fastSpeed = 60 * time.Millisecond -const mediumSpeed = 200 * time.Millisecond -const slowSpeed = 800 * time.Millisecond -const superSlowSpeed = 4000 * time.Millisecond -const datacenterDistribution = 3 * time.Millisecond -const distribution = 20 * time.Millisecond -const datacenterBandwidth = 125000000.0 -const datacenterBandwidthDeviation = 3000000.0 -const fastBandwidth = 1250000.0 -const fastBandwidthDeviation = 300000.0 -const mediumBandwidth = 500000.0 -const mediumBandwidthDeviation = 80000.0 -const slowBandwidth = 100000.0 -const slowBandwidthDeviation = 16500.0 -const rootBlockSize = 800 -const stdBlockSize = 8000 -const largeBlockSize = int64(256 * 1024) +const ( + datacenterSpeed = 5 * time.Millisecond + fastSpeed = 60 * time.Millisecond + mediumSpeed = 200 * time.Millisecond + slowSpeed = 800 * time.Millisecond + superSlowSpeed = 4000 * time.Millisecond + datacenterDistribution = 3 * time.Millisecond + distribution = 20 * time.Millisecond + datacenterBandwidth = 125000000.0 + datacenterBandwidthDeviation = 3000000.0 + fastBandwidth = 1250000.0 + fastBandwidthDeviation = 300000.0 + mediumBandwidth = 500000.0 + mediumBandwidthDeviation = 80000.0 + slowBandwidth = 100000.0 + slowBandwidthDeviation = 16500.0 + rootBlockSize = 800 + stdBlockSize = 8000 + largeBlockSize = int64(256 * 1024) +) func BenchmarkRealWorld(b *testing.B) { benchmarkLog = nil @@ -240,7 +242,7 @@ func BenchmarkRealWorld(b *testing.B) { subtestDistributeAndFetchRateLimited(b, 300, 200, slowNetworkDelay, slowBandwidthGenerator, stdBlockSize, bstoreLatency, allToAll, batchFetchAll) }) out, _ := json.MarshalIndent(benchmarkLog, "", " ") - _ = os.WriteFile("tmp/rw-benchmark.json", out, 0666) + _ = os.WriteFile("tmp/rw-benchmark.json", out, 0o666) printResults(benchmarkLog) } @@ -263,7 +265,7 @@ func BenchmarkDatacenter(b *testing.B) { subtestDistributeAndFetchRateLimited(b, 3, 100, datacenterNetworkDelay, datacenterBandwidthGenerator, largeBlockSize, bstoreLatency, allToAll, unixfsFileFetch) }) out, _ := json.MarshalIndent(benchmarkLog, "", " ") - _ = os.WriteFile("tmp/rb-benchmark.json", out, 0666) + _ = os.WriteFile("tmp/rb-benchmark.json", out, 0o666) printResults(benchmarkLog) } @@ -304,7 +306,7 @@ func BenchmarkDatacenterMultiLeechMultiSeed(b *testing.B) { }) out, _ := json.MarshalIndent(benchmarkLog, "", " ") - _ = os.WriteFile("tmp/rb-benchmark.json", out, 0666) + _ = os.WriteFile("tmp/rb-benchmark.json", out, 0o666) printResults(benchmarkLog) } diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 3863538ee..393ab96ad 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -44,9 +44,11 @@ type bitswap interface { WantlistForPeer(p peer.ID) []cid.Cid } -var _ exchange.SessionExchange = (*Bitswap)(nil) -var _ bitswap = (*Bitswap)(nil) -var 
HasBlockBufferSize = defaults.HasBlockBufferSize +var ( + _ exchange.SessionExchange = (*Bitswap)(nil) + _ bitswap = (*Bitswap)(nil) + HasBlockBufferSize = defaults.HasBlockBufferSize +) type Bitswap struct { *client.Client diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 7d478ca73..02bd57947 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -390,7 +390,6 @@ func TestSendToWantingPeer(t *testing.T) { if !blkrecvd.Cid().Equals(alpha.Cid()) { t.Fatal("Wrong block!") } - } func TestEmptyKey(t *testing.T) { @@ -828,6 +827,7 @@ func (tsl *testingScoreLedger) Start(scorePeer server.ScorePeerFunc) { tsl.scorePeer = scorePeer close(tsl.started) } + func (tsl *testingScoreLedger) Stop() { close(tsl.closed) } diff --git a/bitswap/client/client.go b/bitswap/client/client.go index d29eb6faf..854d030d1 100644 --- a/bitswap/client/client.go +++ b/bitswap/client/client.go @@ -132,7 +132,8 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, bstore blockstore notif notifications.PubSub, provSearchDelay time.Duration, rebroadcastDelay delay.D, - self peer.ID) bssm.Session { + self peer.ID, + ) bssm.Session { return bssession.New(sessctx, sessmgr, id, spm, pqm, sim, pm, bpm, notif, provSearchDelay, rebroadcastDelay, self) } sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.SessionPeerManager { diff --git a/bitswap/client/internal/getter/getter.go b/bitswap/client/internal/getter/getter.go index f4d0bbe1f..713394b08 100644 --- a/bitswap/client/internal/getter/getter.go +++ b/bitswap/client/internal/getter/getter.go @@ -68,7 +68,8 @@ type WantFunc func(context.Context, []cid.Cid) // blocks, a want function, and a close function, and returns a channel of // incoming blocks. func AsyncGetBlocks(ctx context.Context, sessctx context.Context, keys []cid.Cid, notif notifications.PubSub, - want WantFunc, cwants func([]cid.Cid)) (<-chan blocks.Block, error) { + want WantFunc, cwants func([]cid.Cid), +) (<-chan blocks.Block, error) { ctx, span := internal.StartSpan(ctx, "Getter.AsyncGetBlocks") defer span.End() @@ -99,8 +100,8 @@ func AsyncGetBlocks(ctx context.Context, sessctx context.Context, keys []cid.Cid // If the context is cancelled or the incoming channel closes, calls cfun with // any keys corresponding to blocks that were never received. func handleIncoming(ctx context.Context, sessctx context.Context, remaining *cid.Set, - in <-chan blocks.Block, out chan blocks.Block, cfun func([]cid.Cid)) { - + in <-chan blocks.Block, out chan blocks.Block, cfun func([]cid.Cid), +) { ctx, cancel := context.WithCancel(ctx) // Clean up before exiting this function, and call the cancel function on diff --git a/bitswap/client/internal/messagequeue/donthavetimeoutmgr.go b/bitswap/client/internal/messagequeue/donthavetimeoutmgr.go index e1b42c421..cdeee68ec 100644 --- a/bitswap/client/internal/messagequeue/donthavetimeoutmgr.go +++ b/bitswap/client/internal/messagequeue/donthavetimeoutmgr.go @@ -107,8 +107,8 @@ func newDontHaveTimeoutMgrWithParams( messageLatencyMultiplier int, maxExpectedWantProcessTime time.Duration, clock clock.Clock, - timeoutsTriggered chan struct{}) *dontHaveTimeoutMgr { - + timeoutsTriggered chan struct{}, +) *dontHaveTimeoutMgr { ctx, shutdown := context.WithCancel(context.Background()) mqp := &dontHaveTimeoutMgr{ clock: clock, @@ -222,7 +222,6 @@ func (dhtm *dontHaveTimeoutMgr) measurePingLatency() { // checkForTimeouts checks pending wants to see if any are over the timeout. 
// Note: this function should only be called within the lock. func (dhtm *dontHaveTimeoutMgr) checkForTimeouts() { - if len(dhtm.wantQueue) == 0 { return } diff --git a/bitswap/client/internal/messagequeue/messagequeue.go b/bitswap/client/internal/messagequeue/messagequeue.go index 117d46b98..4f90f239b 100644 --- a/bitswap/client/internal/messagequeue/messagequeue.go +++ b/bitswap/client/internal/messagequeue/messagequeue.go @@ -18,8 +18,10 @@ import ( "go.uber.org/zap" ) -var log = logging.Logger("bitswap") -var sflog = log.Desugar() +var ( + log = logging.Logger("bitswap") + sflog = log.Desugar() +) const ( defaultRebroadcastInterval = 30 * time.Second @@ -240,8 +242,8 @@ func newMessageQueue( maxValidLatency time.Duration, dhTimeoutMgr DontHaveTimeoutManager, clock clock.Clock, - events chan messageEvent) *MessageQueue { - + events chan messageEvent, +) *MessageQueue { ctx, cancel := context.WithCancel(ctx) return &MessageQueue{ ctx: ctx, diff --git a/bitswap/client/internal/messagequeue/messagequeue_test.go b/bitswap/client/internal/messagequeue/messagequeue_test.go index 59788f50b..886e60772 100644 --- a/bitswap/client/internal/messagequeue/messagequeue_test.go +++ b/bitswap/client/internal/messagequeue/messagequeue_test.go @@ -61,6 +61,7 @@ func (fp *fakeDontHaveTimeoutMgr) AddPending(ks []cid.Cid) { } fp.ks = s.Keys() } + func (fp *fakeDontHaveTimeoutMgr) CancelPending(ks []cid.Cid) { fp.lk.Lock() defer fp.lk.Unlock() @@ -74,18 +75,21 @@ func (fp *fakeDontHaveTimeoutMgr) CancelPending(ks []cid.Cid) { } fp.ks = s.Keys() } + func (fp *fakeDontHaveTimeoutMgr) UpdateMessageLatency(elapsed time.Duration) { fp.lk.Lock() defer fp.lk.Unlock() fp.latencyUpds = append(fp.latencyUpds, elapsed) } + func (fp *fakeDontHaveTimeoutMgr) latencyUpdates() []time.Duration { fp.lk.Lock() defer fp.lk.Unlock() return fp.latencyUpds } + func (fp *fakeDontHaveTimeoutMgr) pendingCount() int { fp.lk.Lock() defer fp.lk.Unlock() @@ -101,8 +105,8 @@ type fakeMessageSender struct { } func newFakeMessageSender(reset chan<- struct{}, - messagesSent chan<- []bsmsg.Entry, supportsHave bool) *fakeMessageSender { - + messagesSent chan<- []bsmsg.Entry, supportsHave bool, +) *fakeMessageSender { return &fakeMessageSender{ reset: reset, messagesSent: messagesSent, @@ -126,7 +130,8 @@ func mockTimeoutCb(peer.ID, []cid.Cid) {} func collectMessages(ctx context.Context, t *testing.T, messagesSent <-chan []bsmsg.Entry, - timeout time.Duration) [][]bsmsg.Entry { + timeout time.Duration, +) [][]bsmsg.Entry { var messagesReceived [][]bsmsg.Entry timeoutctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() diff --git a/bitswap/client/internal/notifications/notifications.go b/bitswap/client/internal/notifications/notifications.go index dc6dda899..499a61c42 100644 --- a/bitswap/client/internal/notifications/notifications.go +++ b/bitswap/client/internal/notifications/notifications.go @@ -68,7 +68,6 @@ func (ps *impl) Shutdown() { // is closed if the |ctx| times out or is cancelled, or after receiving the blocks // corresponding to |keys|. 
func (ps *impl) Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Block { - blocksCh := make(chan blocks.Block, len(keys)) valuesCh := make(chan interface{}, len(keys)) // provide our own channel to control buffer, prevent blocking if len(keys) == 0 { diff --git a/bitswap/client/internal/notifications/notifications_test.go b/bitswap/client/internal/notifications/notifications_test.go index 25b580f6a..b4b8ef55d 100644 --- a/bitswap/client/internal/notifications/notifications_test.go +++ b/bitswap/client/internal/notifications/notifications_test.go @@ -58,7 +58,6 @@ func TestPublishSubscribe(t *testing.T) { } assertBlocksEqual(t, blockRecvd, blockSent) - } func TestSubscribeMany(t *testing.T) { diff --git a/bitswap/client/internal/peermanager/peermanager_test.go b/bitswap/client/internal/peermanager/peermanager_test.go index 40e1f072c..a2569201f 100644 --- a/bitswap/client/internal/peermanager/peermanager_test.go +++ b/bitswap/client/internal/peermanager/peermanager_test.go @@ -30,12 +30,15 @@ func (fp *mockPeerQueue) Shutdown() {} func (fp *mockPeerQueue) AddBroadcastWantHaves(whs []cid.Cid) { fp.msgs <- msg{fp.p, nil, whs, nil} } + func (fp *mockPeerQueue) AddWants(wbs []cid.Cid, whs []cid.Cid) { fp.msgs <- msg{fp.p, wbs, whs, nil} } + func (fp *mockPeerQueue) AddCancels(cs []cid.Cid) { fp.msgs <- msg{fp.p, nil, nil, cs} } + func (fp *mockPeerQueue) ResponseReceived(ks []cid.Cid) { } @@ -271,6 +274,7 @@ func TestSendCancels(t *testing.T) { func (s *sess) ID() uint64 { return s.id } + func (s *sess) SignalAvailability(p peer.ID, isAvailable bool) { s.available[p] = isAvailable } @@ -332,8 +336,7 @@ func TestSessionRegistration(t *testing.T) { } } -type benchPeerQueue struct { -} +type benchPeerQueue struct{} func (*benchPeerQueue) Startup() {} func (*benchPeerQueue) Shutdown() {} diff --git a/bitswap/client/internal/peermanager/peerwantmanager_test.go b/bitswap/client/internal/peermanager/peerwantmanager_test.go index 618217d6b..b4c3d5029 100644 --- a/bitswap/client/internal/peermanager/peerwantmanager_test.go +++ b/bitswap/client/internal/peermanager/peerwantmanager_test.go @@ -16,6 +16,7 @@ type gauge struct { func (g *gauge) Inc() { g.count++ } + func (g *gauge) Dec() { g.count-- } @@ -40,13 +41,16 @@ func (mpq *mockPQ) Shutdown() {} func (mpq *mockPQ) AddBroadcastWantHaves(whs []cid.Cid) { mpq.bcst = append(mpq.bcst, whs...) } + func (mpq *mockPQ) AddWants(wbs []cid.Cid, whs []cid.Cid) { mpq.wbs = append(mpq.wbs, wbs...) mpq.whs = append(mpq.whs, whs...) } + func (mpq *mockPQ) AddCancels(cs []cid.Cid) { mpq.cancels = append(mpq.cancels, cs...) 
} + func (mpq *mockPQ) ResponseReceived(ks []cid.Cid) { } diff --git a/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go b/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go index 6cf5fa4a2..04b309286 100644 --- a/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go +++ b/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go @@ -95,7 +95,6 @@ func TestNormalSimultaneousFetch(t *testing.T) { if fpn.queriesMade != 2 { t.Fatal("Did not dedup provider requests running simultaneously") } - } func TestDedupingProviderRequests(t *testing.T) { @@ -256,7 +255,6 @@ func TestPeersWithConnectionErrorsNotAddedToPeerList(t *testing.T) { if len(firstPeersReceived) != 0 || len(secondPeersReceived) != 0 { t.Fatal("Did not filter out peers with connection issues") } - } func TestRateLimitingRequests(t *testing.T) { diff --git a/bitswap/client/internal/session/session.go b/bitswap/client/internal/session/session.go index 127004dc7..39266a5e6 100644 --- a/bitswap/client/internal/session/session.go +++ b/bitswap/client/internal/session/session.go @@ -18,8 +18,10 @@ import ( "go.uber.org/zap" ) -var log = logging.Logger("bs:sess") -var sflog = log.Desugar() +var ( + log = logging.Logger("bs:sess") + sflog = log.Desugar() +) const ( broadcastLiveWantsLimit = 64 @@ -146,8 +148,8 @@ func New( notif notifications.PubSub, initialSearchDelay time.Duration, periodicSearchDelay delay.D, - self peer.ID) *Session { - + self peer.ID, +) *Session { ctx, cancel := context.WithCancel(ctx) s := &Session{ sw: newSessionWants(broadcastLiveWantsLimit), diff --git a/bitswap/client/internal/session/session_test.go b/bitswap/client/internal/session/session_test.go index b60c7d1af..cf6de1e5a 100644 --- a/bitswap/client/internal/session/session_test.go +++ b/bitswap/client/internal/session/session_test.go @@ -170,7 +170,6 @@ func TestSessionGetBlocks(t *testing.T) { } _, err := session.GetBlocks(ctx, cids) - if err != nil { t.Fatal("error getting blocks") } @@ -344,7 +343,6 @@ func TestSessionOnPeersExhausted(t *testing.T) { cids = append(cids, block.Cid()) } _, err := session.GetBlocks(ctx, cids) - if err != nil { t.Fatal("error getting blocks") } diff --git a/bitswap/client/internal/session/sessionwantsender.go b/bitswap/client/internal/session/sessionwantsender.go index 41145fbf6..390fdf29d 100644 --- a/bitswap/client/internal/session/sessionwantsender.go +++ b/bitswap/client/internal/session/sessionwantsender.go @@ -67,8 +67,10 @@ type change struct { availability peerAvailability } -type onSendFn func(to peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) -type onPeersExhaustedFn func([]cid.Cid) +type ( + onSendFn func(to peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) + onPeersExhaustedFn func([]cid.Cid) +) // sessionWantSender is responsible for sending want-have and want-block to // peers. 
For each want, it sends a single optimistic want-block request to @@ -111,8 +113,8 @@ type sessionWantSender struct { } func newSessionWantSender(sid uint64, pm PeerManager, spm SessionPeerManager, canceller SessionWantsCanceller, - bpm *bsbpm.BlockPresenceManager, onSend onSendFn, onPeersExhausted onPeersExhaustedFn) sessionWantSender { - + bpm *bsbpm.BlockPresenceManager, onSend onSendFn, onPeersExhausted onPeersExhaustedFn, +) sessionWantSender { ctx, cancel := context.WithCancel(context.Background()) sws := sessionWantSender{ ctx: ctx, diff --git a/bitswap/client/internal/session/sessionwantsender_test.go b/bitswap/client/internal/session/sessionwantsender_test.go index 97ff788a9..476b13991 100644 --- a/bitswap/client/internal/session/sessionwantsender_test.go +++ b/bitswap/client/internal/session/sessionwantsender_test.go @@ -34,13 +34,14 @@ func (sw *sentWants) add(wantBlocks []cid.Cid, wantHaves []cid.Cid) { sw.wantHaves.Add(c) } } - } + func (sw *sentWants) wantHavesKeys() []cid.Cid { sw.Lock() defer sw.Unlock() return sw.wantHaves.Keys() } + func (sw *sentWants) wantBlocksKeys() []cid.Cid { sw.Lock() defer sw.Unlock() diff --git a/bitswap/client/internal/sessionmanager/sessionmanager.go b/bitswap/client/internal/sessionmanager/sessionmanager.go index 38e490a2e..a75a3f769 100644 --- a/bitswap/client/internal/sessionmanager/sessionmanager.go +++ b/bitswap/client/internal/sessionmanager/sessionmanager.go @@ -69,8 +69,8 @@ type SessionManager struct { // New creates a new SessionManager. func New(ctx context.Context, sessionFactory SessionFactory, sessionInterestManager *bssim.SessionInterestManager, peerManagerFactory PeerManagerFactory, - blockPresenceManager *bsbpm.BlockPresenceManager, peerManager bssession.PeerManager, notif notifications.PubSub, self peer.ID) *SessionManager { - + blockPresenceManager *bsbpm.BlockPresenceManager, peerManager bssession.PeerManager, notif notifications.PubSub, self peer.ID, +) *SessionManager { return &SessionManager{ ctx: ctx, sessionFactory: sessionFactory, @@ -88,7 +88,8 @@ func New(ctx context.Context, sessionFactory SessionFactory, sessionInterestMana // session manager. func (sm *SessionManager) NewSession(ctx context.Context, provSearchDelay time.Duration, - rebroadcastDelay delay.D) exchange.Fetcher { + rebroadcastDelay delay.D, +) exchange.Fetcher { id := sm.GetNextSessionID() ctx, span := internal.StartSpan(ctx, "SessionManager.NewSession", trace.WithAttributes(attribute.String("ID", strconv.FormatUint(id, 10)))) diff --git a/bitswap/client/internal/sessionmanager/sessionmanager_test.go b/bitswap/client/internal/sessionmanager/sessionmanager_test.go index 51dde7fda..5ecabfdb3 100644 --- a/bitswap/client/internal/sessionmanager/sessionmanager_test.go +++ b/bitswap/client/internal/sessionmanager/sessionmanager_test.go @@ -33,23 +33,26 @@ type fakeSession struct { func (*fakeSession) GetBlock(context.Context, cid.Cid) (blocks.Block, error) { return nil, nil } + func (*fakeSession) GetBlocks(context.Context, []cid.Cid) (<-chan blocks.Block, error) { return nil, nil } + func (fs *fakeSession) ID() uint64 { return fs.id } + func (fs *fakeSession) ReceiveFrom(p peer.ID, ks []cid.Cid, wantBlocks []cid.Cid, wantHaves []cid.Cid) { fs.ks = append(fs.ks, ks...) fs.wantBlocks = append(fs.wantBlocks, wantBlocks...) fs.wantHaves = append(fs.wantHaves, wantHaves...) 
} + func (fs *fakeSession) Shutdown() { fs.sm.RemoveSession(fs.id) } -type fakeSesPeerManager struct { -} +type fakeSesPeerManager struct{} func (*fakeSesPeerManager) Peers() []peer.ID { return nil } func (*fakeSesPeerManager) PeersDiscovered() bool { return false } @@ -73,6 +76,7 @@ func (fpm *fakePeerManager) SendCancels(ctx context.Context, cancels []cid.Cid) defer fpm.lk.Unlock() fpm.cancels = append(fpm.cancels, cancels...) } + func (fpm *fakePeerManager) cancelled() []cid.Cid { fpm.lk.Lock() defer fpm.lk.Unlock() @@ -89,7 +93,8 @@ func sessionFactory(ctx context.Context, notif notifications.PubSub, provSearchDelay time.Duration, rebroadcastDelay delay.D, - self peer.ID) Session { + self peer.ID, +) Session { fs := &fakeSession{ id: id, pm: sprm.(*fakeSesPeerManager), diff --git a/bitswap/client/wantlist/wantlist_test.go b/bitswap/client/wantlist/wantlist_test.go index 829af50a6..035786ea3 100644 --- a/bitswap/client/wantlist/wantlist_test.go +++ b/bitswap/client/wantlist/wantlist_test.go @@ -24,7 +24,6 @@ func init() { } testcids = append(testcids, c) } - } type wli interface { @@ -236,7 +235,6 @@ func TestSortEntries(t *testing.T) { !entries[2].Cid.Equals(testcids[0]) { t.Fatal("wrong order") } - } // Test adding and removing interleaved with checking entries to make sure we clear the cache. diff --git a/bitswap/internal/testutil/testutil.go b/bitswap/internal/testutil/testutil.go index d35bdb743..eb633bfc3 100644 --- a/bitswap/internal/testutil/testutil.go +++ b/bitswap/internal/testutil/testutil.go @@ -12,8 +12,10 @@ import ( peer "github.com/libp2p/go-libp2p/core/peer" ) -var blockGenerator = blocksutil.NewBlockGenerator() -var prioritySeq int32 +var ( + blockGenerator = blocksutil.NewBlockGenerator() + prioritySeq int32 +) // GenerateBlocksOfSize generates a series of blocks of the given byte size func GenerateBlocksOfSize(n int, size int64) []blocks.Block { diff --git a/bitswap/message/message_test.go b/bitswap/message/message_test.go index 520abd22b..ec07cbcff 100644 --- a/bitswap/message/message_test.go +++ b/bitswap/message/message_test.go @@ -47,7 +47,6 @@ func TestNewMessageFromProto(t *testing.T) { } func TestAppendBlock(t *testing.T) { - strs := make([]string, 2) strs = append(strs, "Celeritas") strs = append(strs, "Incendia") @@ -78,7 +77,6 @@ func TestWantlist(t *testing.T) { for _, k := range exported { present := false for _, s := range keystrs { - if s.Equals(k.Cid) { present = true } @@ -135,7 +133,6 @@ func TestToNetFromNetPreservesWantList(t *testing.T) { } func TestToAndFromNetMessage(t *testing.T) { - original := New(true) original.AddBlock(blocks.NewBlock([]byte("W"))) original.AddBlock(blocks.NewBlock([]byte("E"))) diff --git a/bitswap/message/pb/cid_test.go b/bitswap/message/pb/cid_test.go index 490e6b997..6d346488a 100644 --- a/bitswap/message/pb/cid_test.go +++ b/bitswap/message/pb/cid_test.go @@ -11,7 +11,7 @@ import ( ) func TestCID(t *testing.T) { - var expected = [...]byte{ + expected := [...]byte{ 10, 34, 18, 32, 195, 171, 143, 241, 55, 32, 232, 173, 144, 71, 221, 57, 70, 107, diff --git a/bitswap/message/pb/message.pb.go b/bitswap/message/pb/message.pb.go index ec80b2512..cf22ce525 100644 --- a/bitswap/message/pb/message.pb.go +++ b/bitswap/message/pb/message.pb.go @@ -5,17 +5,20 @@ package bitswap_message_pb import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" ) // Reference 
imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +var ( + _ = proto.Marshal + _ = fmt.Errorf + _ = math.Inf +) // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. @@ -87,9 +90,11 @@ func (*Message) ProtoMessage() {} func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor_33c57e4bae7b9afd, []int{0} } + func (m *Message) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } + func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_Message.Marshal(b, m, deterministic) @@ -102,12 +107,15 @@ func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } + func (m *Message) XXX_Merge(src proto.Message) { xxx_messageInfo_Message.Merge(m, src) } + func (m *Message) XXX_Size() int { return m.Size() } + func (m *Message) XXX_DiscardUnknown() { xxx_messageInfo_Message.DiscardUnknown(m) } @@ -160,9 +168,11 @@ func (*Message_Wantlist) ProtoMessage() {} func (*Message_Wantlist) Descriptor() ([]byte, []int) { return fileDescriptor_33c57e4bae7b9afd, []int{0, 0} } + func (m *Message_Wantlist) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } + func (m *Message_Wantlist) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_Message_Wantlist.Marshal(b, m, deterministic) @@ -175,12 +185,15 @@ func (m *Message_Wantlist) XXX_Marshal(b []byte, deterministic bool) ([]byte, er return b[:n], nil } } + func (m *Message_Wantlist) XXX_Merge(src proto.Message) { xxx_messageInfo_Message_Wantlist.Merge(m, src) } + func (m *Message_Wantlist) XXX_Size() int { return m.Size() } + func (m *Message_Wantlist) XXX_DiscardUnknown() { xxx_messageInfo_Message_Wantlist.DiscardUnknown(m) } @@ -215,9 +228,11 @@ func (*Message_Wantlist_Entry) ProtoMessage() {} func (*Message_Wantlist_Entry) Descriptor() ([]byte, []int) { return fileDescriptor_33c57e4bae7b9afd, []int{0, 0, 0} } + func (m *Message_Wantlist_Entry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } + func (m *Message_Wantlist_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_Message_Wantlist_Entry.Marshal(b, m, deterministic) @@ -230,12 +245,15 @@ func (m *Message_Wantlist_Entry) XXX_Marshal(b []byte, deterministic bool) ([]by return b[:n], nil } } + func (m *Message_Wantlist_Entry) XXX_Merge(src proto.Message) { xxx_messageInfo_Message_Wantlist_Entry.Merge(m, src) } + func (m *Message_Wantlist_Entry) XXX_Size() int { return m.Size() } + func (m *Message_Wantlist_Entry) XXX_DiscardUnknown() { xxx_messageInfo_Message_Wantlist_Entry.DiscardUnknown(m) } @@ -281,9 +299,11 @@ func (*Message_Block) ProtoMessage() {} func (*Message_Block) Descriptor() ([]byte, []int) { return fileDescriptor_33c57e4bae7b9afd, []int{0, 1} } + func (m *Message_Block) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } + func (m *Message_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_Message_Block.Marshal(b, m, deterministic) @@ -296,12 +316,15 @@ func (m *Message_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return b[:n], nil } } + func (m *Message_Block) XXX_Merge(src proto.Message) { xxx_messageInfo_Message_Block.Merge(m, src) } + func (m *Message_Block) XXX_Size() int { return m.Size() } + func (m *Message_Block) 
XXX_DiscardUnknown() { xxx_messageInfo_Message_Block.DiscardUnknown(m) } @@ -333,9 +356,11 @@ func (*Message_BlockPresence) ProtoMessage() {} func (*Message_BlockPresence) Descriptor() ([]byte, []int) { return fileDescriptor_33c57e4bae7b9afd, []int{0, 2} } + func (m *Message_BlockPresence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } + func (m *Message_BlockPresence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_Message_BlockPresence.Marshal(b, m, deterministic) @@ -348,12 +373,15 @@ func (m *Message_BlockPresence) XXX_Marshal(b []byte, deterministic bool) ([]byt return b[:n], nil } } + func (m *Message_BlockPresence) XXX_Merge(src proto.Message) { xxx_messageInfo_Message_BlockPresence.Merge(m, src) } + func (m *Message_BlockPresence) XXX_Size() int { return m.Size() } + func (m *Message_BlockPresence) XXX_DiscardUnknown() { xxx_messageInfo_Message_BlockPresence.DiscardUnknown(m) } @@ -685,6 +713,7 @@ func encodeVarintMessage(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } + func (m *Message) Size() (n int) { if m == nil { return 0 @@ -792,9 +821,11 @@ func (m *Message_BlockPresence) Size() (n int) { func sovMessage(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } + func sozMessage(x uint64) (n int) { return sovMessage(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } + func (m *Message) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -997,6 +1028,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } return nil } + func (m *Message_Wantlist) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1101,6 +1133,7 @@ func (m *Message_Wantlist) Unmarshal(dAtA []byte) error { } return nil } + func (m *Message_Wantlist_Entry) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1262,6 +1295,7 @@ func (m *Message_Wantlist_Entry) Unmarshal(dAtA []byte) error { } return nil } + func (m *Message_Block) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1380,6 +1414,7 @@ func (m *Message_Block) Unmarshal(dAtA []byte) error { } return nil } + func (m *Message_BlockPresence) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1482,6 +1517,7 @@ func (m *Message_BlockPresence) Unmarshal(dAtA []byte) error { } return nil } + func skipMessage(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 039121cfc..a1446775c 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -30,10 +30,12 @@ var log = logging.Logger("bitswap_network") var connectTimeout = time.Second * 5 -var maxSendTimeout = 2 * time.Minute -var minSendTimeout = 10 * time.Second -var sendLatency = 2 * time.Second -var minSendRate = (100 * 1000) / 8 // 100kbit/s +var ( + maxSendTimeout = 2 * time.Minute + minSendTimeout = 10 * time.Second + sendLatency = 2 * time.Second + minSendRate = (100 * 1000) / 8 // 100kbit/s +) // NewFromIpfsHost returns a BitSwapNetwork supported by underlying IPFS host. 
func NewFromIpfsHost(host host.Host, r routing.ContentRouting, opts ...NetOpt) BitSwapNetwork { @@ -284,7 +286,6 @@ func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID, opts *Messag _, err := sender.Connect(ctx) return err }) - if err != nil { return nil, err } @@ -320,8 +321,8 @@ func sendTimeout(size int) time.Duration { func (bsnet *impl) SendMessage( ctx context.Context, p peer.ID, - outgoing bsmsg.BitSwapMessage) error { - + outgoing bsmsg.BitSwapMessage, +) error { tctx, cancel := context.WithTimeout(ctx, connectTimeout) defer cancel() @@ -357,7 +358,6 @@ func (bsnet *impl) Start(r ...Receiver) { } bsnet.host.Network().Notify((*netNotifiee)(bsnet)) bsnet.connectEvtMgr.Start() - } func (bsnet *impl) Stop() { @@ -458,6 +458,7 @@ func (nn *netNotifiee) Connected(n network.Network, v network.Conn) { nn.impl().connectEvtMgr.Connected(v.RemotePeer()) } + func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) { // Only record a "disconnect" when we actually disconnect. if n.Connectedness(v.RemotePeer()) == network.Connected { diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go index 61b00baa3..175957799 100644 --- a/bitswap/network/ipfs_impl_test.go +++ b/bitswap/network/ipfs_impl_test.go @@ -47,7 +47,8 @@ func newReceiver() *receiver { func (r *receiver) ReceiveMessage( ctx context.Context, sender peer.ID, - incoming bsmsg.BitSwapMessage) { + incoming bsmsg.BitSwapMessage, +) { r.lastSender = sender r.lastMessage = incoming select { diff --git a/bitswap/server/internal/decision/engine.go b/bitswap/server/internal/decision/engine.go index 02f317e07..a7b6c11d0 100644 --- a/bitswap/server/internal/decision/engine.go +++ b/bitswap/server/internal/decision/engine.go @@ -456,7 +456,6 @@ func (e *Engine) StartWorkers(ctx context.Context, px process.Process) { e.taskWorker(ctx) }) } - } func (e *Engine) onPeerAdded(p peer.ID) { diff --git a/bitswap/server/internal/decision/engine_test.go b/bitswap/server/internal/decision/engine_test.go index 9b4dbc2de..922836042 100644 --- a/bitswap/server/internal/decision/engine_test.go +++ b/bitswap/server/internal/decision/engine_test.go @@ -104,8 +104,7 @@ func newTestEngineWithSampling(ctx context.Context, idStr string, peerSampleInte e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, append(opts[:len(opts):len(opts)], WithScoreLedger(NewTestScoreLedger(peerSampleInterval, sampleCh, clock)), WithBlockstoreWorkerCount(4))...) 
e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) return engineSet{ - Peer: peer.ID(idStr), - //Strategy: New(true), + Peer: peer.ID(idStr), PeerTagger: fpt, Blockstore: bs, Engine: e, diff --git a/bitswap/server/server.go b/bitswap/server/server.go index 9c8e4cdb3..f71d6f5f1 100644 --- a/bitswap/server/server.go +++ b/bitswap/server/server.go @@ -28,8 +28,10 @@ import ( var provideKeysBufferSize = 2048 -var log = logging.Logger("bitswap-server") -var sflog = log.Desugar() +var ( + log = logging.Logger("bitswap-server") + sflog = log.Desugar() +) const provideWorkerMax = 6 @@ -545,11 +547,12 @@ func (*Server) ReceiveError(err error) { log.Infof("Bitswap Client ReceiveError: %s", err) // TODO log the network error // TODO bubble the network error up to the parent context/error logger - } + func (bs *Server) PeerConnected(p peer.ID) { bs.engine.PeerConnected(p) } + func (bs *Server) PeerDisconnected(p peer.ID) { bs.engine.PeerDisconnected(p) } diff --git a/bitswap/testnet/internet_latency_delay_generator.go b/bitswap/testnet/internet_latency_delay_generator.go index 25b9f5b80..d133ae4cb 100644 --- a/bitswap/testnet/internet_latency_delay_generator.go +++ b/bitswap/testnet/internet_latency_delay_generator.go @@ -27,7 +27,8 @@ func InternetLatencyDelayGenerator( percentMedium float64, percentLarge float64, std time.Duration, - rng *rand.Rand) delay.Generator { + rng *rand.Rand, +) delay.Generator { if rng == nil { rng = sharedRNG } diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index fc055a2d1..0947eff3e 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -31,8 +31,8 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { responder.Start(lambda(func( ctx context.Context, fromWaiter peer.ID, - msgFromWaiter bsmsg.BitSwapMessage) { - + msgFromWaiter bsmsg.BitSwapMessage, + ) { msgToWaiter := bsmsg.New(true) msgToWaiter.AddBlock(blocks.NewBlock([]byte(expectedStr))) err := waiter.SendMessage(ctx, fromWaiter, msgToWaiter) @@ -45,8 +45,8 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) { waiter.Start(lambda(func( ctx context.Context, fromResponder peer.ID, - msgFromResponder bsmsg.BitSwapMessage) { - + msgFromResponder bsmsg.BitSwapMessage, + ) { // TODO assert that this came from the correct peer and that the message contents are as expected ok := false for _, b := range msgFromResponder.Blocks() { @@ -88,7 +88,8 @@ type lambdaImpl struct { } func (lam *lambdaImpl) ReceiveMessage(ctx context.Context, - p peer.ID, incoming bsmsg.BitSwapMessage) { + p peer.ID, incoming bsmsg.BitSwapMessage, +) { lam.f(ctx, p, incoming) } @@ -99,6 +100,7 @@ func (lam *lambdaImpl) ReceiveError(err error) { func (lam *lambdaImpl) PeerConnected(p peer.ID) { // TODO } + func (lam *lambdaImpl) PeerDisconnected(peer.ID) { // TODO } diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 0deb1b1ab..914044aed 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -126,8 +126,8 @@ func (n *network) SendMessage( ctx context.Context, from peer.ID, to peer.ID, - mes bsmsg.BitSwapMessage) error { - + mes bsmsg.BitSwapMessage, +) error { mes = mes.Clone() n.mu.Lock() @@ -213,6 +213,7 @@ func (nc *networkClient) PeerConnected(p peer.ID) { v.PeerConnected(p) } } + func (nc *networkClient) PeerDisconnected(p peer.ID) { for _, v := range nc.receivers { v.PeerDisconnected(p) @@ -236,7 +237,8 @@ func (nc *networkClient) Latency(p peer.ID) time.Duration { func (nc *networkClient) SendMessage( 
ctx context.Context, to peer.ID, - message bsmsg.BitSwapMessage) error { + message bsmsg.BitSwapMessage, +) error { if err := nc.network.SendMessage(ctx, nc.local, to, message); err != nil { return err } diff --git a/blockservice/blockservice.go b/blockservice/blockservice.go index 773fb5303..37c4b35da 100644 --- a/blockservice/blockservice.go +++ b/blockservice/blockservice.go @@ -1,4 +1,4 @@ -// package blockservice implements a BlockService interface that provides +// Package blockservice implements a BlockService interface that provides // a single GetBlock/AddBlock interface that seamlessly retrieves data either // locally or from a remote peer through the exchange. package blockservice @@ -11,11 +11,11 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - blockstore "github.com/ipfs/boxo/blockstore" - exchange "github.com/ipfs/boxo/exchange" + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/exchange" "github.com/ipfs/boxo/verifcid" blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" logging "github.com/ipfs/go-log/v2" @@ -64,7 +64,15 @@ type BlockService interface { DeleteBlock(ctx context.Context, o cid.Cid) error } +// BoundedBlockService is a BlockService bounded by a strict multihash Allowlist. +type BoundedBlockService interface { + BlockService + + Allowlist() verifcid.Allowlist +} + type blockService struct { + allowlist verifcid.Allowlist blockstore blockstore.Blockstore exchange exchange.Interface // If checkFirst is true then first check that a block doesn't @@ -72,31 +80,49 @@ type blockService struct { checkFirst bool } -// NewBlockService creates a BlockService with given datastore instance. -func New(bs blockstore.Blockstore, rem exchange.Interface) BlockService { - if rem == nil { - logger.Debug("blockservice running in local (offline) mode.") +type Option func(*blockService) + +// WriteThrough disables cache checks for writes and makes them go straight to +// the blockstore. +func WriteThrough() Option { + return func(bs *blockService) { + bs.checkFirst = false } +} - return &blockService{ - blockstore: bs, - exchange: rem, - checkFirst: true, +// WithAllowlist sets a custom [verifcid.Allowlist] which will be used to validate CIDs. +func WithAllowlist(allowlist verifcid.Allowlist) Option { + return func(bs *blockService) { + bs.allowlist = allowlist } } -// NewWriteThrough creates a BlockService that guarantees writes will go -// through to the blockstore and are not skipped by cache checks. -func NewWriteThrough(bs blockstore.Blockstore, rem exchange.Interface) BlockService { - if rem == nil { +// New creates a BlockService with the given blockstore and exchange. +func New(bs blockstore.Blockstore, exchange exchange.Interface, opts ...Option) BlockService { + if exchange == nil { logger.Debug("blockservice running in local (offline) mode.") } - return &blockService{ + service := &blockService{ + allowlist: verifcid.DefaultAllowlist, blockstore: bs, - exchange: rem, - checkFirst: false, + exchange: exchange, + checkFirst: true, } + + for _, opt := range opts { + opt(service) + } + + return service +} + +// NewWriteThrough creates a BlockService that guarantees writes will go +// through to the blockstore and are not skipped by cache checks. +// +// Deprecated: Use [New] with the [WriteThrough] option.
+func NewWriteThrough(bs blockstore.Blockstore, exchange exchange.Interface) BlockService { + return New(bs, exchange, WriteThrough()) } // Blockstore returns the blockstore behind this blockservice. @@ -109,27 +135,37 @@ func (s *blockService) Exchange() exchange.Interface { return s.exchange } +func (s *blockService) Allowlist() verifcid.Allowlist { + return s.allowlist +} + // NewSession creates a new session that allows for // controlled exchange of wantlists to decrease the bandwidth overhead. // If the current exchange is a SessionExchange, a new exchange // session will be created. Otherwise, the current exchange will be used // directly. func NewSession(ctx context.Context, bs BlockService) *Session { + allowlist := verifcid.Allowlist(verifcid.DefaultAllowlist) + if bbs, ok := bs.(BoundedBlockService); ok { + allowlist = bbs.Allowlist() + } exch := bs.Exchange() if sessEx, ok := exch.(exchange.SessionExchange); ok { return &Session{ - sessCtx: ctx, - ses: nil, - sessEx: sessEx, - bs: bs.Blockstore(), - notifier: exch, + allowlist: allowlist, + sessCtx: ctx, + ses: nil, + sessEx: sessEx, + bs: bs.Blockstore(), + notifier: exch, } } return &Session{ - ses: exch, - sessCtx: ctx, - bs: bs.Blockstore(), - notifier: exch, + allowlist: allowlist, + ses: exch, + sessCtx: ctx, + bs: bs.Blockstore(), + notifier: exch, } } @@ -139,8 +175,7 @@ func (s *blockService) AddBlock(ctx context.Context, o blocks.Block) error { defer span.End() c := o.Cid() - // hash security - err := verifcid.ValidateCid(c) + err := verifcid.ValidateCid(s.allowlist, c) // hash security if err != nil { return err } @@ -171,7 +206,7 @@ func (s *blockService) AddBlocks(ctx context.Context, bs []blocks.Block) error { // hash security for _, b := range bs { - err := verifcid.ValidateCid(b.Cid()) + err := verifcid.ValidateCid(s.allowlist, b.Cid()) if err != nil { return err } @@ -221,15 +256,15 @@ func (s *blockService) GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, e f = s.getExchange } - return getBlock(ctx, c, s.blockstore, f) // hash security + return getBlock(ctx, c, s.blockstore, s.allowlist, f) } func (s *blockService) getExchange() notifiableFetcher { return s.exchange } -func getBlock(ctx context.Context, c cid.Cid, bs blockstore.Blockstore, fget func() notifiableFetcher) (blocks.Block, error) { - err := verifcid.ValidateCid(c) // hash security +func getBlock(ctx context.Context, c cid.Cid, bs blockstore.Blockstore, allowlist verifcid.Allowlist, fget func() notifiableFetcher) (blocks.Block, error) { + err := verifcid.ValidateCid(allowlist, c) // hash security if err != nil { return nil, err } @@ -278,10 +313,10 @@ func (s *blockService) GetBlocks(ctx context.Context, ks []cid.Cid) <-chan block f = s.getExchange } - return getBlocks(ctx, ks, s.blockstore, f) // hash security + return getBlocks(ctx, ks, s.blockstore, s.allowlist, f) } -func getBlocks(ctx context.Context, ks []cid.Cid, bs blockstore.Blockstore, fget func() notifiableFetcher) <-chan blocks.Block { +func getBlocks(ctx context.Context, ks []cid.Cid, bs blockstore.Blockstore, allowlist verifcid.Allowlist, fget func() notifiableFetcher) <-chan blocks.Block { out := make(chan blocks.Block) go func() { @@ -289,7 +324,7 @@ func getBlocks(ctx context.Context, ks []cid.Cid, bs blockstore.Blockstore, fget allValid := true for _, c := range ks { - if err := verifcid.ValidateCid(c); err != nil { + if err := verifcid.ValidateCid(allowlist, c); err != nil { allValid = false break } @@ -300,7 +335,7 @@ func getBlocks(ctx context.Context, ks []cid.Cid, bs 
blockstore.Blockstore, fget ks2 := make([]cid.Cid, 0, len(ks)) for _, c := range ks { // hash security - if err := verifcid.ValidateCid(c); err == nil { + if err := verifcid.ValidateCid(allowlist, c); err == nil { ks2 = append(ks2, c) } else { logger.Errorf("unsafe CID (%s) passed to blockService.GetBlocks: %s", c, err) @@ -396,12 +431,13 @@ type notifier interface { // Session is a helper type to provide higher level access to bitswap sessions type Session struct { - bs blockstore.Blockstore - ses exchange.Fetcher - sessEx exchange.SessionExchange - sessCtx context.Context - notifier notifier - lk sync.Mutex + allowlist verifcid.Allowlist + bs blockstore.Blockstore + ses exchange.Fetcher + sessEx exchange.SessionExchange + sessCtx context.Context + notifier notifier + lk sync.Mutex } type notifiableFetcher interface { @@ -444,7 +480,7 @@ func (s *Session) GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, error) ctx, span := internal.StartSpan(ctx, "Session.GetBlock", trace.WithAttributes(attribute.Stringer("CID", c))) defer span.End() - return getBlock(ctx, c, s.bs, s.getFetcherFactory()) // hash security + return getBlock(ctx, c, s.bs, s.allowlist, s.getFetcherFactory()) } // GetBlocks gets blocks in the context of a request session @@ -452,7 +488,7 @@ func (s *Session) GetBlocks(ctx context.Context, ks []cid.Cid) <-chan blocks.Blo ctx, span := internal.StartSpan(ctx, "Session.GetBlocks") defer span.End() - return getBlocks(ctx, ks, s.bs, s.getFetcherFactory()) // hash security + return getBlocks(ctx, ks, s.bs, s.allowlist, s.getFetcherFactory()) } var _ BlockGetter = (*Session)(nil) diff --git a/blockservice/blockservice_test.go b/blockservice/blockservice_test.go index 14396c8a1..e36058040 100644 --- a/blockservice/blockservice_test.go +++ b/blockservice/blockservice_test.go @@ -7,15 +7,20 @@ import ( blockstore "github.com/ipfs/boxo/blockstore" exchange "github.com/ipfs/boxo/exchange" offline "github.com/ipfs/boxo/exchange/offline" + "github.com/ipfs/boxo/verifcid" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" butil "github.com/ipfs/go-ipfs-blocksutil" ipld "github.com/ipfs/go-ipld-format" + "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/assert" ) func TestWriteThroughWorks(t *testing.T) { + t.Parallel() + bstore := &PutCountingBlockstore{ blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), 0, @@ -46,6 +51,8 @@ func TestWriteThroughWorks(t *testing.T) { } func TestExchangeWrite(t *testing.T) { + t.Parallel() + bstore := &PutCountingBlockstore{ blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), 0, @@ -117,6 +124,8 @@ func TestExchangeWrite(t *testing.T) { } func TestLazySessionInitialization(t *testing.T) { + t.Parallel() + ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -215,6 +224,8 @@ func (fe *fakeSessionExchange) NewSession(ctx context.Context) exchange.Fetcher } func TestNilExchange(t *testing.T) { + t.Parallel() + ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -241,3 +252,39 @@ func TestNilExchange(t *testing.T) { t.Fatal("got the wrong block") } } + +func TestAllowlist(t *testing.T) { + t.Parallel() + a := assert.New(t) + + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + bgen := butil.NewBlockGenerator() + block := bgen.Next() + + data := []byte("this is some blake3 block") + mh, err := multihash.Sum(data, 
multihash.BLAKE3, -1) + a.NoError(err) + blake3 := cid.NewCidV1(cid.Raw, mh) + + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + a.NoError(bs.Put(ctx, block)) + b, err := blocks.NewBlockWithCid(data, blake3) + a.NoError(err) + a.NoError(bs.Put(ctx, b)) + + check := func(getBlock func(context.Context, cid.Cid) (blocks.Block, error)) { + _, err := getBlock(ctx, block.Cid()) + a.Error(err) + a.ErrorIs(err, verifcid.ErrPossiblyInsecureHashFunction) + + _, err = getBlock(ctx, blake3) + a.NoError(err) + } + + blockservice := New(bs, nil, WithAllowlist(verifcid.NewAllowlist(map[uint64]bool{multihash.BLAKE3: true}))) + check(blockservice.GetBlock) + check(NewSession(ctx, blockservice).GetBlock) +} diff --git a/blockservice/test/blocks_test.go b/blockservice/test/blocks_test.go index 6bfe603e4..6cb56faab 100644 --- a/blockservice/test/blocks_test.go +++ b/blockservice/test/blocks_test.go @@ -65,7 +65,7 @@ func makeObjects(n int) []blocks.Block { } func TestGetBlocksSequential(t *testing.T) { - var servs = Mocks(4) + servs := Mocks(4) for _, s := range servs { defer s.Close() } diff --git a/blockstore/blockstore.go b/blockstore/blockstore.go index 6a606f1e1..27260e5d8 100644 --- a/blockstore/blockstore.go +++ b/blockstore/blockstore.go @@ -268,7 +268,6 @@ func (bs *blockstore) DeleteBlock(ctx context.Context, k cid.Cid) error { // // AllKeysChan respects context. func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { - // KeysOnly, because that would be _a lot_ of data. q := dsq.Query{KeysOnly: true} res, err := bs.datastore.Query(ctx, q) diff --git a/blockstore/blockstore_test.go b/blockstore/blockstore_test.go index afcaec40e..684a9968f 100644 --- a/blockstore/blockstore_test.go +++ b/blockstore/blockstore_test.go @@ -229,7 +229,7 @@ func TestAllKeysRespectsContext(t *testing.T) { } var results dsq.Results - var resultsmu = make(chan struct{}) + resultsmu := make(chan struct{}) resultChan := make(chan dsq.Result) d.SetFunc(func(q dsq.Query) (dsq.Results, error) { results = dsq.ResultsWithChan(q, resultChan) @@ -265,7 +265,6 @@ func TestAllKeysRespectsContext(t *testing.T) { } t.Error(err) } - } func expectMatches(t *testing.T, expect, actual []cid.Cid) { @@ -328,6 +327,7 @@ func (c *queryTestDS) Sync(ctx context.Context, key ds.Key) error { func (c *queryTestDS) Batch(_ context.Context) (ds.Batch, error) { return ds.NewBasicBatch(c), nil } + func (c *queryTestDS) Close() error { return nil } diff --git a/blockstore/bloom_cache.go b/blockstore/bloom_cache.go index fddab1e53..a0a1b9ad2 100644 --- a/blockstore/bloom_cache.go +++ b/blockstore/bloom_cache.go @@ -78,8 +78,10 @@ type bloomcache struct { total metrics.Counter } -var _ Blockstore = (*bloomcache)(nil) -var _ Viewer = (*bloomcache)(nil) +var ( + _ Blockstore = (*bloomcache)(nil) + _ Viewer = (*bloomcache)(nil) +) func (b *bloomcache) BloomActive() bool { return atomic.LoadInt32(&b.active) != 0 diff --git a/blockstore/bloom_cache_test.go b/blockstore/bloom_cache_test.go index 70d118e09..a1daa0ae5 100644 --- a/blockstore/bloom_cache_test.go +++ b/blockstore/bloom_cache_test.go @@ -93,6 +93,7 @@ func TestReturnsErrorWhenSizeNegative(t *testing.T) { t.Fail() } } + func TestHasIsBloomCached(t *testing.T) { cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()} bs := NewBlockstore(syncds.MutexWrap(cd)) diff --git a/blockstore/caching.go b/blockstore/caching.go index bfbe449a9..67fe8cade 100644 --- a/blockstore/caching.go +++ b/blockstore/caching.go @@ -29,7 +29,8 @@ func 
DefaultCacheOpts() CacheOpts { func CachedBlockstore( ctx context.Context, bs Blockstore, - opts CacheOpts) (cbs Blockstore, err error) { + opts CacheOpts, +) (cbs Blockstore, err error) { cbs = bs if opts.HasBloomFilterSize < 0 || opts.HasBloomFilterHashes < 0 || diff --git a/blockstore/idstore.go b/blockstore/idstore.go index 25a6284c8..340b8b18b 100644 --- a/blockstore/idstore.go +++ b/blockstore/idstore.go @@ -15,9 +15,11 @@ type idstore struct { viewer Viewer } -var _ Blockstore = (*idstore)(nil) -var _ Viewer = (*idstore)(nil) -var _ io.Closer = (*idstore)(nil) +var ( + _ Blockstore = (*idstore)(nil) + _ Viewer = (*idstore)(nil) + _ io.Closer = (*idstore)(nil) +) func NewIdStore(bs Blockstore) Blockstore { ids := &idstore{bs: bs} diff --git a/blockstore/twoqueue_cache.go b/blockstore/twoqueue_cache.go index 97374ad5b..2b03cb972 100644 --- a/blockstore/twoqueue_cache.go +++ b/blockstore/twoqueue_cache.go @@ -12,8 +12,10 @@ import ( metrics "github.com/ipfs/go-metrics-interface" ) -type cacheHave bool -type cacheSize int +type ( + cacheHave bool + cacheSize int +) type lock struct { mu sync.RWMutex @@ -39,8 +41,10 @@ type tqcache struct { total metrics.Counter } -var _ Blockstore = (*tqcache)(nil) -var _ Viewer = (*tqcache)(nil) +var ( + _ Blockstore = (*tqcache)(nil) + _ Viewer = (*tqcache)(nil) +) func newTwoQueueCachedBS(ctx context.Context, bs Blockstore, lruSize int) (*tqcache, error) { cache, err := lru.New2Q[string, any](lruSize) diff --git a/blockstore/twoqueue_cache_test.go b/blockstore/twoqueue_cache_test.go index b4ed91893..c93f6b8ec 100644 --- a/blockstore/twoqueue_cache_test.go +++ b/blockstore/twoqueue_cache_test.go @@ -46,6 +46,7 @@ func trap(message string, cd *callbackDatastore, t *testing.T) { t.Fatal(message) }) } + func untrap(cd *callbackDatastore) { cd.SetFunc(func() {}) } @@ -317,7 +318,7 @@ func BenchmarkTwoQueueCacheConcurrentOps(b *testing.B) { // We always mix just two operations at a time. 
const numOps = 2 - var testOps = []struct { + testOps := []struct { name string ops [numOps]func(*tqcache, blocks.Block) }{ @@ -393,7 +394,6 @@ func BenchmarkTwoQueueCacheConcurrentOps(b *testing.B) { b.Fatalf("op %d ran %fx as many times as %d", maxIdx, ratio, minIdx) } } - }) } } diff --git a/chunker/buzhash.go b/chunker/buzhash.go index 83ab019dd..790bb0e7f 100644 --- a/chunker/buzhash.go +++ b/chunker/buzhash.go @@ -148,4 +148,5 @@ var bytehash = [256]uint32{ 0xe981a4c4, 0x82991da1, 0x708f7294, 0xe6e2ae62, 0xfc441870, 0x95e1b0b6, 0x445f825, 0x5a93b47f, 0x5e9cf4be, 0x84da71e7, 0x9d9582b0, 0x9bf835ef, 0x591f61e2, 0x43325985, 0x5d2de32e, 0x8d8fbf0f, 0x95b30f38, 0x7ad5b6e, - 0x4e934edf, 0x3cd4990e, 0x9053e259, 0x5c41857d} + 0x4e934edf, 0x3cd4990e, 0x9053e259, 0x5c41857d, +} diff --git a/chunker/parse_test.go b/chunker/parse_test.go index 237a2b439..2a33d64de 100644 --- a/chunker/parse_test.go +++ b/chunker/parse_test.go @@ -52,7 +52,6 @@ func TestParseRabin(t *testing.T) { if err != ErrSizeMax { t.Fatalf("Expected 'ErrSizeMax', got: %#v", err) } - } func TestParseSize(t *testing.T) { diff --git a/chunker/splitting_test.go b/chunker/splitting_test.go index c53dfb4a7..4a9f7f332 100644 --- a/chunker/splitting_test.go +++ b/chunker/splitting_test.go @@ -110,7 +110,6 @@ type clipReader struct { } func (s *clipReader) Read(buf []byte) (int, error) { - // clip the incoming buffer to produce smaller chunks if len(buf) > s.size { buf = buf[:s.size] diff --git a/cmd/deprecator/main.go b/cmd/deprecator/main.go index 7b3bc3d17..02dd2ae04 100644 --- a/cmd/deprecator/main.go +++ b/cmd/deprecator/main.go @@ -143,7 +143,6 @@ func inspectASTNode(addComment func(string, *dst.Decorations), n dst.Node) bool } } return true - } func getModulePath(dir string) (string, error) { diff --git a/coreiface/options/block.go b/coreiface/options/block.go index 130648682..83a43702c 100644 --- a/coreiface/options/block.go +++ b/coreiface/options/block.go @@ -17,8 +17,10 @@ type BlockRmSettings struct { Force bool } -type BlockPutOption func(*BlockPutSettings) error -type BlockRmOption func(*BlockRmSettings) error +type ( + BlockPutOption func(*BlockPutSettings) error + BlockRmOption func(*BlockRmSettings) error +) func BlockPutOptions(opts ...BlockPutOption) (*BlockPutSettings, error) { var cidPrefix cid.Prefix @@ -131,7 +133,6 @@ func (blockOpts) Format(format string) BlockPutOption { return nil } - } // Hash is an option for Block.Put which specifies the multihash settings to use diff --git a/coreiface/options/dht.go b/coreiface/options/dht.go index e13e16020..b43bf3e7a 100644 --- a/coreiface/options/dht.go +++ b/coreiface/options/dht.go @@ -8,8 +8,10 @@ type DhtFindProvidersSettings struct { NumProviders int } -type DhtProvideOption func(*DhtProvideSettings) error -type DhtFindProvidersOption func(*DhtFindProvidersSettings) error +type ( + DhtProvideOption func(*DhtProvideSettings) error + DhtFindProvidersOption func(*DhtFindProvidersSettings) error +) func DhtProvideOptions(opts ...DhtProvideOption) (*DhtProvideSettings, error) { options := &DhtProvideSettings{ diff --git a/coreiface/options/key.go b/coreiface/options/key.go index 4bc53a65f..ebff6d5a7 100644 --- a/coreiface/options/key.go +++ b/coreiface/options/key.go @@ -16,8 +16,10 @@ type KeyRenameSettings struct { Force bool } -type KeyGenerateOption func(*KeyGenerateSettings) error -type KeyRenameOption func(*KeyRenameSettings) error +type ( + KeyGenerateOption func(*KeyGenerateSettings) error + KeyRenameOption func(*KeyRenameSettings) error +) func 
KeyGenerateOptions(opts ...KeyGenerateOption) (*KeyGenerateSettings, error) { options := &KeyGenerateSettings{ diff --git a/coreiface/options/name.go b/coreiface/options/name.go index 8e9b5183d..35e78c394 100644 --- a/coreiface/options/name.go +++ b/coreiface/options/name.go @@ -24,8 +24,10 @@ type NameResolveSettings struct { ResolveOpts []ropts.ResolveOpt } -type NamePublishOption func(*NamePublishSettings) error -type NameResolveOption func(*NameResolveSettings) error +type ( + NamePublishOption func(*NamePublishSettings) error + NameResolveOption func(*NameResolveSettings) error +) func NamePublishOptions(opts ...NamePublishOption) (*NamePublishSettings, error) { options := &NamePublishSettings{ diff --git a/coreiface/options/object.go b/coreiface/options/object.go index e484a9f36..b5625a1d6 100644 --- a/coreiface/options/object.go +++ b/coreiface/options/object.go @@ -14,9 +14,11 @@ type ObjectAddLinkSettings struct { Create bool } -type ObjectNewOption func(*ObjectNewSettings) error -type ObjectPutOption func(*ObjectPutSettings) error -type ObjectAddLinkOption func(*ObjectAddLinkSettings) error +type ( + ObjectNewOption func(*ObjectNewSettings) error + ObjectPutOption func(*ObjectPutSettings) error + ObjectAddLinkOption func(*ObjectAddLinkSettings) error +) func ObjectNewOptions(opts ...ObjectNewOption) (*ObjectNewSettings, error) { options := &ObjectNewSettings{ diff --git a/coreiface/options/pubsub.go b/coreiface/options/pubsub.go index c387d613d..839ef97b1 100644 --- a/coreiface/options/pubsub.go +++ b/coreiface/options/pubsub.go @@ -8,8 +8,10 @@ type PubSubSubscribeSettings struct { Discover bool } -type PubSubPeersOption func(*PubSubPeersSettings) error -type PubSubSubscribeOption func(*PubSubSubscribeSettings) error +type ( + PubSubPeersOption func(*PubSubPeersSettings) error + PubSubSubscribeOption func(*PubSubSubscribeSettings) error +) func PubSubPeersOptions(opts ...PubSubPeersOption) (*PubSubPeersSettings, error) { options := &PubSubPeersSettings{ diff --git a/coreiface/options/unixfs.go b/coreiface/options/unixfs.go index dd12502e6..f00fffb87 100644 --- a/coreiface/options/unixfs.go +++ b/coreiface/options/unixfs.go @@ -43,8 +43,10 @@ type UnixfsLsSettings struct { UseCumulativeSize bool } -type UnixfsAddOption func(*UnixfsAddSettings) error -type UnixfsLsOption func(*UnixfsLsSettings) error +type ( + UnixfsAddOption func(*UnixfsAddSettings) error + UnixfsLsOption func(*UnixfsLsSettings) error +) func UnixfsAddOptions(opts ...UnixfsAddOption) (*UnixfsAddSettings, cid.Prefix, error) { options := &UnixfsAddSettings{ diff --git a/coreiface/tests/dag.go b/coreiface/tests/dag.go index ba74031f9..b9a03c8f4 100644 --- a/coreiface/tests/dag.go +++ b/coreiface/tests/dag.go @@ -31,15 +31,13 @@ func (tp *TestSuite) TestDag(t *testing.T) { t.Run("TestBatch", tp.TestBatch) } -var ( - treeExpected = map[string]struct{}{ - "a": {}, - "b": {}, - "c": {}, - "c/d": {}, - "c/e": {}, - } -) +var treeExpected = map[string]struct{}{ + "a": {}, + "b": {}, + "c": {}, + "c/d": {}, + "c/e": {}, +} func (tp *TestSuite) TestPut(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) diff --git a/coreiface/tests/name.go b/coreiface/tests/name.go index ab55d0425..74d88edff 100644 --- a/coreiface/tests/name.go +++ b/coreiface/tests/name.go @@ -164,4 +164,4 @@ func (tp *TestSuite) TestBasicPublishResolveTimeout(t *testing.T) { require.NoError(t, err) } -//TODO: When swarm api is created, add multinode tests +// TODO: When swarm api is created, add multinode tests diff --git 
a/coreiface/tests/object.go b/coreiface/tests/object.go index 77061b699..63c218eb3 100644 --- a/coreiface/tests/object.go +++ b/coreiface/tests/object.go @@ -74,7 +74,7 @@ func (tp *TestSuite) TestObjectPut(t *testing.T) { t.Fatal(err) } - p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"YmFy"}`), opt.Object.DataType("base64")) //bar + p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"YmFy"}`), opt.Object.DataType("base64")) // bar if err != nil { t.Fatal(err) } diff --git a/coreiface/tests/unixfs.go b/coreiface/tests/unixfs.go index 2842b47bc..25c3ac1b7 100644 --- a/coreiface/tests/unixfs.go +++ b/coreiface/tests/unixfs.go @@ -53,8 +53,10 @@ func (tp *TestSuite) TestUnixfs(t *testing.T) { } // `echo -n 'hello, world!' | ipfs add` -var hello = "/ipfs/QmQy2Dw4Wk7rdJKjThjYXzfFJNaRKRHhHP5gHHXroJMYxk" -var helloStr = "hello, world!" +var ( + hello = "/ipfs/QmQy2Dw4Wk7rdJKjThjYXzfFJNaRKRHhHP5gHHXroJMYxk" + helloStr = "hello, world!" +) // `echo -n | ipfs add` var emptyFile = "/ipfs/QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH" @@ -213,7 +215,7 @@ func (tp *TestSuite) TestAdd(t *testing.T) { path: "/ipfs/bafkqaaa", opts: []options.UnixfsAddOption{options.Unixfs.InlineLimit(0), options.Unixfs.Inline(true), options.Unixfs.RawLeaves(true)}, }, - { //TODO: after coreapi add is used in `ipfs add`, consider making this default for inline + { // TODO: after coreapi add is used in `ipfs add`, consider making this default for inline name: "addInlineRaw", data: strFile(helloStr), path: "/ipfs/bafkqadlimvwgy3zmeb3w64tmmqqq", diff --git a/examples/unixfs-file-cid/main_test.go b/examples/unixfs-file-cid/main_test.go index 7b69fb52b..39586fed2 100644 --- a/examples/unixfs-file-cid/main_test.go +++ b/examples/unixfs-file-cid/main_test.go @@ -3,9 +3,10 @@ package main import ( "bytes" "context" - "github.com/ipfs/go-cid" "testing" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" ) diff --git a/fetcher/impl/blockservice/fetcher.go b/fetcher/impl/blockservice/fetcher.go index cbacd5984..a02e6ebbf 100644 --- a/fetcher/impl/blockservice/fetcher.go +++ b/fetcher/impl/blockservice/fetcher.go @@ -101,8 +101,8 @@ func (f *fetcherSession) NodeMatching(ctx context.Context, node ipld.Node, match } func (f *fetcherSession) BlockMatchingOfType(ctx context.Context, root ipld.Link, match ipld.Node, - _ ipld.NodePrototype, cb fetcher.FetchCallback) error { - + _ ipld.NodePrototype, cb fetcher.FetchCallback, +) error { // retrieve first node prototype, err := f.PrototypeFromLink(root) if err != nil { diff --git a/fetcher/impl/blockservice/fetcher_test.go b/fetcher/impl/blockservice/fetcher_test.go index ee5b5203c..5a0b071f4 100644 --- a/fetcher/impl/blockservice/fetcher_test.go +++ b/fetcher/impl/blockservice/fetcher_test.go @@ -362,5 +362,4 @@ func TestNodeReification(t *testing.T) { require.NoError(t, err) underlying4 := retrievedNode4.(*selfLoader).Node assert.Equal(t, node4, underlying4) - } diff --git a/files/file_test.go b/files/file_test.go index 8c6c62229..3edecf107 100644 --- a/files/file_test.go +++ b/files/file_test.go @@ -48,6 +48,7 @@ func TestReaderFiles(t *testing.T) { t.Fatal("Expected EOF when reading after close") } } + func TestMultipartFiles(t *testing.T) { data := ` --Boundary! 
diff --git a/files/filewriter.go b/files/filewriter.go index bf4bcf649..707d8bc2d 100644 --- a/files/filewriter.go +++ b/files/filewriter.go @@ -8,8 +8,10 @@ import ( "path/filepath" ) -var ErrInvalidDirectoryEntry = errors.New("invalid directory entry name") -var ErrPathExistsOverwrite = errors.New("path already exists and overwriting is not allowed") +var ( + ErrInvalidDirectoryEntry = errors.New("invalid directory entry name") + ErrPathExistsOverwrite = errors.New("path already exists and overwriting is not allowed") +) // WriteTo writes the given node to the local filesystem at fpath. func WriteTo(nd Node, fpath string) error { @@ -33,7 +35,7 @@ func WriteTo(nd Node, fpath string) error { } return nil case Directory: - err := os.Mkdir(fpath, 0777) + err := os.Mkdir(fpath, 0o777) if err != nil { return err } diff --git a/files/filewriter_unix.go b/files/filewriter_unix.go index 98d040018..cd99aeb9a 100644 --- a/files/filewriter_unix.go +++ b/files/filewriter_unix.go @@ -1,4 +1,4 @@ -//go:build darwin || linux || netbsd || openbsd || freebsd || dragonfly +//go:build darwin || linux || netbsd || openbsd || freebsd || dragonfly || js || wasip1 package files @@ -15,5 +15,5 @@ func isValidFilename(filename string) bool { } func createNewFile(path string) (*os.File, error) { - return os.OpenFile(path, os.O_EXCL|os.O_CREATE|os.O_WRONLY|syscall.O_NOFOLLOW, 0666) + return os.OpenFile(path, os.O_EXCL|os.O_CREATE|os.O_WRONLY|syscall.O_NOFOLLOW, 0o666) } diff --git a/files/filewriter_unix_test.go b/files/filewriter_unix_test.go index ffc33ce51..9f63fe0fe 100644 --- a/files/filewriter_unix_test.go +++ b/files/filewriter_unix_test.go @@ -1,4 +1,4 @@ -//go:build darwin || linux || netbsd || openbsd +//go:build darwin || linux || netbsd || openbsd || freebsd || dragonfly || js || wasip1 package files diff --git a/files/filewriter_windows.go b/files/filewriter_windows.go index a5d626199..8f61315e9 100644 --- a/files/filewriter_windows.go +++ b/files/filewriter_windows.go @@ -41,5 +41,5 @@ func isValidFilename(filename string) bool { } func createNewFile(path string) (*os.File, error) { - return os.OpenFile(path, os.O_EXCL|os.O_CREATE|os.O_WRONLY, 0666) + return os.OpenFile(path, os.O_EXCL|os.O_CREATE|os.O_WRONLY, 0o666) } diff --git a/files/filewriter_windows_test.go b/files/filewriter_windows_test.go index ca0222ba3..dced6e6ef 100644 --- a/files/filewriter_windows_test.go +++ b/files/filewriter_windows_test.go @@ -24,9 +24,11 @@ func TestWriteToInvalidPaths(t *testing.T) { os.RemoveAll(path) // Now try all invalid entry names - for _, entryName := range []string{"", ".", "..", "/", "", "not/a/base/path", + for _, entryName := range []string{ + "", ".", "..", "/", "", "not/a/base/path", "<", ">", ":", "\"", "\\", "|", "?", "*", "\x00", - "CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9"} { + "CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9", + } { assert.Equal(t, ErrInvalidDirectoryEntry, WriteTo(NewMapDirectory(map[string]Node{ entryName: NewBytesFile(nil), }), filepath.Join(path))) diff --git a/files/filter_test.go b/files/filter_test.go index 8ce25ee3b..00b2e8baf 100644 --- a/files/filter_test.go +++ b/files/filter_test.go @@ -40,7 +40,7 @@ func TestFileFilter(t *testing.T) { } ignoreFilePath := filepath.Join(tmppath, "ignoreFile") ignoreFileContents := 
[]byte("a.txt") - if err := os.WriteFile(ignoreFilePath, ignoreFileContents, 0666); err != nil { + if err := os.WriteFile(ignoreFilePath, ignoreFileContents, 0o666); err != nil { t.Fatal(err) } filterWithIgnoreFile, err := NewFilter(ignoreFilePath, nil, false) diff --git a/files/multifilereader.go b/files/multifilereader.go index af708dc7f..1a5d4ac1a 100644 --- a/files/multifilereader.go +++ b/files/multifilereader.go @@ -29,19 +29,26 @@ type MultiFileReader struct { // if true, the content disposition will be "form-data" // if false, the content disposition will be "attachment" form bool + + // if true, 'abspath' header will be sent with raw (potentially binary) file + // name. This must only be used for legacy purposes to talk with old servers. + // if false, 'abspath-encoded' header will be sent with %-encoded filename + rawAbsPath bool } // NewMultiFileReader constructs a MultiFileReader. `file` can be any `commands.Directory`. // If `form` is set to true, the Content-Disposition will be "form-data". -// Otherwise, it will be "attachment". -func NewMultiFileReader(file Directory, form bool) *MultiFileReader { +// Otherwise, it will be "attachment". If `rawAbsPath` is set to true, the +// "abspath" header will be sent. Otherwise, the "abspath-encoded" header will be sent. +func NewMultiFileReader(file Directory, form, rawAbsPath bool) *MultiFileReader { it := file.Entries() mfr := &MultiFileReader{ - files: []DirIterator{it}, - path: []string{""}, - form: form, - mutex: &sync.Mutex{}, + files: []DirIterator{it}, + path: []string{""}, + form: form, + rawAbsPath: rawAbsPath, + mutex: &sync.Mutex{}, } mfr.mpWriter = multipart.NewWriter(&mfr.buf) @@ -114,7 +121,12 @@ func (mfr *MultiFileReader) Read(buf []byte) (written int, err error) { header.Set("Content-Type", contentType) if rf, ok := entry.Node().(FileInfo); ok { - header.Set("abspath", rf.AbsPath()) + if mfr.rawAbsPath { + // Legacy compatibility with old servers. + header.Set("abspath", rf.AbsPath()) + } else { + header.Set("abspath-encoded", url.QueryEscape(rf.AbsPath())) + } } _, err := mfr.mpWriter.CreatePart(header) diff --git a/files/multifilereader_binary_go119_test.go b/files/multifilereader_binary_go119_test.go new file mode 100644 index 000000000..c71514b61 --- /dev/null +++ b/files/multifilereader_binary_go119_test.go @@ -0,0 +1,10 @@ +//go:build !go1.20 + +package files + +import "testing" + +func TestAbspathHeaderWithBinaryFilenameSucceeds(t *testing.T) { + // Simulates old client talking to old server (< Go 1.20). + runMultiFileReaderToMultiFileTest(t, true, true, false) +} diff --git a/files/multifilereader_binary_go120_test.go b/files/multifilereader_binary_go120_test.go new file mode 100644 index 000000000..bc3cf097d --- /dev/null +++ b/files/multifilereader_binary_go120_test.go @@ -0,0 +1,13 @@ +//go:build go1.20 + +package files + +import ( + "testing" +) + +func TestAbspathHeaderWithBinaryFilenameFails(t *testing.T) { + // Simulates old client talking to new server (>= Go 1.20). Old client will + // send the binary filename in the regular headers and the new server will error. + runMultiFileReaderToMultiFileTest(t, true, true, true) +} diff --git a/files/multifilereader_test.go b/files/multifilereader_test.go index e36788a91..b39217037 100644 --- a/files/multifilereader_test.go +++ b/files/multifilereader_test.go @@ -1,29 +1,48 @@ package files import ( + "bytes" "io" "mime/multipart" "testing" + + "github.com/stretchr/testify/require" ) var text = "Some text! 
:)" -func getTestMultiFileReader(t *testing.T) *MultiFileReader { +func newBytesFileWithPath(abspath string, b []byte) File { + return &ReaderFile{abspath, bytesReaderCloser{bytes.NewReader(b)}, nil, int64(len(b))} +} + +func makeMultiFileReader(t *testing.T, binaryFileName, rawAbsPath bool) (string, *MultiFileReader) { + var ( + filename string + file File + ) + + if binaryFileName { + filename = "bad\x7fname.txt" + file = newBytesFileWithPath("/my/path/boop/bad\x7fname.txt", []byte("bloop")) + } else { + filename = "résumé🥳.txt" + file = newBytesFileWithPath("/my/path/boop/résumé🥳.txt", []byte("bloop")) + } + sf := NewMapDirectory(map[string]Node{ - "file.txt": NewBytesFile([]byte(text)), + "file.txt": newBytesFileWithPath("/my/path/file.txt", []byte(text)), "boop": NewMapDirectory(map[string]Node{ - "a.txt": NewBytesFile([]byte("bleep")), - "b.txt": NewBytesFile([]byte("bloop")), + "a.txt": newBytesFileWithPath("/my/path/boop/a.txt", []byte("bleep")), + filename: file, }), - "beep.txt": NewBytesFile([]byte("beep")), + "beep.txt": newBytesFileWithPath("/my/path/beep.txt", []byte("beep")), }) - // testing output by reading it with the go stdlib "mime/multipart" Reader - return NewMultiFileReader(sf, true) + return filename, NewMultiFileReader(sf, true, rawAbsPath) } -func TestMultiFileReaderToMultiFile(t *testing.T) { - mfr := getTestMultiFileReader(t) +func runMultiFileReaderToMultiFileTest(t *testing.T, binaryFileName, rawAbsPath, expectFailure bool) { + filename, mfr := makeMultiFileReader(t, binaryFileName, rawAbsPath) mpReader := multipart.NewReader(mfr, mfr.Boundary()) mf, err := NewFileFromPartReader(mpReader, multipartFormdataType) if err != nil { @@ -32,40 +51,68 @@ func TestMultiFileReaderToMultiFile(t *testing.T) { it := mf.Entries() - if !it.Next() || it.Name() != "beep.txt" { - t.Fatal("iterator didn't work as expected") - } - - if !it.Next() || it.Name() != "boop" || DirFromEntry(it) == nil { - t.Fatal("iterator didn't work as expected") - } + require.True(t, it.Next()) + require.Equal(t, "beep.txt", it.Name()) + require.True(t, it.Next()) + require.Equal(t, "boop", it.Name()) + require.NotNil(t, DirFromEntry(it)) subIt := DirFromEntry(it).Entries() + require.True(t, subIt.Next(), subIt.Err()) + require.Equal(t, "a.txt", subIt.Name()) + require.Nil(t, DirFromEntry(subIt)) - if !subIt.Next() || subIt.Name() != "a.txt" || DirFromEntry(subIt) != nil { - t.Fatal("iterator didn't work as expected") - } + if expectFailure { + require.False(t, subIt.Next()) + require.Error(t, subIt.Err()) + } else { + require.True(t, subIt.Next(), subIt.Err()) + require.Equal(t, filename, subIt.Name()) + require.Nil(t, DirFromEntry(subIt)) - if !subIt.Next() || subIt.Name() != "b.txt" || DirFromEntry(subIt) != nil { - t.Fatal("iterator didn't work as expected") - } + require.False(t, subIt.Next()) + require.Nil(t, it.Err()) - if subIt.Next() || it.Err() != nil { - t.Fatal("iterator didn't work as expected") - } + // try to break internal state + require.False(t, subIt.Next()) + require.Nil(t, it.Err()) - // try to break internal state - if subIt.Next() || it.Err() != nil { - t.Fatal("iterator didn't work as expected") - } + require.True(t, it.Next()) + require.Equal(t, "file.txt", it.Name()) + require.Nil(t, DirFromEntry(it)) + require.Nil(t, it.Err()) - if !it.Next() || it.Name() != "file.txt" || DirFromEntry(it) != nil || it.Err() != nil { - t.Fatal("iterator didn't work as expected") + require.False(t, it.Next()) + require.Nil(t, it.Err()) } +} - if it.Next() || it.Err() != nil { - 
t.Fatal("iterator didn't work as expected") - } +func TestMultiFileReaderToMultiFile(t *testing.T) { + t.Run("Header 'abspath' with unicode filename succeeds", func(t *testing.T) { + runMultiFileReaderToMultiFileTest(t, false, true, false) + }) + + t.Run("Header 'abspath-encoded' with unicode filename succeeds", func(t *testing.T) { + runMultiFileReaderToMultiFileTest(t, false, false, false) + }) + + t.Run("Header 'abspath-encoded' with binary filename succeeds", func(t *testing.T) { + runMultiFileReaderToMultiFileTest(t, true, false, false) + }) +} + +func getTestMultiFileReader(t *testing.T) *MultiFileReader { + sf := NewMapDirectory(map[string]Node{ + "file.txt": NewBytesFile([]byte(text)), + "boop": NewMapDirectory(map[string]Node{ + "a.txt": NewBytesFile([]byte("bleep")), + "b.txt": NewBytesFile([]byte("bloop")), + }), + "beep.txt": NewBytesFile([]byte("beep")), + }) + + // testing output by reading it with the go stdlib "mime/multipart" Reader + return NewMultiFileReader(sf, true, false) } func TestMultiFileReaderToMultiFileSkip(t *testing.T) { @@ -164,7 +211,7 @@ func TestCommonPrefix(t *testing.T) { "aaa": NewBytesFile([]byte("bleep")), }), }) - mfr := NewMultiFileReader(sf, true) + mfr := NewMultiFileReader(sf, true, false) reader, err := NewFileFromPartReader(multipart.NewReader(mfr, mfr.Boundary()), multipartFormdataType) if err != nil { t.Fatal(err) diff --git a/files/multipartfile.go b/files/multipartfile.go index 27653982c..b5aab9620 100644 --- a/files/multipartfile.go +++ b/files/multipartfile.go @@ -100,9 +100,19 @@ func (w *multipartWalker) nextFile() (Node, error) { return NewLinkFile(string(out), nil), nil default: + var absPath string + if absPathEncoded := part.Header.Get("abspath-encoded"); absPathEncoded != "" { + absPath, err = url.QueryUnescape(absPathEncoded) + if err != nil { + return nil, err + } + } else { + absPath = part.Header.Get("abspath") + } + return &ReaderFile{ reader: part, - abspath: part.Header.Get("abspath"), + abspath: absPath, }, nil } } diff --git a/files/readerfile.go b/files/readerfile.go index 7b4e07954..bf3fa1c9e 100644 --- a/files/readerfile.go +++ b/files/readerfile.go @@ -88,5 +88,7 @@ func (f *ReaderFile) Seek(offset int64, whence int) (int64, error) { return 0, ErrNotSupported } -var _ File = &ReaderFile{} -var _ FileInfo = &ReaderFile{} +var ( + _ File = &ReaderFile{} + _ FileInfo = &ReaderFile{} +) diff --git a/files/serialfile.go b/files/serialfile.go index ab4c1e2fe..bd25bd1bc 100644 --- a/files/serialfile.go +++ b/files/serialfile.go @@ -164,5 +164,7 @@ func (f *serialFile) Size() (int64, error) { return du, err } -var _ Directory = &serialFile{} -var _ DirIterator = &serialIterator{} +var ( + _ Directory = &serialFile{} + _ DirIterator = &serialIterator{} +) diff --git a/files/serialfile_test.go b/files/serialfile_test.go index 80c252a7e..2d480a1df 100644 --- a/files/serialfile_test.go +++ b/files/serialfile_test.go @@ -57,7 +57,7 @@ func testSerialFile(t *testing.T, hidden, withIgnoreRules bool) { if c != "" { continue } - if err := os.MkdirAll(path, 0777); err != nil { + if err := os.MkdirAll(path, 0o777); err != nil { t.Fatal(err) } } @@ -67,7 +67,7 @@ func testSerialFile(t *testing.T, hidden, withIgnoreRules bool) { if c == "" { continue } - if err := os.WriteFile(path, []byte(c), 0666); err != nil { + if err := os.WriteFile(path, []byte(c), 0o666); err != nil { t.Fatal(err) } } diff --git a/files/slicedirectory.go b/files/slicedirectory.go index d11656261..9cf910c6a 100644 --- a/files/slicedirectory.go +++ 
b/files/slicedirectory.go @@ -93,5 +93,7 @@ func (f *SliceFile) Size() (int64, error) { return size, nil } -var _ Directory = &SliceFile{} -var _ DirEntry = fileEntry{} +var ( + _ Directory = &SliceFile{} + _ DirEntry = fileEntry{} +) diff --git a/files/tarwriter.go b/files/tarwriter.go index cecbcae42..e5d857116 100644 --- a/files/tarwriter.go +++ b/files/tarwriter.go @@ -10,9 +10,7 @@ import ( "time" ) -var ( - ErrUnixFSPathOutsideRoot = errors.New("relative UnixFS paths outside the root are now allowed, use CAR instead") -) +var ErrUnixFSPathOutsideRoot = errors.New("relative UnixFS paths outside the root are now allowed, use CAR instead") type TarWriter struct { TarW *tar.Writer @@ -110,7 +108,7 @@ func writeDirHeader(w *tar.Writer, fpath string) error { return w.WriteHeader(&tar.Header{ Name: fpath, Typeflag: tar.TypeDir, - Mode: 0777, + Mode: 0o777, ModTime: time.Now().Truncate(time.Second), // TODO: set mode, dates, etc. when added to unixFS }) @@ -121,7 +119,7 @@ func writeFileHeader(w *tar.Writer, fpath string, size uint64) error { Name: fpath, Size: int64(size), Typeflag: tar.TypeReg, - Mode: 0644, + Mode: 0o644, ModTime: time.Now().Truncate(time.Second), // TODO: set mode, dates, etc. when added to unixFS }) @@ -131,7 +129,7 @@ func writeSymlinkHeader(w *tar.Writer, target, fpath string) error { return w.WriteHeader(&tar.Header{ Name: fpath, Linkname: target, - Mode: 0777, + Mode: 0o777, Typeflag: tar.TypeSymlink, }) } diff --git a/files/webfile.go b/files/webfile.go index 594b81c82..4586eab63 100644 --- a/files/webfile.go +++ b/files/webfile.go @@ -85,5 +85,7 @@ func (wf *WebFile) Stat() os.FileInfo { return nil } -var _ File = &WebFile{} -var _ FileInfo = &WebFile{} +var ( + _ File = &WebFile{} + _ FileInfo = &WebFile{} +) diff --git a/filestore/filestore.go b/filestore/filestore.go index feb5b0291..90a61549f 100644 --- a/filestore/filestore.go +++ b/filestore/filestore.go @@ -22,8 +22,10 @@ import ( var logger = logging.Logger("filestore") -var ErrFilestoreNotEnabled = errors.New("filestore is not enabled, see https://git.io/vNItf") -var ErrUrlstoreNotEnabled = errors.New("urlstore is not enabled") +var ( + ErrFilestoreNotEnabled = errors.New("filestore is not enabled, see https://git.io/vNItf") + ErrUrlstoreNotEnabled = errors.New("urlstore is not enabled") +) // Filestore implements a Blockstore by combining a standard Blockstore // to store regular blocks and a special Blockstore called diff --git a/filestore/fsrefstore.go b/filestore/fsrefstore.go index 585b6446c..158eadf7a 100644 --- a/filestore/fsrefstore.go +++ b/filestore/fsrefstore.go @@ -205,8 +205,10 @@ func (f *FileManager) readFileDataObj(m mh.Multihash, d *pb.DataObj) ([]byte, er } if !origCid.Equals(outcid) { - return nil, &CorruptReferenceError{StatusFileChanged, - fmt.Errorf("data in file did not match. %s offset %d", d.GetFilePath(), d.GetOffset())} + return nil, &CorruptReferenceError{ + StatusFileChanged, + fmt.Errorf("data in file did not match. %s offset %d", d.GetFilePath(), d.GetOffset()), + } } return outbuf, nil @@ -230,8 +232,10 @@ func (f *FileManager) readURLDataObj(ctx context.Context, m mh.Multihash, d *pb. 
return nil, &CorruptReferenceError{StatusFileError, err} } if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusPartialContent { - return nil, &CorruptReferenceError{StatusFileError, - fmt.Errorf("expected HTTP 200 or 206 got %d", res.StatusCode)} + return nil, &CorruptReferenceError{ + StatusFileError, + fmt.Errorf("expected HTTP 200 or 206 got %d", res.StatusCode), + } } outbuf := make([]byte, d.GetSize_()) @@ -252,8 +256,10 @@ func (f *FileManager) readURLDataObj(ctx context.Context, m mh.Multihash, d *pb. } if !origCid.Equals(outcid) { - return nil, &CorruptReferenceError{StatusFileChanged, - fmt.Errorf("data in file did not match. %s offset %d", d.GetFilePath(), d.GetOffset())} + return nil, &CorruptReferenceError{ + StatusFileChanged, + fmt.Errorf("data in file did not match. %s offset %d", d.GetFilePath(), d.GetOffset()), + } } return outbuf, nil diff --git a/filestore/pb/dataobj.pb.go b/filestore/pb/dataobj.pb.go index d342cabe5..d47d1931e 100644 --- a/filestore/pb/dataobj.pb.go +++ b/filestore/pb/dataobj.pb.go @@ -13,9 +13,11 @@ import ( ) // Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +var ( + _ = proto.Marshal + _ = fmt.Errorf + _ = math.Inf +) // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. @@ -35,9 +37,11 @@ func (*DataObj) ProtoMessage() {} func (*DataObj) Descriptor() ([]byte, []int) { return fileDescriptor_a76cb282d869d683, []int{0} } + func (m *DataObj) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } + func (m *DataObj) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_DataObj.Marshal(b, m, deterministic) @@ -50,12 +54,15 @@ func (m *DataObj) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } + func (m *DataObj) XXX_Merge(src proto.Message) { xxx_messageInfo_DataObj.Merge(m, src) } + func (m *DataObj) XXX_Size() int { return m.Size() } + func (m *DataObj) XXX_DiscardUnknown() { xxx_messageInfo_DataObj.DiscardUnknown(m) } @@ -148,6 +155,7 @@ func encodeVarintDataobj(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } + func (m *DataObj) Size() (n int) { if m == nil { return 0 @@ -164,9 +172,11 @@ func (m *DataObj) Size() (n int) { func sovDataobj(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } + func sozDataobj(x uint64) (n int) { return sovDataobj(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } + func (m *DataObj) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -290,6 +300,7 @@ func (m *DataObj) Unmarshal(dAtA []byte) error { } return nil } + func skipDataobj(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/gateway/assets/assets.go b/gateway/assets/assets.go index f411a47e0..3c0265f0c 100644 --- a/gateway/assets/assets.go +++ b/gateway/assets/assets.go @@ -2,12 +2,11 @@ package assets import ( "embed" + "html/template" "io" "io/fs" "net" "strconv" - - "html/template" "strings" "github.com/cespare/xxhash/v2" diff --git a/gateway/blocks_backend.go b/gateway/blocks_backend.go index 01ca49fec..208c92062 100644 --- a/gateway/blocks_backend.go +++ b/gateway/blocks_backend.go @@ -232,9 +232,45 @@ func (bb *BlocksBackend) Head(ctx context.Context, path ImmutablePath) (ContentP return md, fileNode, nil } +// emptyRoot is a CAR root with the empty identity CID. 
CAR files are recommended +// to always include a CID in their root, even if it's just the empty CID. +// https://ipld.io/specs/transport/car/carv1/#number-of-roots +var emptyRoot = []cid.Cid{cid.MustParse("bafkqaaa")} + func (bb *BlocksBackend) GetCAR(ctx context.Context, p ImmutablePath, params CarParams) (ContentPathMetadata, io.ReadCloser, error) { pathMetadata, err := bb.ResolvePath(ctx, p) if err != nil { + rootCid, err := cid.Decode(strings.Split(p.String(), "/")[2]) + if err != nil { + return ContentPathMetadata{}, nil, err + } + + var buf bytes.Buffer + cw, err := storage.NewWritable(&buf, emptyRoot, car.WriteAsCarV1(true)) + if err != nil { + return ContentPathMetadata{}, nil, err + } + + blockGetter := merkledag.NewDAGService(bb.blockService).Session(ctx) + + blockGetter = &nodeGetterToCarExporer{ + ng: blockGetter, + cw: cw, + } + + // Setup the UnixFS resolver. + f := newNodeGetterFetcherSingleUseFactory(ctx, blockGetter) + pathResolver := resolver.NewBasicResolver(f) + ip := ipfspath.FromString(p.String()) + _, _, err = pathResolver.ResolveToLastNode(ctx, ip) + + if isErrNotFound(err) { + return ContentPathMetadata{ + PathSegmentRoots: nil, + LastSegment: ifacepath.NewResolvedPath(ip, rootCid, rootCid, ""), + ContentType: "", + }, io.NopCloser(&buf), nil + } return ContentPathMetadata{}, nil, err } @@ -780,5 +816,7 @@ func blockOpener(ctx context.Context, ng format.NodeGetter) ipld.BlockReadOpener } } -var _ fetcher.Fetcher = (*nodeGetterFetcherSingleUseFactory)(nil) -var _ fetcher.Factory = (*nodeGetterFetcherSingleUseFactory)(nil) +var ( + _ fetcher.Fetcher = (*nodeGetterFetcherSingleUseFactory)(nil) + _ fetcher.Factory = (*nodeGetterFetcherSingleUseFactory)(nil) +) diff --git a/gateway/errors.go b/gateway/errors.go index e9671438e..17a22ca62 100644 --- a/gateway/errors.go +++ b/gateway/errors.go @@ -12,7 +12,6 @@ import ( "github.com/ipfs/boxo/gateway/assets" "github.com/ipfs/boxo/path/resolver" "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" "github.com/ipld/go-ipld-prime/datamodel" ) @@ -177,11 +176,9 @@ func webError(w http.ResponseWriter, r *http.Request, c *Config, err error, defa } } +// isErrNotFound returns true for IPLD errors that should return 4xx errors (e.g. the path doesn't exist, the data is +// the wrong type, etc.), rather than issues with just finding and retrieving the data. func isErrNotFound(err error) bool { - if ipld.IsNotFound(err) { - return true - } - // Checks if err is of a type that does not implement the .Is interface and // cannot be directly compared to. Therefore, errors.Is cannot be used. 
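// Hedged aside (not part of the patch): "bafkqaaa", used as the fallback CAR root in
// GetCAR above, is the empty identity CID — CIDv1, raw codec, identity multihash,
// zero-length digest — so it satisfies the CARv1 "at least one root" recommendation
// without pointing at any block. A small check with github.com/ipfs/go-cid; the
// printed values are what the library reports, not something this patch asserts.
package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
)

func main() {
	emptyRoot := cid.MustParse("bafkqaaa")
	p := emptyRoot.Prefix()
	fmt.Println(p.Version)        // 1
	fmt.Printf("%#x\n", p.Codec)  // 0x55 (raw)
	fmt.Printf("%#x\n", p.MhType) // 0x0 (identity)
	fmt.Println(p.MhLength)       // 0
}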
for { diff --git a/gateway/gateway_test.go b/gateway/gateway_test.go index 7fae55f3e..98996acb3 100644 --- a/gateway/gateway_test.go +++ b/gateway/gateway_test.go @@ -392,7 +392,6 @@ func TestHeaders(t *testing.T) { // Expect OPTIONS response to have implicit default Allow-Methods // set by boxo/gateway library assert.Equal(t, expectedACAM, res.Header[headerACAM]) - } cid := root.String() @@ -763,7 +762,7 @@ func TestErrorBubblingFromBackend(t *testing.T) { }) } - testError("404 Not Found from IPLD", &ipld.ErrNotFound{}, http.StatusNotFound) + testError("500 Not Found from IPLD", &ipld.ErrNotFound{}, http.StatusInternalServerError) testError("404 Not Found from path resolver", resolver.ErrNoLink{}, http.StatusNotFound) testError("502 Bad Gateway", ErrBadGateway, http.StatusBadGateway) testError("504 Gateway Timeout", ErrGatewayTimeout, http.StatusGatewayTimeout) diff --git a/gateway/handler_car.go b/gateway/handler_car.go index 553519988..000e0dc9c 100644 --- a/gateway/handler_car.go +++ b/gateway/handler_car.go @@ -178,7 +178,6 @@ func buildCarParams(r *http.Request, contentTypeParams map[string]string) (CarPa // not break legacy clients, and responses to requests made via ?format=car // should benefit from block deduplication params.Duplicates = DuplicateBlocksExcluded - } return params, nil diff --git a/gateway/hostname.go b/gateway/hostname.go index 4df23d22c..0bf6b4d72 100644 --- a/gateway/hostname.go +++ b/gateway/hostname.go @@ -201,7 +201,6 @@ func NewHostnameHandler(c Config, backend IPFSBackend, next http.Handler) http.H // else, treat it as an old school gateway, I guess. next.ServeHTTP(w, r) - }) } diff --git a/ipld/merkledag/coding.go b/ipld/merkledag/coding.go index 7c4bfb7e6..ad9440404 100644 --- a/ipld/merkledag/coding.go +++ b/ipld/merkledag/coding.go @@ -75,6 +75,7 @@ func fromImmutableNode(encoded *immutableProtoNode) *ProtoNode { // serialized form needs to be stable, until we start mutating the ProtoNode return n } + func (n *ProtoNode) marshalImmutable() (*immutableProtoNode, error) { links := n.Links() nd, err := qp.BuildMap(dagpb.Type.PBNode, 2, func(ma ipld.MapAssembler) { diff --git a/ipld/merkledag/dagutils/diffenum.go b/ipld/merkledag/dagutils/diffenum.go index 75f86b952..d2d57a8ec 100644 --- a/ipld/merkledag/dagutils/diffenum.go +++ b/ipld/merkledag/dagutils/diffenum.go @@ -72,7 +72,7 @@ func getLinkDiff(a, b ipld.Node) []diffpair { inb[l.Cid.KeyString()] = l } for _, l := range a.Links() { - var key = l.Cid.KeyString() + key := l.Cid.KeyString() ina[key] = l if inb[key] == nil { aonly = append(aonly, l.Cid) diff --git a/ipld/merkledag/dagutils/diffenum_test.go b/ipld/merkledag/dagutils/diffenum_test.go index b96e6f759..41fbfc669 100644 --- a/ipld/merkledag/dagutils/diffenum_test.go +++ b/ipld/merkledag/dagutils/diffenum_test.go @@ -219,7 +219,6 @@ func TestDiffEnumFail(t *testing.T) { if err != nil { t.Fatal(err) } - } func TestDiffEnumRecurse(t *testing.T) { diff --git a/ipld/merkledag/merkledag.go b/ipld/merkledag/merkledag.go index fce59a995..7d830ad2e 100644 --- a/ipld/merkledag/merkledag.go +++ b/ipld/merkledag/merkledag.go @@ -140,7 +140,6 @@ type sesGetter struct { // Get gets a single node from the DAG. 
func (sg *sesGetter) Get(ctx context.Context, c cid.Cid) (format.Node, error) { blk, err := sg.bs.GetBlock(ctx, c) - if err != nil { return nil, err } @@ -583,7 +582,9 @@ func parallelWalkDepth(ctx context.Context, getLinks GetLinks, root cid.Cid, vis } } -var _ format.LinkGetter = &dagService{} -var _ format.NodeGetter = &dagService{} -var _ format.NodeGetter = &sesGetter{} -var _ format.DAGService = &dagService{} +var ( + _ format.LinkGetter = &dagService{} + _ format.NodeGetter = &dagService{} + _ format.NodeGetter = &sesGetter{} + _ format.DAGService = &dagService{} +) diff --git a/ipld/merkledag/merkledag_test.go b/ipld/merkledag/merkledag_test.go index 6c9af68d7..785159aff 100644 --- a/ipld/merkledag/merkledag_test.go +++ b/ipld/merkledag/merkledag_test.go @@ -263,7 +263,6 @@ func TestLinkChecking(t *testing.T) { } func TestNode(t *testing.T) { - n1 := NodeWithData([]byte("beep")) n2 := NodeWithData([]byte("boop")) n3 := NodeWithData([]byte("beep boop")) @@ -572,7 +571,6 @@ func TestFetchGraphWithDepthLimit(t *testing.T) { return true } return false - } err = WalkDepth(context.Background(), offlineDS.GetLinks, root.Cid(), visitF) @@ -756,7 +754,6 @@ func TestGetRawNodes(t *testing.T) { } func TestProtoNodeResolve(t *testing.T) { - nd := new(ProtoNode) nd.SetLinks([]*ipld.Link{{Name: "foo", Cid: someCid}}) diff --git a/ipld/merkledag/node.go b/ipld/merkledag/node.go index c0f5f02f7..70acbc73e 100644 --- a/ipld/merkledag/node.go +++ b/ipld/merkledag/node.go @@ -29,8 +29,10 @@ var ( var log = logging.Logger("merkledag") // for testing custom CidBuilders -var zeros [256]byte -var zeroCid = mustZeroCid() +var ( + zeros [256]byte + zeroCid = mustZeroCid() +) type immutableProtoNode struct { encoded []byte diff --git a/ipld/merkledag/pb/compat_test.go b/ipld/merkledag/pb/compat_test.go index 529cee0ca..6282356fd 100644 --- a/ipld/merkledag/pb/compat_test.go +++ b/ipld/merkledag/pb/compat_test.go @@ -8,14 +8,16 @@ import ( "testing" ) -var dataZero []byte = make([]byte, 0) -var dataSome []byte = []byte{0, 1, 2, 3, 4} -var cidBytes []byte = []byte{1, 85, 0, 5, 0, 1, 2, 3, 4} -var zeroName string = "" -var someName string = "some name" -var zeroTsize uint64 = 0 -var someTsize uint64 = 1010 -var largeTsize uint64 = 9007199254740991 // JavaScript Number.MAX_SAFE_INTEGER +var ( + dataZero []byte = make([]byte, 0) + dataSome []byte = []byte{0, 1, 2, 3, 4} + cidBytes []byte = []byte{1, 85, 0, 5, 0, 1, 2, 3, 4} + zeroName string = "" + someName string = "some name" + zeroTsize uint64 = 0 + someTsize uint64 = 1010 + largeTsize uint64 = 9007199254740991 // JavaScript Number.MAX_SAFE_INTEGER +) type testCase struct { name string diff --git a/ipld/merkledag/pb/merkledag.pb.go b/ipld/merkledag/pb/merkledag.pb.go index 4646b7325..428459527 100644 --- a/ipld/merkledag/pb/merkledag.pb.go +++ b/ipld/merkledag/pb/merkledag.pb.go @@ -26,9 +26,11 @@ Do *not regenerate this file. ` // Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +var ( + _ = proto.Marshal + _ = fmt.Errorf + _ = math.Inf +) // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. 
@@ -54,9 +56,11 @@ func (*PBLink) ProtoMessage() {} func (*PBLink) Descriptor() ([]byte, []int) { return fileDescriptor_10837cc3557cec00, []int{0} } + func (m *PBLink) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } + func (m *PBLink) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_PBLink.Marshal(b, m, deterministic) @@ -69,12 +73,15 @@ func (m *PBLink) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } + func (m *PBLink) XXX_Merge(src proto.Message) { xxx_messageInfo_PBLink.Merge(m, src) } + func (m *PBLink) XXX_Size() int { return m.Size() } + func (m *PBLink) XXX_DiscardUnknown() { xxx_messageInfo_PBLink.DiscardUnknown(m) } @@ -118,9 +125,11 @@ func (*PBNode) ProtoMessage() {} func (*PBNode) Descriptor() ([]byte, []int) { return fileDescriptor_10837cc3557cec00, []int{1} } + func (m *PBNode) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } + func (m *PBNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_PBNode.Marshal(b, m, deterministic) @@ -133,12 +142,15 @@ func (m *PBNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } + func (m *PBNode) XXX_Merge(src proto.Message) { xxx_messageInfo_PBNode.Merge(m, src) } + func (m *PBNode) XXX_Size() int { return m.Size() } + func (m *PBNode) XXX_DiscardUnknown() { xxx_messageInfo_PBNode.DiscardUnknown(m) } @@ -236,6 +248,7 @@ func (pbLink *PBLink) VerboseEqual(that interface{}) error { } return nil } + func (pbLink *PBLink) Equal(that interface{}) bool { if that == nil { return pbLink == nil @@ -281,6 +294,7 @@ func (pbLink *PBLink) Equal(that interface{}) bool { } return true } + func (pbLink *PBNode) VerboseEqual(that interface{}) error { if that == nil { if pbLink == nil { @@ -358,6 +372,7 @@ func (pbNode *PBNode) Equal(that interface{}) bool { } return true } + func (pbLink *PBLink) GoString() string { if pbLink == nil { return "nil" @@ -379,6 +394,7 @@ func (pbLink *PBLink) GoString() string { s = append(s, "}") return strings.Join(s, "") } + func (pbNode *PBNode) GoString() string { if pbNode == nil { return "nil" @@ -397,6 +413,7 @@ func (pbNode *PBNode) GoString() string { s = append(s, "}") return strings.Join(s, "") } + func valueToGoStringMerkledag(v interface{}, typ string) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -405,6 +422,7 @@ func valueToGoStringMerkledag(v interface{}, typ string) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) } + func (m *PBLink) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -510,6 +528,7 @@ func encodeVarintMerkledag(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } + func NewPopulatedPBLink(r randyMerkledag, easy bool) *PBLink { this := &PBLink{} if r.Intn(5) != 0 { @@ -573,6 +592,7 @@ func randUTF8RuneMerkledag(r randyMerkledag) rune { } return rune(ru + 61) } + func randStringMerkledag(r randyMerkledag) string { v6 := r.Intn(100) tmps := make([]rune, v6) @@ -581,6 +601,7 @@ func randStringMerkledag(r randyMerkledag) string { } return string(tmps) } + func randUnrecognizedMerkledag(r randyMerkledag, maxFieldNumber int) (dAtA []byte) { l := r.Intn(5) for i := 0; i < l; i++ { @@ -593,6 +614,7 @@ func randUnrecognizedMerkledag(r randyMerkledag, maxFieldNumber int) (dAtA []byt } return dAtA } + func randFieldMerkledag(dAtA []byte, r randyMerkledag, fieldNumber int, wire int) 
[]byte { key := uint32(fieldNumber)<<3 | uint32(wire) switch wire { @@ -619,6 +641,7 @@ func randFieldMerkledag(dAtA []byte, r randyMerkledag, fieldNumber int, wire int } return dAtA } + func encodeVarintPopulateMerkledag(dAtA []byte, v uint64) []byte { for v >= 1<<7 { dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) @@ -627,6 +650,7 @@ func encodeVarintPopulateMerkledag(dAtA []byte, v uint64) []byte { dAtA = append(dAtA, uint8(v)) return dAtA } + func (m *PBLink) Size() (n int) { if m == nil { return 0 @@ -675,11 +699,13 @@ func (m *PBNode) Size() (n int) { func sovMerkledag(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } + func (pbLink *PBLink) String() string { if pbLink == nil { return "nil" } - s := strings.Join([]string{`&PBLink{`, + s := strings.Join([]string{ + `&PBLink{`, `Hash:` + valueToStringMerkledag(pbLink.Hash) + `,`, `Name:` + valueToStringMerkledag(pbLink.Name) + `,`, `Tsize:` + valueToStringMerkledag(pbLink.Tsize) + `,`, @@ -688,6 +714,7 @@ func (pbLink *PBLink) String() string { }, "") return s } + func (pbNode *PBNode) String() string { if pbNode == nil { return "nil" @@ -697,7 +724,8 @@ func (pbNode *PBNode) String() string { repeatedStringForLinks += strings.Replace(f.String(), "PBLink", "PBLink", 1) + "," } repeatedStringForLinks += "}" - s := strings.Join([]string{`&PBNode{`, + s := strings.Join([]string{ + `&PBNode{`, `Data:` + valueToStringMerkledag(pbNode.Data) + `,`, `Links:` + repeatedStringForLinks + `,`, `XXX_unrecognized:` + fmt.Sprintf("%v", pbNode.XXX_unrecognized) + `,`, @@ -705,6 +733,7 @@ func (pbNode *PBNode) String() string { }, "") return s } + func valueToStringMerkledag(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -713,6 +742,7 @@ func valueToStringMerkledag(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } + func (m *PBLink) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -854,6 +884,7 @@ func (m *PBLink) Unmarshal(dAtA []byte) error { } return nil } + func (m *PBNode) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -976,6 +1007,7 @@ func (m *PBNode) Unmarshal(dAtA []byte) error { } return nil } + func skipMerkledag(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/ipld/merkledag/pb/merkledagpb_test.go b/ipld/merkledag/pb/merkledagpb_test.go index f72b306da..da757b6b2 100644 --- a/ipld/merkledag/pb/merkledagpb_test.go +++ b/ipld/merkledag/pb/merkledagpb_test.go @@ -18,9 +18,11 @@ import ( ) // Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +var ( + _ = proto.Marshal + _ = fmt.Errorf + _ = math.Inf +) func TestPBLinkProto(t *testing.T) { seed := time.Now().UnixNano() @@ -247,6 +249,7 @@ func TestPBLinkJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } + func TestPBNodeJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -268,6 +271,7 @@ func TestPBNodeJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } + func TestPBLinkProtoText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -351,6 +355,7 @@ func TestPBLinkVerboseEqual(t *testing.T) { t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err) } } + func TestPBNodeVerboseEqual(t *testing.T) { popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano())) p := NewPopulatedPBNode(popr, false) @@ -366,6 +371,7 @@ func TestPBNodeVerboseEqual(t *testing.T) { t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err) } } + func TestPBLinkGoString(t *testing.T) { popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano())) p := NewPopulatedPBLink(popr, false) @@ -379,6 +385,7 @@ func TestPBLinkGoString(t *testing.T) { t.Fatal(err) } } + func TestPBNodeGoString(t *testing.T) { popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano())) p := NewPopulatedPBNode(popr, false) @@ -392,6 +399,7 @@ func TestPBNodeGoString(t *testing.T) { t.Fatal(err) } } + func TestPBLinkSize(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -473,6 +481,7 @@ func TestPBLinkStringer(t *testing.T) { t.Fatalf("String want %v got %v", s1, s2) } } + func TestPBNodeStringer(t *testing.T) { popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano())) p := NewPopulatedPBNode(popr, false) @@ -483,4 +492,4 @@ func TestPBNodeStringer(t *testing.T) { } } -//These tests are generated by github.com/gogo/protobuf/plugin/testgen +// These tests are generated by github.com/gogo/protobuf/plugin/testgen diff --git a/ipld/merkledag/pb/stability_test.go b/ipld/merkledag/pb/stability_test.go index 5da833713..245296a80 100644 --- a/ipld/merkledag/pb/stability_test.go +++ b/ipld/merkledag/pb/stability_test.go @@ -20,5 +20,4 @@ func TestStability(t *testing.T) { t.Logf("%q", d) t.Fatal("protobuf not stable") } - } diff --git a/ipld/merkledag/traverse/traverse.go b/ipld/merkledag/traverse/traverse.go index dbc426fa9..a3836e385 100644 --- a/ipld/merkledag/traverse/traverse.go +++ b/ipld/merkledag/traverse/traverse.go @@ -63,7 +63,6 @@ func (t *traversal) callFunc(next State) error { // // the error handling is a little complicated. 
func (t *traversal) getNode(link *ipld.Link) (ipld.Node, error) { - getNode := func(l *ipld.Link) (ipld.Node, error) { next, err := l.GetNode(context.TODO(), t.opts.DAG) if err != nil { @@ -164,7 +163,6 @@ func dfsDescend(df dfsFunc, curr State, t *traversal) error { } func bfsTraverse(root State, t *traversal) error { - if skip, err := t.shouldSkip(root.Node); skip || err != nil { return err } diff --git a/ipld/unixfs/file/unixfile.go b/ipld/unixfs/file/unixfile.go index 4eae1bf07..5ef968d1b 100644 --- a/ipld/unixfs/file/unixfile.go +++ b/ipld/unixfs/file/unixfile.go @@ -179,5 +179,7 @@ func NewUnixfsFile(ctx context.Context, dserv ipld.DAGService, nd ipld.Node) (fi }, nil } -var _ files.Directory = &ufsDirectory{} -var _ files.File = &ufsFile{} +var ( + _ files.Directory = &ufsDirectory{} + _ files.File = &ufsFile{} +) diff --git a/ipld/unixfs/hamt/hamt_test.go b/ipld/unixfs/hamt/hamt_test.go index e6892919e..2defc0d3e 100644 --- a/ipld/unixfs/hamt/hamt_test.go +++ b/ipld/unixfs/hamt/hamt_test.go @@ -79,7 +79,6 @@ func assertLink(s *Shard, name string, found bool) error { } func assertLinksEqual(linksA []*ipld.Link, linksB []*ipld.Link) error { - if len(linksA) != len(linksB) { return fmt.Errorf("links arrays are different sizes") } @@ -164,8 +163,6 @@ func TestDirBuilding(t *testing.T) { t.Fatal(err) } - //printDag(ds, nd, 0) - k := nd.Cid() if k.String() != "QmY89TkSEVHykWMHDmyejSWFj9CYNtvzw4UwnT9xbc4Zjc" { diff --git a/ipld/unixfs/importer/balanced/balanced_test.go b/ipld/unixfs/importer/balanced/balanced_test.go index 0c2d7a29f..17afbb232 100644 --- a/ipld/unixfs/importer/balanced/balanced_test.go +++ b/ipld/unixfs/importer/balanced/balanced_test.go @@ -298,7 +298,6 @@ func TestSeekingStress(t *testing.T) { t.Fatal(err) } } - } func TestSeekingConsistency(t *testing.T) { diff --git a/ipld/unixfs/importer/helpers/dagbuilder.go b/ipld/unixfs/importer/helpers/dagbuilder.go index 6d6014311..25514d795 100644 --- a/ipld/unixfs/importer/helpers/dagbuilder.go +++ b/ipld/unixfs/importer/helpers/dagbuilder.go @@ -177,7 +177,6 @@ func (db *DagBuilderHelper) NewLeafNode(data []byte, fsNodeType pb.Data_DataType // NOTE: This function creates raw data nodes so it only works // for the `trickle.Layout`. func (db *DagBuilderHelper) FillNodeLayer(node *FSNodeOverDag) error { - // while we have room AND we're not done for node.NumChildren() < db.maxlinks && !db.Done() { child, childFileSize, err := db.NewLeafDataNode(ft.TRaw) diff --git a/ipld/unixfs/importer/helpers/helpers.go b/ipld/unixfs/importer/helpers/helpers.go index 20cb598e6..0a199d2b6 100644 --- a/ipld/unixfs/importer/helpers/helpers.go +++ b/ipld/unixfs/importer/helpers/helpers.go @@ -8,8 +8,10 @@ import ( var BlockSizeLimit = 1048576 // 1 MB // rough estimates on expected sizes -var roughLinkBlockSize = 1 << 13 // 8KB -var roughLinkSize = 34 + 8 + 5 // sha256 multihash + size + no name + protobuf framing +var ( + roughLinkBlockSize = 1 << 13 // 8KB + roughLinkSize = 34 + 8 + 5 // sha256 multihash + size + no name + protobuf framing +) // DefaultLinksPerBlock governs how the importer decides how many links there // will be per block. 
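// Hedged aside (not part of the patch): the two estimates grouped above drive the
// importer's default fan-out. Assuming DefaultLinksPerBlock is derived by dividing
// the rough block size by the rough per-link size (an illustrative derivation, not a
// change made in this diff), the arithmetic works out as follows:
package main

import "fmt"

func main() {
	roughLinkBlockSize := 1 << 13 // ~8 KiB budget for an internal node
	roughLinkSize := 34 + 8 + 5   // sha256 multihash + size + protobuf framing

	fmt.Println(roughLinkSize)                      // 47
	fmt.Println(roughLinkBlockSize / roughLinkSize) // 174 links per block
}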
This calculation is based on expected distributions of: diff --git a/ipld/unixfs/importer/trickle/trickle_test.go b/ipld/unixfs/importer/trickle/trickle_test.go index 59231f49c..70bc2dd13 100644 --- a/ipld/unixfs/importer/trickle/trickle_test.go +++ b/ipld/unixfs/importer/trickle/trickle_test.go @@ -431,7 +431,6 @@ func testSeekingStress(t *testing.T, rawLeaves UseRawLeaves) { t.Fatal(err) } } - } func TestSeekingConsistency(t *testing.T) { diff --git a/ipld/unixfs/io/dagreader.go b/ipld/unixfs/io/dagreader.go index 83d33af6c..77dc8d921 100644 --- a/ipld/unixfs/io/dagreader.go +++ b/ipld/unixfs/io/dagreader.go @@ -100,7 +100,6 @@ func NewDagReader(ctx context.Context, n ipld.Node, serv ipld.NodeGetter) (DagRe // dagReader provides a way to easily read the data contained in a dag. type dagReader struct { - // Structure to perform the DAG iteration and search, the reader // just needs to add logic to the `Visitor` callback passed to // `Iterate` and `Seek`. @@ -227,7 +226,6 @@ func (dr *dagReader) saveNodeData(node ipld.Node) error { // any errors as it's always reading from a `bytes.Reader` and asking only // the available data in it. func (dr *dagReader) readNodeDataBuffer(out []byte) int { - n, _ := dr.currentNodeData.Read(out) // Ignore the error as the EOF may not be returned in the first // `Read` call, explicitly ask for an empty buffer below to check @@ -253,7 +251,6 @@ func (dr *dagReader) readNodeDataBuffer(out []byte) int { // TODO: Check what part of the logic between the two functions // can be extracted away. func (dr *dagReader) writeNodeDataBuffer(w io.Writer) (int64, error) { - n, err := dr.currentNodeData.WriteTo(w) if err != nil { return n, err @@ -450,7 +447,6 @@ func (dr *dagReader) Seek(offset int64, whence int) (int64, error) { // In the leaf node case the search will stop here. } }) - if err != nil { return 0, err } diff --git a/ipld/unixfs/io/dagreader_test.go b/ipld/unixfs/io/dagreader_test.go index 9787c4281..1f9f93558 100644 --- a/ipld/unixfs/io/dagreader_test.go +++ b/ipld/unixfs/io/dagreader_test.go @@ -186,7 +186,6 @@ func TestRelativeSeek(t *testing.T) { } reader.Seek(-5, io.SeekCurrent) // seek 4 bytes but we read one byte every time so 5 bytes } - } func TestTypeFailures(t *testing.T) { @@ -283,7 +282,6 @@ func TestWriteTo(t *testing.T) { if err != nil { t.Fatal(err) } - } func TestReaderSzie(t *testing.T) { diff --git a/ipld/unixfs/io/directory.go b/ipld/unixfs/io/directory.go index a3638676c..9de460e38 100644 --- a/ipld/unixfs/io/directory.go +++ b/ipld/unixfs/io/directory.go @@ -38,7 +38,6 @@ var DefaultShardWidth = 256 // directory trees is out of its scope, they are managed by the MFS layer // (which is the main consumer of this interface). type Directory interface { - // SetCidBuilder sets the CID Builder of the root node. 
SetCidBuilder(cid.Builder) diff --git a/ipld/unixfs/mod/dagmodifier_test.go b/ipld/unixfs/mod/dagmodifier_test.go index 14adeca35..fab7a125b 100644 --- a/ipld/unixfs/mod/dagmodifier_test.go +++ b/ipld/unixfs/mod/dagmodifier_test.go @@ -83,6 +83,7 @@ func runAllSubtests(t *testing.T, tfunc func(*testing.T, testu.NodeOpts)) { func TestDagModifierBasic(t *testing.T) { runAllSubtests(t, testDagModifierBasic) } + func testDagModifierBasic(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() b, n := testu.GetRandomNode(t, dserv, 50000, opts) @@ -143,6 +144,7 @@ func testDagModifierBasic(t *testing.T, opts testu.NodeOpts) { func TestMultiWrite(t *testing.T) { runAllSubtests(t, testMultiWrite) } + func testMultiWrite(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() n := testu.GetEmptyNode(t, dserv, opts) @@ -186,6 +188,7 @@ func testMultiWrite(t *testing.T, opts testu.NodeOpts) { func TestMultiWriteAndFlush(t *testing.T) { runAllSubtests(t, testMultiWriteAndFlush) } + func testMultiWriteAndFlush(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() n := testu.GetEmptyNode(t, dserv, opts) @@ -224,6 +227,7 @@ func testMultiWriteAndFlush(t *testing.T, opts testu.NodeOpts) { func TestWriteNewFile(t *testing.T) { runAllSubtests(t, testWriteNewFile) } + func testWriteNewFile(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() n := testu.GetEmptyNode(t, dserv, opts) @@ -256,6 +260,7 @@ func testWriteNewFile(t *testing.T, opts testu.NodeOpts) { func TestMultiWriteCoal(t *testing.T) { runAllSubtests(t, testMultiWriteCoal) } + func testMultiWriteCoal(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() n := testu.GetEmptyNode(t, dserv, opts) @@ -292,6 +297,7 @@ func testMultiWriteCoal(t *testing.T, opts testu.NodeOpts) { func TestLargeWriteChunks(t *testing.T) { runAllSubtests(t, testLargeWriteChunks) } + func testLargeWriteChunks(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() n := testu.GetEmptyNode(t, dserv, opts) @@ -341,6 +347,7 @@ func testLargeWriteChunks(t *testing.T, opts testu.NodeOpts) { func TestDagTruncate(t *testing.T) { runAllSubtests(t, testDagTruncate) } + func testDagTruncate(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() b, n := testu.GetRandomNode(t, dserv, 50000, opts) @@ -473,6 +480,7 @@ func TestDagSync(t *testing.T) { func TestDagTruncateSameSize(t *testing.T) { runAllSubtests(t, testDagTruncateSameSize) } + func testDagTruncateSameSize(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() _, n := testu.GetRandomNode(t, dserv, 50000, opts) @@ -508,6 +516,7 @@ func testDagTruncateSameSize(t *testing.T, opts testu.NodeOpts) { func TestSparseWrite(t *testing.T) { runAllSubtests(t, testSparseWrite) } + func testSparseWrite(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() n := testu.GetEmptyNode(t, dserv, opts) @@ -552,6 +561,7 @@ func testSparseWrite(t *testing.T, opts testu.NodeOpts) { func TestSeekPastEndWrite(t *testing.T) { runAllSubtests(t, testSeekPastEndWrite) } + func testSeekPastEndWrite(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() n := testu.GetEmptyNode(t, dserv, opts) @@ -605,6 +615,7 @@ func testSeekPastEndWrite(t *testing.T, opts testu.NodeOpts) { func TestRelativeSeek(t *testing.T) { runAllSubtests(t, testRelativeSeek) } + func testRelativeSeek(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() n := testu.GetEmptyNode(t, dserv, opts) @@ -641,6 +652,7 @@ func testRelativeSeek(t *testing.T, opts testu.NodeOpts) { func 
TestInvalidSeek(t *testing.T) { runAllSubtests(t, testInvalidSeek) } + func testInvalidSeek(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() n := testu.GetEmptyNode(t, dserv, opts) @@ -665,6 +677,7 @@ func testInvalidSeek(t *testing.T, opts testu.NodeOpts) { func TestEndSeek(t *testing.T) { runAllSubtests(t, testEndSeek) } + func testEndSeek(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() @@ -713,6 +726,7 @@ func testEndSeek(t *testing.T, opts testu.NodeOpts) { func TestReadAndSeek(t *testing.T) { runAllSubtests(t, testReadAndSeek) } + func testReadAndSeek(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() @@ -765,7 +779,7 @@ func testReadAndSeek(t *testing.T, opts testu.NodeOpts) { t.Fatalf("error: %s, offset %d, reader offset %d", err, dagmod.curWrOff, getOffset(dagmod.read)) } - //read 5,6,7 + // read 5,6,7 readBuf = make([]byte, 3) c, err = dagmod.Read(readBuf) if err != nil { @@ -779,14 +793,13 @@ func testReadAndSeek(t *testing.T, opts testu.NodeOpts) { if readBuf[i] != i+5 { t.Fatalf("wrong value %d [at index %d]", readBuf[i], i) } - } - } func TestCtxRead(t *testing.T) { runAllSubtests(t, testCtxRead) } + func testCtxRead(t *testing.T, opts testu.NodeOpts) { dserv := testu.GetDAGServ() diff --git a/ipld/unixfs/pb/unixfs.pb.go b/ipld/unixfs/pb/unixfs.pb.go index 2f7cb44ab..805c11289 100644 --- a/ipld/unixfs/pb/unixfs.pb.go +++ b/ipld/unixfs/pb/unixfs.pb.go @@ -5,14 +5,17 @@ package unixfs_pb import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" math "math" + + proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +var ( + _ = proto.Marshal + _ = fmt.Errorf + _ = math.Inf +) // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. 
@@ -90,18 +93,23 @@ func (*Data) ProtoMessage() {} func (*Data) Descriptor() ([]byte, []int) { return fileDescriptor_e2fd76cc44dfc7c3, []int{0} } + func (m *Data) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Data.Unmarshal(m, b) } + func (m *Data) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Data.Marshal(b, m, deterministic) } + func (m *Data) XXX_Merge(src proto.Message) { xxx_messageInfo_Data.Merge(m, src) } + func (m *Data) XXX_Size() int { return xxx_messageInfo_Data.Size(m) } + func (m *Data) XXX_DiscardUnknown() { xxx_messageInfo_Data.DiscardUnknown(m) } @@ -163,18 +171,23 @@ func (*Metadata) ProtoMessage() {} func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptor_e2fd76cc44dfc7c3, []int{1} } + func (m *Metadata) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Metadata.Unmarshal(m, b) } + func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) } + func (m *Metadata) XXX_Merge(src proto.Message) { xxx_messageInfo_Metadata.Merge(m, src) } + func (m *Metadata) XXX_Size() int { return xxx_messageInfo_Metadata.Size(m) } + func (m *Metadata) XXX_DiscardUnknown() { xxx_messageInfo_Metadata.DiscardUnknown(m) } diff --git a/ipld/unixfs/unixfs.go b/ipld/unixfs/unixfs.go index ebb334e5a..4131df837 100644 --- a/ipld/unixfs/unixfs.go +++ b/ipld/unixfs/unixfs.go @@ -77,7 +77,7 @@ func FolderPBData() []byte { data, err := proto.Marshal(pbfile) if err != nil { - //this really shouldnt happen, i promise + // this really shouldnt happen, i promise panic(err) } return data @@ -174,7 +174,6 @@ func size(pbdata *pb.Data) (uint64, error) { // to guarantee that the required (`Type` and `Filesize`) fields in the `format` // structure are initialized before marshaling (in `GetBytes()`). type FSNode struct { - // UnixFS format defined as a protocol buffers message. 
format pb.Data } diff --git a/ipld/unixfs/unixfs_test.go b/ipld/unixfs/unixfs_test.go index d06ababe2..b785be8ad 100644 --- a/ipld/unixfs/unixfs_test.go +++ b/ipld/unixfs/unixfs_test.go @@ -165,7 +165,6 @@ func TestMetadata(t *testing.T) { if !mimeAiff { t.Fatal("Metadata does not Marshal and Unmarshal properly!") } - } func TestIsDir(t *testing.T) { diff --git a/ipns/pb/record.pb.go b/ipns/pb/record.pb.go index fe6950929..1f0effbd8 100644 --- a/ipns/pb/record.pb.go +++ b/ipns/pb/record.pb.go @@ -7,10 +7,11 @@ package pb import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) const ( @@ -226,12 +227,15 @@ func file_record_proto_rawDescGZIP() []byte { return file_record_proto_rawDescData } -var file_record_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_record_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_record_proto_goTypes = []interface{}{ - (IpnsRecord_ValidityType)(0), // 0: github.com.boxo.ipns.pb.IpnsRecord.ValidityType - (*IpnsRecord)(nil), // 1: github.com.boxo.ipns.pb.IpnsRecord -} +var ( + file_record_proto_enumTypes = make([]protoimpl.EnumInfo, 1) + file_record_proto_msgTypes = make([]protoimpl.MessageInfo, 1) + file_record_proto_goTypes = []interface{}{ + (IpnsRecord_ValidityType)(0), // 0: github.com.boxo.ipns.pb.IpnsRecord.ValidityType + (*IpnsRecord)(nil), // 1: github.com.boxo.ipns.pb.IpnsRecord + } +) + var file_record_proto_depIdxs = []int32{ 0, // 0: github.com.boxo.ipns.pb.IpnsRecord.validityType:type_name -> github.com.boxo.ipns.pb.IpnsRecord.ValidityType 1, // [1:1] is the sub-list for method output_type diff --git a/keystore/keystore.go b/keystore/keystore.go index b762686e4..4f6b250d1 100644 --- a/keystore/keystore.go +++ b/keystore/keystore.go @@ -1,13 +1,12 @@ package keystore import ( + "encoding/base32" "fmt" "os" "path/filepath" "strings" - "encoding/base32" - logging "github.com/ipfs/go-log/v2" ci "github.com/libp2p/go-libp2p/core/crypto" ) @@ -46,7 +45,7 @@ type FSKeystore struct { // NewFSKeystore returns a new filesystem-backed keystore. 
func NewFSKeystore(dir string) (*FSKeystore, error) { - err := os.Mkdir(dir, 0700) + err := os.Mkdir(dir, 0o700) switch { case os.IsExist(err): case err == nil: @@ -87,7 +86,7 @@ func (ks *FSKeystore) Put(name string, k ci.PrivKey) error { kp := filepath.Join(ks.dir, name) - fi, err := os.OpenFile(kp, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0400) + fi, err := os.OpenFile(kp, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0o400) if err != nil { if os.IsExist(err) { err = ErrKeyExists diff --git a/keystore/keystore_test.go b/keystore/keystore_test.go index 29e1cf849..fbd16a0e0 100644 --- a/keystore/keystore_test.go +++ b/keystore/keystore_test.go @@ -140,7 +140,6 @@ func TestKeystoreBasics(t *testing.T) { func TestInvalidKeyFiles(t *testing.T) { tdir, err := os.MkdirTemp("", "keystore-test") - if err != nil { t.Fatal(err) } @@ -164,12 +163,12 @@ func TestInvalidKeyFiles(t *testing.T) { t.Fatal(err) } - err = os.WriteFile(filepath.Join(ks.dir, encodedName), bytes, 0644) + err = os.WriteFile(filepath.Join(ks.dir, encodedName), bytes, 0o644) if err != nil { t.Fatal(err) } - err = os.WriteFile(filepath.Join(ks.dir, "z.invalid"), bytes, 0644) + err = os.WriteFile(filepath.Join(ks.dir, "z.invalid"), bytes, 0o644) if err != nil { t.Fatal(err) } diff --git a/mfs/dir.go b/mfs/dir.go index 27e012da9..ab9812754 100644 --- a/mfs/dir.go +++ b/mfs/dir.go @@ -17,9 +17,11 @@ import ( ipld "github.com/ipfs/go-ipld-format" ) -var ErrNotYetImplemented = errors.New("not yet implemented") -var ErrInvalidChild = errors.New("invalid child node") -var ErrDirExists = errors.New("directory already has entry by that name") +var ( + ErrNotYetImplemented = errors.New("not yet implemented") + ErrInvalidChild = errors.New("invalid child node") + ErrDirExists = errors.New("directory already has entry by that name") +) // TODO: There's too much functionality associated with this structure, // let's organize it (and if possible extract part of it elsewhere) diff --git a/mfs/mfs_test.go b/mfs/mfs_test.go index e57404b82..9ecdbffd5 100644 --- a/mfs/mfs_test.go +++ b/mfs/mfs_test.go @@ -213,7 +213,6 @@ func setupRoot(ctx context.Context, t testing.TB) (ipld.DAGService, *Root) { fmt.Println("PUBLISHED: ", c) return nil }) - if err != nil { t.Fatal(err) } @@ -644,7 +643,6 @@ func TestMfsDirListNames(t *testing.T) { } list, err := rootdir.ListNames(ctx) - if err != nil { t.Fatal(err) } @@ -1488,7 +1486,6 @@ func getFileHandle(r *Root, path string, create bool, builder cid.Builder) (*Fil } func FuzzMkdirAndWriteConcurrently(f *testing.F) { - testCases := []struct { flush bool mkparents bool @@ -1567,5 +1564,4 @@ func FuzzMkdirAndWriteConcurrently(f *testing.F) { t.Logf("error writting to file from filepath %s: %s", filepath, err) } }) - } diff --git a/mfs/root.go b/mfs/root.go index d807da3bd..c08d2d053 100644 --- a/mfs/root.go +++ b/mfs/root.go @@ -17,8 +17,10 @@ import ( ) // TODO: Remove if not used. -var ErrNotExist = errors.New("no such rootfs") -var ErrClosed = errors.New("file closed") +var ( + ErrNotExist = errors.New("no such rootfs") + ErrClosed = errors.New("file closed") +) var log = logging.Logger("mfs") @@ -85,7 +87,6 @@ func IsFile(fsn FSNode) bool { // Root represents the root of a filesystem tree. type Root struct { - // Root directory of the MFS layout. dir *Directory @@ -94,7 +95,6 @@ type Root struct { // NewRoot creates a new Root and starts up a republisher routine for it. 
func NewRoot(parent context.Context, ds ipld.DAGService, node *dag.ProtoNode, pf PubFunc) (*Root, error) { - var repub *Republisher if pf != nil { repub = NewRepublisher(parent, pf, time.Millisecond*300, time.Second*3) diff --git a/namesys/interface.go b/namesys/interface.go index 655be179d..5d50936ee 100644 --- a/namesys/interface.go +++ b/namesys/interface.go @@ -30,9 +30,8 @@ For command-line bindings to this functionality, see: package namesys import ( - "errors" - "context" + "errors" opts "github.com/ipfs/boxo/coreiface/options/namesys" "github.com/ipfs/boxo/path" @@ -69,7 +68,6 @@ type Result struct { // Resolver is an object capable of resolving names. type Resolver interface { - // Resolve performs a recursive lookup, returning the dereferenced // path. For example, if ipfs.io has a DNS TXT record pointing to // /ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy diff --git a/namesys/namesys.go b/namesys/namesys.go index de74f1488..df4403570 100644 --- a/namesys/namesys.go +++ b/namesys/namesys.go @@ -198,7 +198,6 @@ func (ns *mpns) resolveOnceAsync(ctx context.Context, name string, options opts. var res resolver ipnsKey, err := peer.Decode(key) - // CIDs in IPNS are expected to have libp2p-key multicodec // We ease the transition by returning a more meaningful error with a valid CID if err != nil { diff --git a/path/path.go b/path/path.go index 6d53ade04..a9b36c3ce 100644 --- a/path/path.go +++ b/path/path.go @@ -60,7 +60,6 @@ func (p Path) IsJustAKey() bool { // segment, separately. If there is no more to pop (the path is just a key), // the original path is returned. func (p Path) PopLastSegment() (Path, string, error) { - if p.IsJustAKey() { return p, "", nil } @@ -107,7 +106,7 @@ func ParsePath(txt string) (Path, error) { return "", &ErrInvalidPath{error: fmt.Errorf("invalid ipfs path"), path: txt} } - //TODO: make this smarter + // TODO: make this smarter switch parts[1] { case "ipfs", "ipld": if parts[2] == "" { diff --git a/path/resolver/resolver_test.go b/path/resolver/resolver_test.go index c91d950f1..c20f9306d 100644 --- a/path/resolver/resolver_test.go +++ b/path/resolver/resolver_test.go @@ -132,6 +132,7 @@ func TestRecurivePathResolution(t *testing.T) { p.String(), rCid.String(), cKey.String())) } } + func TestResolveToLastNode_ErrNoLink(t *testing.T) { ctx := context.Background() bsrv := dagmock.Bserv() diff --git a/pinning/remote/client/client.go b/pinning/remote/client/client.go index 6c869d40f..e1da64ffb 100644 --- a/pinning/remote/client/client.go +++ b/pinning/remote/client/client.go @@ -114,8 +114,10 @@ func (pinLsOpts) FilterAfter(t time.Time) LsOption { } } -const recordLimit = 1000 -const defaultLimit = 10 +const ( + recordLimit = 1000 + defaultLimit = 10 +) func (pinLsOpts) Limit(limit int) LsOption { return func(options *lsSettings) error { diff --git a/pinning/remote/client/openapi/client.go b/pinning/remote/client/openapi/client.go index b3cea998b..84ceeaafb 100644 --- a/pinning/remote/client/openapi/client.go +++ b/pinning/remote/client/openapi/client.go @@ -168,8 +168,8 @@ func (c *APIClient) prepareRequest( formParams url.Values, formFileName string, fileName string, - fileBytes []byte) (localVarRequest *http.Request, err error) { - + fileBytes []byte, +) (localVarRequest *http.Request, err error) { var body *bytes.Buffer // Detect postBody type and post. 
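The path/path.go hunks above touch ParsePath and PopLastSegment, whose doc comments describe the behaviour when the path is just a key. A minimal usage sketch follows; it is not part of this changeset, and the CID and path components are built on the fly purely for illustration:

```go
package main

import (
	"fmt"

	"github.com/ipfs/boxo/path"
	"github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

func main() {
	// Build a CIDv0 so the example does not rely on a hard-coded CID string.
	mhash, err := mh.Sum([]byte("example"), mh.SHA2_256, -1)
	if err != nil {
		panic(err)
	}
	root := cid.NewCidV0(mhash)

	p, err := path.ParsePath("/ipfs/" + root.String() + "/dir/readme.md")
	if err != nil {
		panic(err)
	}

	// PopLastSegment splits off the final component.
	rest, last, err := p.PopLastSegment()
	if err != nil {
		panic(err)
	}
	fmt.Println(rest.IsJustAKey(), last) // false readme.md

	// As documented, popping a path that is just a key returns the
	// original path and an empty segment.
	keyOnly, err := path.ParsePath("/ipfs/" + root.String())
	if err != nil {
		panic(err)
	}
	same, seg, _ := keyOnly.PopLastSegment()
	fmt.Println(same == keyOnly, seg == "") // true true
}
```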
diff --git a/pinning/remote/client/openapi/response.go b/pinning/remote/client/openapi/response.go index 8f9fb0b08..822455243 100644 --- a/pinning/remote/client/openapi/response.go +++ b/pinning/remote/client/openapi/response.go @@ -33,14 +33,12 @@ type APIResponse struct { // NewAPIResponse returns a new APIResonse object. func NewAPIResponse(r *http.Response) *APIResponse { - response := &APIResponse{Response: r} return response } // NewAPIResponseWithError returns a new APIResponse object with the provided error message. func NewAPIResponseWithError(errorMessage string) *APIResponse { - response := &APIResponse{Message: errorMessage} return response } diff --git a/provider/internal/queue/queue.go b/provider/internal/queue/queue.go index 800d3be4e..2fc32baf5 100644 --- a/provider/internal/queue/queue.go +++ b/provider/internal/queue/queue.go @@ -130,7 +130,6 @@ func (q *Queue) worker() { } case dequeue <- c: err := q.ds.Delete(q.ctx, k) - if err != nil { log.Errorf("Failed to delete queued cid %s with key %s: %s", c, k, err) continue diff --git a/provider/provider.go b/provider/provider.go index a3eff2f04..6fb021695 100644 --- a/provider/provider.go +++ b/provider/provider.go @@ -64,7 +64,6 @@ func NewPinnedProvider(onlyRoots bool, pinning pin.Pinner, fetchConfig fetcher.F case outCh <- c: } } - }() return outCh, nil diff --git a/provider/reprovider.go b/provider/reprovider.go index 619bf8196..f9d1b4203 100644 --- a/provider/reprovider.go +++ b/provider/reprovider.go @@ -13,7 +13,7 @@ import ( "github.com/ipfs/boxo/verifcid" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" - namespace "github.com/ipfs/go-datastore/namespace" + "github.com/ipfs/go-datastore/namespace" logging "github.com/ipfs/go-log/v2" "github.com/multiformats/go-multihash" ) @@ -47,6 +47,7 @@ type reprovider struct { initalReprovideDelay time.Duration initialReprovideDelaySet bool + allowlist verifcid.Allowlist rsys Provide keyProvider KeyChanFunc @@ -90,8 +91,10 @@ type Ready interface { // BatchProvidingSystem instances type Option func(system *reprovider) error -var lastReprovideKey = datastore.NewKey("/reprovide/lastreprovide") -var DefaultKeyPrefix = datastore.NewKey("/provider") +var ( + lastReprovideKey = datastore.NewKey("/reprovide/lastreprovide") + DefaultKeyPrefix = datastore.NewKey("/provider") +) // New creates a new [System]. By default it is offline, that means it will // enqueue tasks in ds. @@ -102,6 +105,7 @@ var DefaultKeyPrefix = datastore.NewKey("/provider") // If provider casts to [Ready], it will wait until [Ready.Ready] is true. 
func New(ds datastore.Batching, opts ...Option) (System, error) { s := &reprovider{ + allowlist: verifcid.DefaultAllowlist, reprovideInterval: DefaultReproviderInterval, maxReprovideBatchSize: math.MaxUint, keyPrefix: DefaultKeyPrefix, @@ -149,6 +153,13 @@ func New(ds datastore.Batching, opts ...Option) (System, error) { return s, nil } +func Allowlist(allowlist verifcid.Allowlist) Option { + return func(system *reprovider) error { + system.allowlist = allowlist + return nil + } +} + func ReproviderInterval(duration time.Duration) Option { return func(system *reprovider) error { system.reprovideInterval = duration @@ -294,7 +305,7 @@ func (s *reprovider) run() { delete(m, c) // hash security - if err := verifcid.ValidateCid(c); err != nil { + if err := verifcid.ValidateCid(s.allowlist, c); err != nil { log.Errorf("insecure hash in reprovider, %s (%s)", c, err) continue } diff --git a/routing/http/client/transport_test.go b/routing/http/client/transport_test.go index 7da545062..82567bb47 100644 --- a/routing/http/client/transport_test.go +++ b/routing/http/client/transport_test.go @@ -71,7 +71,6 @@ func TestResponseBodyLimitedTransport(t *testing.T) { } else { assert.Contains(t, err.Error(), c.expErr) } - }) } } diff --git a/routing/http/contentrouter/contentrouter_test.go b/routing/http/contentrouter/contentrouter_test.go index 4ca620c5d..3830482e2 100644 --- a/routing/http/contentrouter/contentrouter_test.go +++ b/routing/http/contentrouter/contentrouter_test.go @@ -27,10 +27,12 @@ func (m *mockClient) FindProviders(ctx context.Context, key cid.Cid) (iter.Resul args := m.Called(ctx, key) return args.Get(0).(iter.ResultIter[types.ProviderResponse]), args.Error(1) } + func (m *mockClient) Ready(ctx context.Context) (bool, error) { args := m.Called(ctx) return args.Bool(0), args.Error(1) } + func makeCID() cid.Cid { buf := make([]byte, 63) _, err := rand.Read(buf) @@ -78,7 +80,6 @@ func TestProvide(t *testing.T) { if c.expNotProvided { client.AssertNumberOfCalls(t, "ProvideBitswap", 0) } - }) } } diff --git a/routing/http/server/server.go b/routing/http/server/server.go index 8526fed84..835262990 100644 --- a/routing/http/server/server.go +++ b/routing/http/server/server.go @@ -156,7 +156,6 @@ func (s *server) provide(w http.ResponseWriter, httpReq *http.Request) { keys := make([]cid.Cid, len(v.Payload.Keys)) for i, k := range v.Payload.Keys { keys[i] = k.Cid - } addrs := make([]multiaddr.Multiaddr, len(v.Payload.Addrs)) for i, a := range v.Payload.Addrs { diff --git a/routing/http/server/server_test.go b/routing/http/server/server_test.go index bec959cf8..dfe38f0da 100644 --- a/routing/http/server/server_test.go +++ b/routing/http/server/server_test.go @@ -32,7 +32,8 @@ func TestHeaders(t *testing.T) { {Val: &types.ReadBitswapProviderRecord{ Protocol: "transport-bitswap", Schema: types.SchemaBitswap, - }}}, + }}, + }, ) c := "baeabep4vu3ceru7nerjjbk37sxb7wmftteve4hcosmyolsbsiubw2vr6pqzj6mw7kv6tbn6nqkkldnklbjgm5tzbi4hkpkled4xlcr7xz4bq" @@ -84,7 +85,8 @@ func TestResponse(t *testing.T) { Schema: types.SchemaBitswap, ID: &pid2, Addrs: []types.Multiaddr{}, - }}}, + }}, + }, ) router := &mockContentRouter{} @@ -261,6 +263,7 @@ func (m *mockContentRouter) FindProviders(ctx context.Context, key cid.Cid, limi args := m.Called(ctx, key, limit) return args.Get(0).(iter.ResultIter[types.ProviderResponse]), args.Error(1) } + func (m *mockContentRouter) ProvideBitswap(ctx context.Context, req *BitswapWriteProvideRequest) (time.Duration, error) { args := m.Called(ctx, req) return 
args.Get(0).(time.Duration), args.Error(1) diff --git a/routing/http/types/iter/json_test.go b/routing/http/types/iter/json_test.go index 99c3bde07..f3e5a3341 100644 --- a/routing/http/types/iter/json_test.go +++ b/routing/http/types/iter/json_test.go @@ -69,5 +69,4 @@ func TestJSONIter(t *testing.T) { } }) } - } diff --git a/routing/http/types/provider_bitswap.go b/routing/http/types/provider_bitswap.go index 66243dd5d..f0b5056e4 100644 --- a/routing/http/types/provider_bitswap.go +++ b/routing/http/types/provider_bitswap.go @@ -162,8 +162,10 @@ func (wbprr *WriteBitswapProviderRecordResponse) GetSchema() string { return wbprr.Schema } -var _ ReadProviderRecord = &ReadBitswapProviderRecord{} -var _ ProviderResponse = &ReadBitswapProviderRecord{} +var ( + _ ReadProviderRecord = &ReadBitswapProviderRecord{} + _ ProviderResponse = &ReadBitswapProviderRecord{} +) // ReadBitswapProviderRecord is a provider result with parameters for bitswap providers type ReadBitswapProviderRecord struct { diff --git a/routing/http/types/provider_unknown.go b/routing/http/types/provider_unknown.go index 3dadc0e9b..915cac481 100644 --- a/routing/http/types/provider_unknown.go +++ b/routing/http/types/provider_unknown.go @@ -6,9 +6,11 @@ import ( "github.com/ipfs/boxo/routing/http/internal/drjson" ) -var _ ReadProviderRecord = &UnknownProviderRecord{} -var _ WriteProviderRecord = &UnknownProviderRecord{} -var _ ProviderResponse = &UnknownProviderRecord{} +var ( + _ ReadProviderRecord = &UnknownProviderRecord{} + _ WriteProviderRecord = &UnknownProviderRecord{} + _ ProviderResponse = &UnknownProviderRecord{} +) // UnknownProviderRecord is used when we cannot parse the provider record using `GetProtocol` type UnknownProviderRecord struct { diff --git a/routing/http/types/time.go b/routing/http/types/time.go index 1d938807e..4b08f9bed 100644 --- a/routing/http/types/time.go +++ b/routing/http/types/time.go @@ -12,6 +12,7 @@ type Time struct{ time.Time } func (t *Time) MarshalJSON() ([]byte, error) { return drjson.MarshalJSONBytes(t.Time.UnixMilli()) } + func (t *Time) UnmarshalJSON(b []byte) error { var timestamp int64 err := json.Unmarshal(b, &timestamp) diff --git a/routing/mock/centralized_test.go b/routing/mock/centralized_test.go index 403e21350..983d6a23b 100644 --- a/routing/mock/centralized_test.go +++ b/routing/mock/centralized_test.go @@ -13,10 +13,9 @@ import ( ) func TestKeyNotFound(t *testing.T) { - - var pi = tnet.RandIdentityOrFatal(t) - var key = cid.NewCidV0(u.Hash([]byte("mock key"))) - var ctx = context.Background() + pi := tnet.RandIdentityOrFatal(t) + key := cid.NewCidV0(u.Hash([]byte("mock key"))) + ctx := context.Background() rs := NewServer() providers := rs.Client(pi).FindProvidersAsync(ctx, key, 10) diff --git a/routing/none/none_client.go b/routing/none/none_client.go index 6f400b54a..c8bcc1a3c 100644 --- a/routing/none/none_client.go +++ b/routing/none/none_client.go @@ -13,8 +13,7 @@ import ( "github.com/libp2p/go-libp2p/core/routing" ) -type nilclient struct { -} +type nilclient struct{} func (c *nilclient) PutValue(_ context.Context, _ string, _ []byte, _ ...routing.Option) error { return nil diff --git a/tar/extractor.go b/tar/extractor.go index b5377ddca..8b13b06db 100644 --- a/tar/extractor.go +++ b/tar/extractor.go @@ -10,9 +10,11 @@ import ( "strings" ) -var errTraverseSymlink = errors.New("cannot traverse symlinks") -var errInvalidRoot = errors.New("tar has invalid root") -var errInvalidRootMultipleRoots = fmt.Errorf("contains more than one root or the root directory is not the first 
entry : %w", errInvalidRoot) +var ( + errTraverseSymlink = errors.New("cannot traverse symlinks") + errInvalidRoot = errors.New("tar has invalid root") + errInvalidRootMultipleRoots = fmt.Errorf("contains more than one root or the root directory is not the first entry : %w", errInvalidRoot) +) // Extractor is used for extracting tar files to a filesystem. // @@ -252,7 +254,7 @@ func (te *Extractor) outputPath(basePlatformPath, relativeTarPath string) (strin var errExtractedDirToSymlink = errors.New("cannot extract to symlink") func (te *Extractor) extractDir(path string) error { - err := os.MkdirAll(path, 0755) + err := os.MkdirAll(path, 0o755) if err != nil { return err } diff --git a/tar/extractor_test.go b/tar/extractor_test.go index 717c65d19..d2b4e00fc 100644 --- a/tar/extractor_test.go +++ b/tar/extractor_test.go @@ -15,8 +15,10 @@ import ( "github.com/stretchr/testify/assert" ) -var symlinksEnabled bool -var symlinksEnabledErr error +var ( + symlinksEnabled bool + symlinksEnabledErr error +) func init() { // check if the platform supports symlinks @@ -320,7 +322,7 @@ func testTarExtraction(t *testing.T, setup func(t *testing.T, rootDir string), t rootDir, err := os.MkdirTemp("", "tar-extraction-test") assert.NoError(t, err) extractDir := fp.Join(rootDir, tarOutRoot) - err = os.MkdirAll(extractDir, 0755) + err = os.MkdirAll(extractDir, 0o755) assert.NoError(t, err) // Generated TAR file. @@ -376,9 +378,11 @@ type tarEntry interface { write(tw *tar.Writer) error } -var _ tarEntry = (*fileTarEntry)(nil) -var _ tarEntry = (*dirTarEntry)(nil) -var _ tarEntry = (*symlinkTarEntry)(nil) +var ( + _ tarEntry = (*fileTarEntry)(nil) + _ tarEntry = (*dirTarEntry)(nil) + _ tarEntry = (*symlinkTarEntry)(nil) +) type fileTarEntry struct { path string @@ -397,12 +401,13 @@ func (e *fileTarEntry) write(tw *tar.Writer) error { tw.Flush() return nil } + func writeFileHeader(w *tar.Writer, fpath string, size uint64) error { return w.WriteHeader(&tar.Header{ Name: fpath, Size: int64(size), Typeflag: tar.TypeReg, - Mode: 0644, + Mode: 0o644, ModTime: time.Now(), // TODO: set mode, dates, etc. when added to unixFS }) @@ -416,7 +421,7 @@ func (e *dirTarEntry) write(tw *tar.Writer) error { return tw.WriteHeader(&tar.Header{ Name: e.path, Typeflag: tar.TypeDir, - Mode: 0777, + Mode: 0o777, ModTime: time.Now(), // TODO: set mode, dates, etc. 
when added to unixFS }) @@ -431,7 +436,7 @@ func (e *symlinkTarEntry) write(w *tar.Writer) error { return w.WriteHeader(&tar.Header{ Name: e.path, Linkname: e.target, - Mode: 0777, + Mode: 0o777, Typeflag: tar.TypeSymlink, }) } diff --git a/tar/sanitize_windows.go b/tar/sanitize_windows.go index 4a788a484..a88a7055f 100644 --- a/tar/sanitize_windows.go +++ b/tar/sanitize_windows.go @@ -9,7 +9,7 @@ import ( // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx var reservedNames = [...]string{"CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9"} -const reservedCharsStr = `[<>:"\|?*]` + "\x00" //NOTE: `/` is not included as it is our standard path separator +const reservedCharsStr = `[<>:"\|?*]` + "\x00" // NOTE: `/` is not included as it is our standard path separator func isNullDevice(path string) bool { // This is a case insensitive comparison to NUL @@ -30,7 +30,7 @@ func isNullDevice(path string) bool { // validatePathComponent returns an error if the given path component is not allowed on the platform func validatePathComponent(c string) error { - //MSDN: Do not end a file or directory name with a space or a period + // MSDN: Do not end a file or directory name with a space or a period if strings.HasSuffix(c, ".") { return fmt.Errorf("invalid platform path: path components cannot end with '.' : %q", c) } diff --git a/tracing/file_exporter.go b/tracing/file_exporter.go index 32ca20ee2..e7fdfa99c 100644 --- a/tracing/file_exporter.go +++ b/tracing/file_exporter.go @@ -16,7 +16,7 @@ type fileExporter struct { } func newFileExporter(file string) (*fileExporter, error) { - f, err := os.OpenFile(file, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) + f, err := os.OpenFile(file, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0o666) if err != nil { return nil, fmt.Errorf("opening '%s' for OpenTelemetry file exporter: %w", file, err) } diff --git a/verifcid/allowlist.go b/verifcid/allowlist.go new file mode 100644 index 000000000..b572de3a6 --- /dev/null +++ b/verifcid/allowlist.go @@ -0,0 +1,69 @@ +package verifcid + +import ( + mh "github.com/multiformats/go-multihash" +) + +// DefaultAllowlist is the default list of hashes allowed in IPFS. +var DefaultAllowlist defaultAllowlist + +// Allowlist defines an interface containing list of allowed multihashes. +type Allowlist interface { + // IsAllowed checks for multihash allowance by the code. + IsAllowed(code uint64) bool +} + +// NewAllowlist constructs new [Allowlist] from the given map set. +func NewAllowlist(allowset map[uint64]bool) Allowlist { + return allowlist{allowset: allowset} +} + +// NewOverridingAllowlist is like [NewAllowlist] but it will fallback to an other [AllowList] if keys are missing. +// If override is nil it will return unsecure for unknown things. 
+func NewOverridingAllowlist(override Allowlist, allowset map[uint64]bool) Allowlist { + return allowlist{override, allowset} +} + +type allowlist struct { + override Allowlist + allowset map[uint64]bool +} + +func (al allowlist) IsAllowed(code uint64) bool { + if good, found := al.allowset[code]; found { + return good + } + + if al.override != nil { + return al.override.IsAllowed(code) + } + + return false +} + +type defaultAllowlist struct{} + +func (defaultAllowlist) IsAllowed(code uint64) bool { + switch code { + case mh.SHA2_256, mh.SHA2_512, + mh.SHAKE_256, + mh.DBL_SHA2_256, + mh.BLAKE3, + mh.IDENTITY, + + mh.SHA3_224, mh.SHA3_256, mh.SHA3_384, mh.SHA3_512, + mh.KECCAK_224, mh.KECCAK_256, mh.KECCAK_384, mh.KECCAK_512, + + mh.SHA1: // not really secure but still useful for git + return true + default: + if code >= mh.BLAKE2B_MIN+19 && code <= mh.BLAKE2B_MAX { + return true + } + if code >= mh.BLAKE2S_MIN+19 && code <= mh.BLAKE2S_MAX { + return true + } + + return false + } +} diff --git a/verifcid/validate_test.go b/verifcid/allowlist_test.go similarity index 69% rename from verifcid/validate_test.go rename to verifcid/allowlist_test.go index 5129b861a..ab8c415e1 100644 --- a/verifcid/validate_test.go +++ b/verifcid/allowlist_test.go @@ -5,10 +5,10 @@ import ( mh "github.com/multiformats/go-multihash" - cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-cid" ) -func TestValidateCids(t *testing.T) { +func TestDefaultAllowList(t *testing.T) { assertTrue := func(v bool) { t.Helper() if !v { @@ -21,17 +21,6 @@ func TestValidateCids(t *testing.T) { t.Fatal("expected failure") } } - - assertTrue(IsGoodHash(mh.SHA2_256)) - assertTrue(IsGoodHash(mh.BLAKE2B_MIN + 32)) - assertTrue(IsGoodHash(mh.DBL_SHA2_256)) - assertTrue(IsGoodHash(mh.KECCAK_256)) - assertTrue(IsGoodHash(mh.SHA3)) - - assertTrue(IsGoodHash(mh.SHA1)) - - assertFalse(IsGoodHash(mh.BLAKE2B_MIN + 5)) - mhcid := func(code uint64, length int) cid.Cid { mhash, err := mh.Sum([]byte{}, code, length) if err != nil { @@ -40,6 +29,15 @@ func TestValidateCids(t *testing.T) { return cid.NewCidV1(cid.DagCBOR, mhash) } + allowlist := DefaultAllowlist + assertTrue(allowlist.IsAllowed(mh.SHA2_256)) + assertTrue(allowlist.IsAllowed(mh.BLAKE2B_MIN + 32)) + assertTrue(allowlist.IsAllowed(mh.DBL_SHA2_256)) + assertTrue(allowlist.IsAllowed(mh.KECCAK_256)) + assertTrue(allowlist.IsAllowed(mh.SHA3)) + assertTrue(allowlist.IsAllowed(mh.SHA1)) + assertFalse(allowlist.IsAllowed(mh.BLAKE2B_MIN + 5)) + cases := []struct { cid cid.Cid err error @@ -53,9 +51,9 @@ func TestValidateCids(t *testing.T) { } for i, cas := range cases { - if ValidateCid(cas.cid) != cas.err { + if ValidateCid(allowlist, cas.cid) != cas.err { t.Errorf("wrong result in case of %s (index %d). 
Expected: %s, got %s", - cas.cid, i, cas.err, ValidateCid(cas.cid)) + cas.cid, i, cas.err, ValidateCid(DefaultAllowlist, cas.cid)) } } @@ -64,7 +62,7 @@ func TestValidateCids(t *testing.T) { if err != nil { t.Fatalf("failed to produce a multihash from the long blake3 hash: %v", err) } - if ValidateCid(cid.NewCidV1(cid.DagCBOR, longBlake3Mh)) != ErrAboveMaximumHashLength { + if ValidateCid(allowlist, cid.NewCidV1(cid.DagCBOR, longBlake3Mh)) != ErrAboveMaximumHashLength { t.Errorf("a CID that was longer than the maximum hash length did not error with ErrAboveMaximumHashLength") } } diff --git a/verifcid/cid.go b/verifcid/cid.go new file mode 100644 index 000000000..df8db8ddf --- /dev/null +++ b/verifcid/cid.go @@ -0,0 +1,37 @@ +package verifcid + +import ( + "fmt" + + "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" +) + +var ( + ErrPossiblyInsecureHashFunction = fmt.Errorf("potentially insecure hash functions not allowed") + ErrBelowMinimumHashLength = fmt.Errorf("hashes must be at least %d bytes long", minimumHashLength) + ErrAboveMaximumHashLength = fmt.Errorf("hashes must be at most %d bytes long", maximumHashLength) +) + +const ( + minimumHashLength = 20 + maximumHashLength = 128 +) + +// ValidateCid validates multihash allowance behind given CID. +func ValidateCid(allowlist Allowlist, c cid.Cid) error { + pref := c.Prefix() + if !allowlist.IsAllowed(pref.MhType) { + return ErrPossiblyInsecureHashFunction + } + + if pref.MhType != mh.IDENTITY && pref.MhLength < minimumHashLength { + return ErrBelowMinimumHashLength + } + + if pref.MhType != mh.IDENTITY && pref.MhLength > maximumHashLength { + return ErrAboveMaximumHashLength + } + + return nil +} diff --git a/verifcid/validate.go b/verifcid/validate.go deleted file mode 100644 index 7b27debc9..000000000 --- a/verifcid/validate.go +++ /dev/null @@ -1,69 +0,0 @@ -package verifcid - -import ( - "fmt" - - cid "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" -) - -var ErrPossiblyInsecureHashFunction = fmt.Errorf("potentially insecure hash functions not allowed") -var ErrBelowMinimumHashLength = fmt.Errorf("hashes must be at least %d bytes long", minimumHashLength) -var ErrAboveMaximumHashLength = fmt.Errorf("hashes must be at most %d bytes long", maximumHashLength) - -const minimumHashLength = 20 -const maximumHashLength = 128 - -var goodset = map[uint64]bool{ - mh.SHA2_256: true, - mh.SHA2_512: true, - mh.SHA3_224: true, - mh.SHA3_256: true, - mh.SHA3_384: true, - mh.SHA3_512: true, - mh.SHAKE_256: true, - mh.DBL_SHA2_256: true, - mh.KECCAK_224: true, - mh.KECCAK_256: true, - mh.KECCAK_384: true, - mh.KECCAK_512: true, - mh.BLAKE3: true, - mh.IDENTITY: true, - - mh.SHA1: true, // not really secure but still useful -} - -func IsGoodHash(code uint64) bool { - good, found := goodset[code] - if good { - return true - } - - if !found { - if code >= mh.BLAKE2B_MIN+19 && code <= mh.BLAKE2B_MAX { - return true - } - if code >= mh.BLAKE2S_MIN+19 && code <= mh.BLAKE2S_MAX { - return true - } - } - - return false -} - -func ValidateCid(c cid.Cid) error { - pref := c.Prefix() - if !IsGoodHash(pref.MhType) { - return ErrPossiblyInsecureHashFunction - } - - if pref.MhType != mh.IDENTITY && pref.MhLength < minimumHashLength { - return ErrBelowMinimumHashLength - } - - if pref.MhType != mh.IDENTITY && pref.MhLength > maximumHashLength { - return ErrAboveMaximumHashLength - } - - return nil -}
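The verifcid changes above replace the package-level goodset with an injectable Allowlist, and provider/reprovider.go threads one through via the new Allowlist option, defaulting to verifcid.DefaultAllowlist. A usage sketch follows; it is not part of this changeset, and the SHA2-256-only policy and the in-memory datastore are illustrative assumptions only:

```go
package main

import (
	"fmt"
	"time"

	"github.com/ipfs/boxo/provider"
	"github.com/ipfs/boxo/verifcid"
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	mh "github.com/multiformats/go-multihash"
)

func main() {
	// A stricter policy: only SHA2-256 is allowed. With a nil override,
	// every other multihash code is rejected.
	strict := verifcid.NewOverridingAllowlist(nil, map[uint64]bool{
		mh.SHA2_256: true,
	})
	fmt.Println(strict.IsAllowed(mh.SHA1)) // false (DefaultAllowlist would allow it)

	// ValidateCid now takes the allowlist explicitly instead of consulting
	// a package-level set.
	mhash, _ := mh.Sum([]byte("example"), mh.SHA2_256, -1)
	c := cid.NewCidV1(cid.Raw, mhash)
	fmt.Println(verifcid.ValidateCid(strict, c))                    // <nil>
	fmt.Println(verifcid.ValidateCid(verifcid.DefaultAllowlist, c)) // <nil>

	// The reprovider picks up a custom allowlist through the new option.
	ds := dssync.MutexWrap(datastore.NewMapDatastore())
	sys, err := provider.New(ds,
		provider.Allowlist(strict),
		provider.ReproviderInterval(12*time.Hour),
	)
	if err != nil {
		panic(err)
	}
	defer sys.Close()
}
```

Passing verifcid.DefaultAllowlist, or omitting the provider.Allowlist option entirely, keeps the same hash policy as the removed goodset.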