diff --git a/.eslintrc.js b/.eslintrc.js index 6d06a615..b39fae9d 100644 --- a/.eslintrc.js +++ b/.eslintrc.js @@ -29,8 +29,9 @@ module.exports = { } ], '@typescript-eslint/indent': 'off', // This is the job of StandardJS, they are competing rules so we turn off the Typescript one. - 'node/no-unsupported-features/es-syntax': 'off', // Allows us to use Import and Export keywords. '@typescript-eslint/no-non-null-assertion': 'off', + '@typescript-eslint/member-delimiter-style': 'off', + 'node/no-unsupported-features/es-syntax': 'off', // Allows us to use Import and Export keywords. "@typescript-eslint/strict-boolean-expressions": [ "error", { @@ -42,6 +43,8 @@ module.exports = { 'no-mixed-operators': 'off', 'space-before-function-paren': 'off', 'comma-dangle': 'off', + // Allow to place comments before the else {} block + 'brace-style': 'off', indent: 'off' } } diff --git a/package.json b/package.json index 501e0cd9..ca75eb7b 100644 --- a/package.json +++ b/package.json @@ -44,8 +44,11 @@ "debug": "^4.3.1", "denque": "^1.5.0", "err-code": "^3.0.1", + "iso-random-stream": "^2.0.2", "it-pipe": "^1.1.0", - "libp2p-interfaces": "^4.0.4", + "libp2p-crypto": "^0.21.2", + "libp2p-interfaces": "4.0.4", + "multiformats": "^9.6.4", "peer-id": "^0.16.0", "protobufjs": "^6.11.2", "uint8arrays": "^3.0.0" @@ -75,10 +78,10 @@ "eslint-plugin-standard": "^4.0.1", "it-pair": "^1.0.0", "libp2p": "0.36.1", - "libp2p-floodsub": "^0.29.0", - "libp2p-interfaces-compliance-tests": "^4.0.6", - "libp2p-mplex": "^0.10.3", - "libp2p-websockets": "^0.16.1", + "libp2p-floodsub": "^0.29.1", + "libp2p-interfaces-compliance-tests": "^4.0.8", + "libp2p-mplex": "^0.10.7", + "libp2p-websockets": "^0.16.2", "lodash": "^4.17.15", "multiaddr": "^10.0.0", "os": "^0.1.1", diff --git a/test/2-nodes.spec.ts b/test/2-nodes.spec.ts index d321ecc6..3f3ffd9a 100644 --- a/test/2-nodes.spec.ts +++ b/test/2-nodes.spec.ts @@ -1,10 +1,12 @@ import chai from 'chai' import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' import delay from 'delay' -import Gossipsub, { multicodec } from '../ts' -import { createGossipsubs, createConnectedGossipsubs, expectSet, stopNode, first } from './utils' +import Gossipsub from '../ts' +import { createGossipsubs, createPubsubs, createConnectedGossipsubs, expectSet, stopNode, first } from './utils' import { RPC } from '../ts/message/rpc' -import { InMessage, PeerId } from 'libp2p-interfaces/src/pubsub' +import PubsubBaseProtocol, { PeerId } from 'libp2p-interfaces/src/pubsub' +import { FloodsubID, GossipsubIDv11 } from '../ts/constants' +import { GossipsubMessage } from '../ts/types' chai.use(require('dirty-chai')) chai.use(require('chai-spies')) @@ -13,6 +15,28 @@ const expect = chai.expect const shouldNotHappen = () => expect.fail() describe('2 nodes', () => { + describe('Pubsub dial', () => { + let nodes: PubsubBaseProtocol[] + + // Create pubsub nodes + before(async () => { + nodes = await createPubsubs({ number: 2 }) + }) + + after(() => Promise.all(nodes.map(stopNode))) + + it('Dial from nodeA to nodeB happened with pubsub', async () => { + await nodes[0]._libp2p.dialProtocol(nodes[1]._libp2p.peerId, FloodsubID) + + while (nodes[0]['peers'].size === 0 || nodes[1]['peers'].size === 0) { + await delay(10) + } + + expect(nodes[0]['peers'].size).to.be.eql(1) + expect(nodes[1]['peers'].size).to.be.eql(1) + }) + }) + describe('basics', () => { let nodes: Gossipsub[] = [] @@ -24,15 +48,14 @@ describe('2 nodes', () => { after(() => Promise.all(nodes.map(stopNode))) it('Dial from nodeA to nodeB 
happened with pubsub', async () => { - await nodes[0]._libp2p.dialProtocol(nodes[1]._libp2p.peerId, multicodec) - await delay(10) - await Promise.all([ - new Promise((resolve) => nodes[0].once('gossipsub:heartbeat', resolve)), - new Promise((resolve) => nodes[1].once('gossipsub:heartbeat', resolve)) - ]) + await nodes[0]._libp2p.dialProtocol(nodes[1]._libp2p.peerId, GossipsubIDv11) - expect(nodes[0].peers.size).to.be.eql(1) - expect(nodes[1].peers.size).to.be.eql(1) + while (nodes[0]['peers'].size === 0 || nodes[1]['peers'].size === 0) { + await delay(10) + } + + expect(nodes[0]['peers'].size).to.be.eql(1) + expect(nodes[1]['peers'].size).to.be.eql(1) }) }) @@ -47,7 +70,14 @@ describe('2 nodes', () => { after(() => Promise.all(nodes.map(stopNode))) it('Subscribe to a topic', async () => { - const topic = 'Z' + const topic = 'test_topic' + + // await subscription change, after calling subscribe + const subscriptionEventPromise = Promise.all([ + new Promise((resolve) => nodes[0].once('pubsub:subscription-change', (...args) => resolve(args))), + new Promise((resolve) => nodes[1].once('pubsub:subscription-change', (...args) => resolve(args))) + ]) + nodes[0].subscribe(topic) nodes[1].subscribe(topic) @@ -61,14 +91,14 @@ describe('2 nodes', () => { const [changedPeerId, changedSubs] = evt0 as [PeerId, RPC.ISubOpts[]] - expectSet(nodes[0].subscriptions, [topic]) - expectSet(nodes[1].subscriptions, [topic]) - expect(nodes[0].peers.size).to.equal(1) - expect(nodes[1].peers.size).to.equal(1) - expectSet(nodes[0].topics.get(topic), [nodes[1].peerId.toB58String()]) - expectSet(nodes[1].topics.get(topic), [nodes[0].peerId.toB58String()]) + expectSet(nodes[0]['subscriptions'], [topic]) + expectSet(nodes[1]['subscriptions'], [topic]) + expect(nodes[0]['peers'].size).to.equal(1) + expect(nodes[1]['peers'].size).to.equal(1) + expectSet(nodes[0]['topics'].get(topic), [nodes[1].peerId.toB58String()]) + expectSet(nodes[1]['topics'].get(topic), [nodes[0].peerId.toB58String()]) - expect(changedPeerId.toB58String()).to.equal(first(nodes[0].peers).id.toB58String()) + expect(changedPeerId.toB58String()).to.equal(first(nodes[0]['peers']).id.toB58String()) expect(changedSubs).to.have.lengthOf(1) expect(changedSubs[0].topicID).to.equal(topic) expect(changedSubs[0].subscribe).to.equal(true) @@ -79,8 +109,8 @@ describe('2 nodes', () => { new Promise((resolve) => nodes[1].once('gossipsub:heartbeat', resolve)) ]) - expect(first(nodes[0].mesh.get(topic))).to.equal(first(nodes[0].peers).id.toB58String()) - expect(first(nodes[1].mesh.get(topic))).to.equal(first(nodes[1].peers).id.toB58String()) + expect(first(nodes[0]['mesh'].get(topic))).to.equal(first(nodes[0]['peers']).id.toB58String()) + expect(first(nodes[1]['mesh'].get(topic))).to.equal(first(nodes[1]['peers']).id.toB58String()) }) }) @@ -109,7 +139,7 @@ describe('2 nodes', () => { afterEach(() => Promise.all(nodes.map(stopNode))) it('Publish to a topic - nodeA', async () => { - const promise = new Promise((resolve) => nodes[1].once(topic, resolve)) + const promise = new Promise<GossipsubMessage>((resolve) => nodes[1].once(topic, resolve)) nodes[0].once(topic, (m) => shouldNotHappen) nodes[0].publish(topic, uint8ArrayFromString('hey')) @@ -117,13 +147,13 @@ const msg = await promise expect(msg.data.toString()).to.equal('hey') - expect(msg.from).to.be.eql(nodes[0].peerId.toB58String()) + expect(msg.from).to.be.eql(nodes[0].peerId.toBytes()) nodes[0].removeListener(topic, shouldNotHappen) }) it('Publish to a topic - nodeB', async () => { - const promise = new
Promise((resolve) => nodes[0].once(topic, resolve)) + const promise = new Promise<GossipsubMessage>((resolve) => nodes[0].once(topic, resolve)) nodes[1].once(topic, shouldNotHappen) nodes[1].publish(topic, uint8ArrayFromString('banana')) @@ -131,7 +161,7 @@ const msg = await promise expect(msg.data.toString()).to.equal('banana') - expect(msg.from).to.be.eql(nodes[1].peerId.toB58String()) + expect(msg.from).to.be.eql(nodes[1].peerId.toBytes()) nodes[1].removeListener(topic, shouldNotHappen) }) @@ -143,11 +173,11 @@ nodes[0].on(topic, receivedMsg) - function receivedMsg(msg: InMessage) { - expect(msg.data.toString().startsWith('banana')).to.be.true - expect(msg.from).to.be.eql(nodes[1].peerId.toB58String()) + function receivedMsg(msg: RPC.IMessage) { + expect(msg.data!.toString().startsWith('banana')).to.be.true + expect(msg.from).to.be.eql(nodes[1].peerId.toBytes()) expect(msg.seqno).to.be.a('Uint8Array') - expect(msg.topicIDs).to.be.eql([topic]) + expect(msg.topic).to.be.eql(topic) if (++counter === 10) { nodes[0].removeListener(topic, receivedMsg) @@ -168,7 +198,7 @@ describe('2 nodes', () => { // Create pubsub nodes beforeEach(async () => { - nodes = await createConnectedGossipsubs({ number: 2 }) + nodes = await createConnectedGossipsubs({ number: 2, options: {allowPublishToZeroPeers: true} }) }) // Create subscriptions @@ -188,16 +218,16 @@ describe('2 nodes', () => { it('Unsubscribe from a topic', async () => { nodes[0].unsubscribe(topic) - expect(nodes[0].subscriptions.size).to.equal(0) + expect(nodes[0]['subscriptions'].size).to.equal(0) const [changedPeerId, changedSubs] = await new Promise<[PeerId, RPC.ISubOpts[]]>((resolve) => { nodes[1].once('pubsub:subscription-change', (...args: [PeerId, RPC.ISubOpts[]]) => resolve(args)) }) await new Promise((resolve) => nodes[1].once('gossipsub:heartbeat', resolve)) - expect(nodes[1].peers.size).to.equal(1) - expectSet(nodes[1].topics.get(topic), []) - expect(changedPeerId.toB58String()).to.equal(first(nodes[1].peers).id.toB58String()) + expect(nodes[1]['peers'].size).to.equal(1) + expectSet(nodes[1]['topics'].get(topic), []) + expect(changedPeerId.toB58String()).to.equal(first(nodes[1]['peers']).id.toB58String()) expect(changedSubs).to.have.lengthOf(1) expect(changedSubs[0].topicID).to.equal(topic) expect(changedSubs[0].subscribe).to.equal(false) @@ -245,10 +275,10 @@ describe('2 nodes', () => { nodes[0].subscribe('Za') nodes[1].subscribe('Zb') - expect(nodes[0].peers.size).to.equal(0) - expectSet(nodes[0].subscriptions, ['Za']) - expect(nodes[1].peers.size).to.equal(0) - expectSet(nodes[1].subscriptions, ['Zb']) + expect(nodes[0]['peers'].size).to.equal(0) + expectSet(nodes[0]['subscriptions'], ['Za']) + expect(nodes[1]['peers'].size).to.equal(0) + expectSet(nodes[1]['subscriptions'], ['Zb']) }) after(() => Promise.all(nodes.map(stopNode))) @@ -257,20 +287,20 @@ describe('2 nodes', () => { this.timeout(5000) await Promise.all([ - nodes[0]._libp2p.dialProtocol(nodes[1]._libp2p.peerId, multicodec), + nodes[0]._libp2p.dialProtocol(nodes[1]._libp2p.peerId, GossipsubIDv11), new Promise((resolve) => nodes[0].once('pubsub:subscription-change', resolve)), new Promise((resolve) => nodes[1].once('pubsub:subscription-change', resolve)) ]) - expect(nodes[0].peers.size).to.equal(1) - expect(nodes[1].peers.size).to.equal(1) + expect(nodes[0]['peers'].size).to.equal(1) + expect(nodes[1]['peers'].size).to.equal(1) - expectSet(nodes[0].subscriptions, ['Za']) - expect(nodes[1].peers.size).to.equal(1) -
expectSet(nodes[1].topics.get('Za'), [nodes[0].peerId.toB58String()]) + expectSet(nodes[0]['subscriptions'], ['Za']) + expect(nodes[1]['peers'].size).to.equal(1) + expectSet(nodes[1]['topics'].get('Za'), [nodes[0].peerId.toB58String()]) - expectSet(nodes[1].subscriptions, ['Zb']) - expect(nodes[0].peers.size).to.equal(1) - expectSet(nodes[0].topics.get('Zb'), [nodes[1].peerId.toB58String()]) + expectSet(nodes[1]['subscriptions'], ['Zb']) + expect(nodes[0]['peers'].size).to.equal(1) + expectSet(nodes[0]['topics'].get('Zb'), [nodes[1].peerId.toB58String()]) }) }) @@ -284,8 +314,8 @@ describe('2 nodes', () => { it("nodes don't have peers after stopped", async () => { await Promise.all(nodes.map(stopNode)) - expect(nodes[0].peers.size).to.equal(0) - expect(nodes[1].peers.size).to.equal(0) + expect(nodes[0]['peers'].size).to.equal(0) + expect(nodes[1]['peers'].size).to.equal(0) }) }) }) diff --git a/test/accept-from.spec.ts b/test/accept-from.spec.ts index 6944820e..a778114c 100644 --- a/test/accept-from.spec.ts +++ b/test/accept-from.spec.ts @@ -1,7 +1,8 @@ import { expect } from 'chai' -import { Libp2p } from 'libp2p-interfaces/src/pubsub' +import Libp2p from 'libp2p' import sinon from 'sinon' import Gossipsub from '../ts' +import { createPeerId } from './utils' import { fastMsgIdFn } from './utils/msgId' describe('Gossipsub acceptFrom', () => { @@ -11,13 +12,17 @@ describe('Gossipsub acceptFrom', () => { beforeEach(async () => { sandbox = sinon.createSandbox() - sandbox.useFakeTimers(Date.now()) - gossipsub = new Gossipsub({} as Libp2p, { emitSelf: false, fastMsgIdFn }) + // not able to use fake timers or tests in browser are suspended + // sandbox.useFakeTimers(Date.now()) + + const peerId = await createPeerId() + gossipsub = new Gossipsub({ peerId } as Libp2p, { emitSelf: false, fastMsgIdFn }) + // stubbing PeerScore causes some pending issue in firefox browser environment // we can only spy it // using scoreSpy.withArgs("peerA").calledOnce causes the pending issue in firefox // while spy.getCall() is fine - scoreSpy = sandbox.spy(gossipsub.score, 'score') + scoreSpy = sandbox.spy(gossipsub['score'], 'score') }) afterEach(() => { @@ -26,50 +31,50 @@ describe('Gossipsub acceptFrom', () => { it('should only white list peer with positive score', () => { // by default the score is 0 - gossipsub._acceptFrom('peerA') + gossipsub['acceptFrom']('peerA') // 1st time, we have to compute score expect(scoreSpy.getCall(0).args[0]).to.be.equal('peerA') expect(scoreSpy.getCall(0).returnValue).to.be.equal(0) - expect(scoreSpy.getCall(1)).to.be.undefined + expect(scoreSpy.getCall(1)).to.not.be.ok // 2nd time, use a cached score since it's white listed - gossipsub._acceptFrom('peerA') - expect(scoreSpy.getCall(1)).to.be.undefined + gossipsub['acceptFrom']('peerA') + expect(scoreSpy.getCall(1)).to.not.be.ok }) - it('should recompute score after 1s', () => { + it('should recompute score after 1s', async () => { // by default the score is 0 - gossipsub._acceptFrom('peerA') + gossipsub['acceptFrom']('peerA') // 1st time, we have to compute score expect(scoreSpy.getCall(0).args[0]).to.be.equal('peerA') - expect(scoreSpy.getCall(1)).to.be.undefined - gossipsub._acceptFrom('peerA') + expect(scoreSpy.getCall(1)).to.not.be.ok + gossipsub['acceptFrom']('peerA') // score is cached - expect(scoreSpy.getCall(1)).to.be.undefined + expect(scoreSpy.getCall(1)).to.not.be.ok // after 1s - sandbox.clock.tick(1001) + await new Promise((resolve) => setTimeout(resolve, 1001)) - gossipsub._acceptFrom('peerA') + 
gossipsub['acceptFrom']('peerA') expect(scoreSpy.getCall(1).args[0]).to.be.equal('peerA') - expect(scoreSpy.getCall(2)).to.be.undefined + expect(scoreSpy.getCall(2)).to.not.be.ok }) it('should recompute score after max messages accepted', () => { // by default the score is 0 - gossipsub._acceptFrom('peerA') + gossipsub['acceptFrom']('peerA') // 1st time, we have to compute score expect(scoreSpy.getCall(0).args[0]).to.be.equal('peerA') - expect(scoreSpy.getCall(1)).to.be.undefined + expect(scoreSpy.getCall(1)).to.not.be.ok for (let i = 0; i < 128; i++) { - gossipsub._acceptFrom('peerA') + gossipsub['acceptFrom']('peerA') } - expect(scoreSpy.getCall(1)).to.be.undefined + expect(scoreSpy.getCall(1)).to.not.be.ok // max messages reached - gossipsub._acceptFrom('peerA') + gossipsub['acceptFrom']('peerA') expect(scoreSpy.getCall(1).args[0]).to.be.equal('peerA') - expect(scoreSpy.getCall(2)).to.be.undefined + expect(scoreSpy.getCall(2)).to.not.be.ok }) // TODO: run this in a unit test setup @@ -77,11 +82,11 @@ describe('Gossipsub acceptFrom', () => { // it.skip('should NOT white list peer with negative score', () => { // // peerB is not white listed since score is negative // scoreStub.score.withArgs('peerB').returns(-1) - // gossipsub._acceptFrom('peerB') + // gossipsub["acceptFrom"]('peerB') // // 1st time, we have to compute score // expect(scoreStub.score.withArgs('peerB').calledOnce).to.be.true // // 2nd time, still have to compute score since it's NOT white listed - // gossipsub._acceptFrom('peerB') + // gossipsub["acceptFrom"]('peerB') // expect(scoreStub.score.withArgs('peerB').calledTwice).to.be.true // }) }) diff --git a/test/compliance.spec.ts b/test/compliance.spec.ts index 2088bb9b..265efa4e 100644 --- a/test/compliance.spec.ts +++ b/test/compliance.spec.ts @@ -19,6 +19,9 @@ describe('interface compliance', function () { // we don't want to cache anything, spec test sends duplicate messages and expect // peer to receive all. seenTTL: -1, + // libp2p-interfaces-compliance-tests in test 'can subscribe and unsubscribe correctly' publishes to no peers + // Disable check to allow passing tests + allowPublishToZeroPeers: true, ...options }) @@ -29,6 +32,7 @@ describe('interface compliance', function () { return pubsubNodes }, + async teardown() { await Promise.all(pubsubNodes.map((ps) => ps.stop())) if (peers) { @@ -38,4 +42,25 @@ describe('interface compliance', function () { pubsubNodes = [] } }) + + // As of Mar 15 2022 only 4/29 tests are failing due to: + // - 1. Tests want to stub internal methods like `_emitMessage` that are not spec and not in this Gossipsub version + // - 2. 
Old protobuf RPC.Message version where + skipIds( + this, + new Set([ + 'should emit normalized signed messages on publish', + 'should drop unsigned messages', + 'should not drop unsigned messages if strict signing is disabled', + 'Publish 10 msg to a topic in nodeB' + ]) + ) }) + +function skipIds(suite: Mocha.Suite, ids: Set<string>): void { + suite.tests = suite.tests.filter((test) => !ids.has(test.title)) + + for (const suiteChild of suite.suites) { + skipIds(suiteChild, ids) + } +} diff --git a/test/fixtures/peers.js b/test/fixtures/peers.js index b063fbfc..7cd12a40 100644 --- a/test/fixtures/peers.js +++ b/test/fixtures/peers.js @@ -3,7 +3,6 @@ /** * These peer id / keypairs are used across tests to seed peers */ - module.exports = [ { id: 'QmNMMAqSxPetRS1cVMmutW5BCN1qQQyEr4u98kUvZjcfEw', diff --git a/test/floodsub.spec.ts b/test/floodsub.spec.ts index 1da5e6bc..4943d47c 100644 --- a/test/floodsub.spec.ts +++ b/test/floodsub.spec.ts @@ -6,7 +6,7 @@ import FloodSub from 'libp2p-floodsub' import Gossipsub from '../ts' import { createPeer, createFloodsubNode, expectSet, first, startNode, stopNode } from './utils' import { RPC } from '../ts/message/rpc' -import { InMessage } from 'libp2p-interfaces/src/pubsub' +import { GossipsubMessage } from '../ts/types' const expect = chai.expect chai.use(require('dirty-chai')) @@ -31,7 +31,7 @@ describe('gossipsub fallbacks to floodsub', () => { it('Dial event happened from nodeGs to nodeFs', async () => { await nodeGs._libp2p.dialProtocol(nodeFs._libp2p.peerId, nodeGs.multicodecs) - expect(nodeGs.peers.size).to.equal(1) + expect(nodeGs['peers'].size).to.equal(1) expect(nodeFs.peers.size).to.equal(1) }) }) @@ -58,7 +58,7 @@ describe('gossipsub fallbacks to floodsub', () => { await nodeGs._libp2p.dialProtocol(nodeFs._libp2p.peerId, nodeGs.multicodecs) expect.fail('Dial should not have succeed') } catch (err) { - expect(err.code).to.be.equal('ERR_UNSUPPORTED_PROTOCOL') + expect((err as { code: string }).code).to.be.equal('ERR_UNSUPPORTED_PROTOCOL') } }) }) @@ -93,14 +93,14 @@ describe('gossipsub fallbacks to floodsub', () => { }) await delay(1000) - expectSet(nodeGs.subscriptions, [topic]) + expectSet(nodeGs['subscriptions'], [topic]) expectSet(nodeFs.subscriptions, [topic]) - expect(nodeGs.peers.size).to.equal(1) + expect(nodeGs['peers'].size).to.equal(1) expect(nodeFs.peers.size).to.equal(1) - expectSet(nodeGs.topics.get(topic), [nodeFs.peerId.toB58String()]) + expectSet(nodeGs['topics'].get(topic), [nodeFs.peerId.toB58String()]) expectSet(nodeFs.topics.get(topic), [nodeGs.peerId.toB58String()]) - expect(changedPeerId.toB58String()).to.equal(first(nodeGs.peers).id.toB58String()) + expect(changedPeerId.toB58String()).to.equal(first(nodeGs['peers']).id.toB58String()) expect(changedSubs).to.have.lengthOf(1) expect(changedSubs[0].topicID).to.equal(topic) expect(changedSubs[0].subscribe).to.equal(true) @@ -136,7 +136,7 @@ describe('gossipsub fallbacks to floodsub', () => { }) it('Publish to a topic - nodeGs', async () => { - const promise = new Promise((resolve) => nodeFs.once(topic, resolve)) + const promise = new Promise<GossipsubMessage>((resolve) => nodeFs.once(topic, resolve)) nodeGs.publish(topic, uint8ArrayFromString('hey')) @@ -146,14 +146,14 @@ describe('gossipsub fallbacks to floodsub', () => { }) it('Publish to a topic - nodeFs', async () => { - const promise = new Promise((resolve) => nodeGs.once(topic, resolve)) + const promise = new Promise<GossipsubMessage>((resolve) => nodeGs.once(topic, resolve)) nodeFs.publish(topic, uint8ArrayFromString('banana')) const msg = await promise
expect(msg.data.toString()).to.equal('banana') - expect(msg.from).to.be.eql(nodeFs.peerId.toB58String()) + expect(msg.from).to.be.eql(nodeFs.peerId.toBytes()) }) }) @@ -189,7 +189,7 @@ describe('gossipsub fallbacks to floodsub', () => { it('Unsubscribe from a topic', async () => { nodeGs.unsubscribe(topic) - expect(nodeGs.subscriptions.size).to.equal(0) + expect(nodeGs['subscriptions'].size).to.equal(0) const [changedPeerId, changedSubs] = await new Promise<[PeerId, RPC.ISubOpts[]]>((resolve) => { nodeFs.once('pubsub:subscription-change', (...args: [PeerId, RPC.ISubOpts[]]) => resolve(args)) diff --git a/test/go-gossipsub.ts b/test/go-gossipsub.ts index 335be350..26e63e2e 100644 --- a/test/go-gossipsub.ts +++ b/test/go-gossipsub.ts @@ -1,6 +1,5 @@ import chai from 'chai' import delay from 'delay' -import errcode from 'err-code' import sinon from 'sinon' import pRetry from 'p-retry' import { EventEmitter } from 'events' @@ -11,6 +10,7 @@ import { IRPC, RPC } from '../ts/message/rpc' import { TopicScoreParams } from '../ts/score' import Floodsub from 'libp2p-floodsub' import Gossipsub from '../ts' +import { MessageAcceptance } from '../ts/types' import * as constants from '../ts/constants' import { GossipsubD } from '../ts/constants' import { @@ -23,7 +23,8 @@ import { expectSet, fastMsgIdFn, tearDownGossipsubs, - createPeers + createPeers, + PubsubBaseMinimal } from './utils' import PeerId from 'peer-id' @@ -45,7 +46,7 @@ const checkReceivedSubscription = (psub: Gossipsub, peerIdStr: string, topic: st if (peerId.toB58String() === peerIdStr && subs[0].topicID === topic && subs[0].subscribe === true) { clearTimeout(t) psub.off(event, cb) - if (Array.from(psub.topics.get(topic) || []).includes(peerIdStr)) { + if (Array.from(psub['topics'].get(topic) || []).includes(peerIdStr)) { resolve() } else { reject(Error('topics should include the peerId')) @@ -59,9 +60,9 @@ const checkReceivedSubscriptions = async (psub: Gossipsub, peerIdStrs: string[], const recvPeerIdStrs = peerIdStrs.filter((peerIdStr) => peerIdStr !== psub.peerId.toB58String()) const promises = recvPeerIdStrs.map((peerIdStr, idx) => checkReceivedSubscription(psub, peerIdStr, topic, idx)) await Promise.all(promises) - expect(Array.from(psub.topics.get(topic) || []).sort()).to.be.deep.equal(recvPeerIdStrs.sort()) + expect(Array.from(psub['topics'].get(topic) || []).sort()).to.be.deep.equal(recvPeerIdStrs.sort()) recvPeerIdStrs.forEach((peerIdStr) => { - const peerStream = psub.peers.get(peerIdStr) + const peerStream = psub['peers'].get(peerIdStr) expect(peerStream && peerStream.isWritable, "no peerstream or peerstream is not writable").to.be.true }) } @@ -335,12 +336,12 @@ describe('go-libp2p-pubsub gossipsub tests', function () { } await Promise.all(sendRecv) - expect(psubs[0].fanout.size).to.be.gt(0) + expect(psubs[0]['fanout'].size).to.be.gt(0) - // wait for TTL to expore fanout peers in owner - await delay(2000) + // wait for heartbeats to expire fanout peers + await Promise.all(psubs.map((ps) => awaitEvents(ps, 'gossipsub:heartbeat', 2))) - expect(psubs[0].fanout.size).to.be.eql(0) + expect(psubs[0]['fanout'].size, 'should have no fanout peers after not publishing for a while').to.be.eql(0) await tearDownGossipsubs(psubs) }) it('test gossipsub gossip', async function () { @@ -433,11 +434,8 @@ describe('go-libp2p-pubsub gossipsub tests', function () { }) ) ) - try { - await results - } catch (e) { - expect.fail(e) - } + + await results await tearDownGossipsubs(psubs) }) @@ -661,7 +659,7 @@ describe('go-libp2p-pubsub gossipsub 
tests', function () { // Publish 100 messages, each from a random node // Assert that the subscribed nodes receive every message const libp2ps = await createPeers({ number: 30 }) - const gsubs: PubsubBaseProtocol[] = libp2ps.slice(0, 20).map((libp2p) => { + const gsubs: PubsubBaseMinimal[] = libp2ps.slice(0, 20).map((libp2p) => { return new Gossipsub(libp2p, { scoreParams: { IPColocationFactorThreshold: 20 }, fastMsgIdFn }) }) const fsubs = libp2ps.slice(20).map((libp2p) => { @@ -687,7 +685,7 @@ describe('go-libp2p-pubsub gossipsub tests', function () { const results = Promise.all( psubs.filter((psub, j) => j !== owner).map(checkReceivedMessage(topic, msg, owner, i)) ) - sendRecv.push(psubs[owner].publish(topic, msg)) + sendRecv.push((psubs[owner] as PubsubBaseProtocol).publish(topic, msg)) sendRecv.push(results) } await Promise.all(sendRecv) @@ -793,13 +791,13 @@ describe('go-libp2p-pubsub gossipsub tests', function () { await Promise.all(psubs.map((ps) => awaitEvents(ps, 'gossipsub:heartbeat', 2))) await Promise.all(subscriptionPromises) - expectSet(new Set(psubs[0].peers.keys()), [psubs[1].peerId.toB58String(), psubs[5].peerId.toB58String()]) - expectSet(new Set(psubs[1].peers.keys()), [ + expectSet(new Set(psubs[0]['peers'].keys()), [psubs[1].peerId.toB58String(), psubs[5].peerId.toB58String()]) + expectSet(new Set(psubs[1]['peers'].keys()), [ psubs[0].peerId.toB58String(), psubs[2].peerId.toB58String(), psubs[4].peerId.toB58String() ]) - expectSet(new Set(psubs[2].peers.keys()), [psubs[1].peerId.toB58String(), psubs[3].peerId.toB58String()]) + expectSet(new Set(psubs[2]['peers'].keys()), [psubs[1].peerId.toB58String(), psubs[3].peerId.toB58String()]) let sendRecv = [] for (const owner of [9, 3]) { @@ -836,10 +834,10 @@ describe('go-libp2p-pubsub gossipsub tests', function () { }) // configure the center of the star with very low D - psubs[0]._options.D = 0 - psubs[0]._options.Dhi = 0 - psubs[0]._options.Dlo = 0 - psubs[0]._options.Dscore = 0 + psubs[0].opts.D = 0 + psubs[0].opts.Dhi = 0 + psubs[0].opts.Dlo = 0 + psubs[0].opts.Dscore = 0 // build the star await psubs.slice(1).map((ps) => psubs[0]._libp2p.dialProtocol(ps._libp2p.peerId, ps.multicodecs)) @@ -1092,13 +1090,10 @@ describe('go-libp2p-pubsub gossipsub tests', function () { await psubs[1]._libp2p.dialProtocol(psubs[2].peerId, multicodecs) await psubs[0]._libp2p.dialProtocol(psubs[2].peerId, multicodecs) - psubs[0].topicValidators.set(topic, async (topic, m) => { - if (m.receivedFrom === psubs[1].peerId.toB58String()) { - throw errcode(new Error(), constants.ERR_TOPIC_VALIDATOR_IGNORE) - } - if (m.receivedFrom === psubs[2].peerId.toB58String()) { - throw errcode(new Error(), constants.ERR_TOPIC_VALIDATOR_REJECT) - } + psubs[0]['topicValidators'].set(topic, async (topic, m, propagationSource) => { + if (propagationSource.equals(psubs[1].peerId)) return MessageAcceptance.Ignore + if (propagationSource.equals(psubs[2].peerId)) return MessageAcceptance.Reject + throw Error('Unknown PeerId') }) psubs[0].subscribe(topic) @@ -1114,8 +1109,8 @@ describe('go-libp2p-pubsub gossipsub tests', function () { await Promise.all(psubs.map((ps) => awaitEvents(ps, 'gossipsub:heartbeat', 2))) - expect(psubs[0].score.score(psubs[1].peerId.toB58String())).to.be.eql(0) - expect(psubs[0].score.score(psubs[2].peerId.toB58String())).to.be.lt(0) + expect(psubs[0]['score'].score(psubs[1].peerId.toB58String())).to.be.eql(0) + expect(psubs[0]['score'].score(psubs[2].peerId.toB58String())).to.be.lt(0) await tearDownGossipsubs(psubs) }) @@ -1128,11 +1123,11 
@@ describe('go-libp2p-pubsub gossipsub tests', function () { const test1 = 'test1' const test2 = 'test2' const test3 = 'test3' - psub.mesh.set(test1, new Set([otherId])) - psub.mesh.set(test2, new Set()) + psub['mesh'].set(test1, new Set([otherId])) + psub['mesh'].set(test2, new Set()) const rpc: IRPC = {} - psub._piggybackControl(otherId, rpc, { + psub['piggybackControl'](otherId, rpc, { graft: [{ topicID: test1 }, { topicID: test2 }, { topicID: test3 }], prune: [{ topicID: test1 }, { topicID: test2 }, { topicID: test3 }] }) @@ -1192,12 +1187,12 @@ describe('go-libp2p-pubsub gossipsub tests', function () { const real = psubs.slice(0, 6) const sybils = psubs.slice(6) + const connectPromises = real.map((psub) => awaitEvents(psub._libp2p.connectionManager, 'peer:connect', 3)) await connectSome(real, 5) + await Promise.all(connectPromises) sybils.forEach((s) => { - s._processRpc = async function () { - return true - } + s['handleReceivedRpc'] = async function () {} }) for (let i = 0; i < sybils.length; i++) { @@ -1207,12 +1202,17 @@ describe('go-libp2p-pubsub gossipsub tests', function () { } await Promise.all(psubs.map((ps) => awaitEvents(ps, 'gossipsub:heartbeat', 1))) - + const realPeerIdStrs = real.map((psub) => psub.peerId.toB58String()) + const subscriptionPromises = real.map((psub) => { + const waitingPeerIdStrs = Array.from(psub['peers'].keys()).filter((peerIdStr) => realPeerIdStrs.includes(peerIdStr)) + return checkReceivedSubscriptions(psub, waitingPeerIdStrs, topic) + }) psubs.forEach((ps) => ps.subscribe(topic)) + await Promise.all(subscriptionPromises) for (let i = 0; i < 300; i++) { const msg = uint8ArrayFromString(`${i} its not a flooooood ${i}`) - const owner = i % 10 + const owner = i % real.length await psubs[owner].publish(topic, msg) await delay(20) } @@ -1228,7 +1228,7 @@ describe('go-libp2p-pubsub gossipsub tests', function () { () => new Promise((resolve, reject) => { real.forEach(async (r, i) => { - const meshPeers = r.mesh.get(topic) + const meshPeers = r['mesh'].get(topic) let count = 0 realPeerIds.forEach((p) => { if (meshPeers!.has(p)) { diff --git a/test/gossip-incoming.spec.ts b/test/gossip-incoming.spec.ts index a7824cb9..1ab1f19a 100644 --- a/test/gossip-incoming.spec.ts +++ b/test/gossip-incoming.spec.ts @@ -2,9 +2,9 @@ import chai from 'chai' import delay from 'delay' -import { InMessage } from 'libp2p-interfaces/src/pubsub' import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' import Gossipsub from '../ts' +import { GossipsubMessage } from '../ts/types' import { createConnectedGossipsubs, stopNode } from './utils' const expect = chai.expect @@ -40,7 +40,7 @@ describe('gossip incoming', () => { after(() => Promise.all(nodes.map(stopNode))) it('should gossip incoming messages', async () => { - const promise = new Promise((resolve) => nodes[2].once(topic, resolve)) + const promise = new Promise<GossipsubMessage>((resolve) => nodes[2].once(topic, resolve)) nodes[0].once(topic, (m) => shouldNotHappen) nodes[0].publish(topic, uint8ArrayFromString('hey')) @@ -48,7 +48,7 @@ const msg = await promise expect(msg.data.toString()).to.equal('hey') - expect(msg.from).to.be.eql(nodes[0].peerId.toB58String()) + expect(msg.from).to.be.eql(nodes[0].peerId.toBytes()) nodes[0].removeListener(topic, shouldNotHappen) }) diff --git a/test/gossip.spec.ts b/test/gossip.spec.ts index 13399dcb..8f1c7708 100644 --- a/test/gossip.spec.ts +++ b/test/gossip.spec.ts @@ -1,8 +1,5 @@ -'use strict' -/* eslint-env mocha */ - import { expect } from
'chai' -import sinon from 'sinon' +import sinon, { SinonStubbedInstance } from 'sinon' import delay from 'delay' import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' import { GossipsubDhi } from '../ts/constants' @@ -36,24 +33,28 @@ describe('gossip', () => { // await mesh rebalancing await Promise.all(nodes.map((n) => new Promise((resolve) => n.once('gossipsub:heartbeat', resolve)))) await delay(500) - // set spy - sinon.spy(nodeA, '_pushGossip') + + // set spy. NOTE: Forcing private property to be public + const nodeASpy = nodeA as Partial<Gossipsub> as SinonStubbedInstance<{ + pushGossip: Gossipsub['pushGossip'] + }> + sinon.spy(nodeASpy, 'pushGossip') await nodeA.publish(topic, uint8ArrayFromString('hey')) await new Promise((resolve) => nodeA.once('gossipsub:heartbeat', resolve)) - nodeA._pushGossip + nodeASpy.pushGossip .getCalls() .map((call) => call.args[0]) .forEach((peerId) => { - nodeA.mesh.get(topic)!.forEach((meshPeerId) => { + nodeA['mesh'].get(topic)!.forEach((meshPeerId) => { expect(meshPeerId).to.not.equal(peerId) }) }) // unset spy - nodeA._pushGossip.restore() + nodeASpy.pushGossip.restore() }) it('should send piggyback control into other sent messages', async function () { @@ -72,24 +73,27 @@ describe('gossip', () => { await Promise.all(nodes.map((n) => new Promise((resolve) => n.once('gossipsub:heartbeat', resolve)))) await delay(500) - const peerB = first(nodeA.mesh.get(topic)) + const peerB = first(nodeA['mesh'].get(topic)) const nodeB = nodes.find((n) => n.peerId.toB58String() === peerB) - // set spy - sinon.spy(nodeA, '_piggybackControl') + // set spy. NOTE: Forcing private property to be public + const nodeASpy = nodeA as Partial<Gossipsub> as SinonStubbedInstance<{ + piggybackControl: Gossipsub['piggybackGossip'] + }> + sinon.spy(nodeASpy, 'piggybackControl') // manually add control message to be sent to peerB const graft = { graft: [{ topicID: topic }] } - nodeA.control.set(peerB, graft) + nodeA['control'].set(peerB, graft) await nodeA.publish(topic, uint8ArrayFromString('hey')) - expect(nodeA._piggybackControl.callCount).to.be.equal(1) + expect(nodeASpy.piggybackControl.callCount).to.be.equal(1) // expect control message to be sent alongside published message - const call = nodeA._piggybackControl.getCalls()[0] - expect(call.args[2].graft).to.deep.equal(graft.graft) + const call = nodeASpy.piggybackControl.getCalls()[0] + expect(call.args[1].control!.graft).to.deep.equal(graft.graft) // unset spy - nodeA._piggybackControl.restore() + nodeASpy.piggybackControl.restore() }) }) diff --git a/test/heartbeat.spec.ts b/test/heartbeat.spec.ts index 5c2489d9..e8969c47 100644 --- a/test/heartbeat.spec.ts +++ b/test/heartbeat.spec.ts @@ -1,6 +1,3 @@ -'use strict' -/* eslint-env mocha */ - import { expect } from 'chai' import Gossipsub from '../ts' import { GossipsubHeartbeatInterval } from '../ts/constants' @@ -17,12 +14,12 @@ describe('heartbeat', () => { after(() => stopNode(gossipsub)) it('should occur with regularity defined by a constant', async function () { - this.timeout(3000) + this.timeout(GossipsubHeartbeatInterval * 5) await new Promise((resolve) => gossipsub.once('gossipsub:heartbeat', resolve)) const t1 = Date.now() await new Promise((resolve) => gossipsub.once('gossipsub:heartbeat', resolve)) const t2 = Date.now() - const safeDelta = 100 // ms - expect(t2 - t1).to.be.lt(GossipsubHeartbeatInterval + safeDelta) + const safeFactor = 1.5 + expect(t2 - t1).to.be.lt(GossipsubHeartbeatInterval * safeFactor) }) }) diff --git a/test/mesh.spec.ts b/test/mesh.spec.ts
index 0f505f18..1f6b4483 100644 --- a/test/mesh.spec.ts +++ b/test/mesh.spec.ts @@ -1,6 +1,3 @@ -'use strict' -/* eslint-env mocha */ - import { expect } from 'chai' import delay from 'delay' import Gossipsub from '../ts' @@ -38,7 +35,7 @@ describe('mesh overlay', () => { // await mesh rebalancing await new Promise((resolve) => node0.once('gossipsub:heartbeat', resolve)) - expect(node0.mesh.get(topic)!.size).to.equal(N) + expect(node0['mesh'].get(topic)!.size).to.equal(N) }) it('should remove mesh peers once above threshold', async function () { @@ -55,6 +52,6 @@ describe('mesh overlay', () => { await delay(500) // await mesh rebalancing await new Promise((resolve) => node0.once('gossipsub:heartbeat', resolve)) - expect(node0.mesh.get(topic)!.size).to.be.lte(GossipsubDhi) + expect(node0['mesh'].get(topic)!.size).to.be.lte(GossipsubDhi) }) }) diff --git a/test/message-cache.spec.ts b/test/message-cache.spec.ts index b069a283..54ab454d 100644 --- a/test/message-cache.spec.ts +++ b/test/message-cache.spec.ts @@ -6,8 +6,9 @@ import chaiSpies from 'chai-spies' import { messageIdToString } from '../ts/utils/messageIdToString' import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' import { MessageCache } from '../ts/message-cache' -import { InMessage, utils } from 'libp2p-interfaces/src/pubsub' +import { utils } from 'libp2p-interfaces/src/pubsub' import { getMsgId } from './utils' +import { GossipsubMessage } from '../ts/types' /* eslint-disable no-unused-expressions */ @@ -17,16 +18,15 @@ const expect = chai.expect describe('Testing Message Cache Operations', () => { const messageCache = new MessageCache(3, 5) - const testMessages: InMessage[] = [] + const testMessages: GossipsubMessage[] = [] before(async () => { - const makeTestMessage = (n: number): InMessage => { + const makeTestMessage = (n: number): GossipsubMessage => { return { - receivedFrom: '', - from: 'test', + from: new Uint8Array(0), data: uint8ArrayFromString(n.toString()), seqno: utils.randomSeqno(), - topicIDs: ['test'] + topic: 'test' } } @@ -35,7 +35,7 @@ describe('Testing Message Cache Operations', () => { } for (let i = 0; i < 10; i++) { - await messageCache.put(testMessages[i], messageIdToString(getMsgId(testMessages[i]))) + messageCache.put(messageIdToString(getMsgId(testMessages[i])), testMessages[i]) } }) @@ -60,7 +60,7 @@ describe('Testing Message Cache Operations', () => { it('Shift message cache', async () => { messageCache.shift() for (let i = 10; i < 20; i++) { - await messageCache.put(testMessages[i], messageIdToString(getMsgId(testMessages[i]))) + messageCache.put(messageIdToString(getMsgId(testMessages[i])), testMessages[i]) } for (let i = 0; i < 20; i++) { @@ -84,22 +84,22 @@ describe('Testing Message Cache Operations', () => { messageCache.shift() for (let i = 20; i < 30; i++) { - await messageCache.put(testMessages[i], messageIdToString(getMsgId(testMessages[i]))) + messageCache.put(messageIdToString(getMsgId(testMessages[i])), testMessages[i]) } messageCache.shift() for (let i = 30; i < 40; i++) { - await messageCache.put(testMessages[i], messageIdToString(getMsgId(testMessages[i]))) + messageCache.put(messageIdToString(getMsgId(testMessages[i])), testMessages[i]) } messageCache.shift() for (let i = 40; i < 50; i++) { - await messageCache.put(testMessages[i], messageIdToString(getMsgId(testMessages[i]))) + messageCache.put(messageIdToString(getMsgId(testMessages[i])), testMessages[i]) } messageCache.shift() for (let i = 50; i < 60; i++) { - await messageCache.put(testMessages[i], 
messageIdToString(getMsgId(testMessages[i]))) + messageCache.put(messageIdToString(getMsgId(testMessages[i])), testMessages[i]) } expect(messageCache.msgs.size).to.equal(50) diff --git a/test/peer-score.spec.ts b/test/peer-score.spec.ts index 03405ce2..a4fd936b 100644 --- a/test/peer-score.spec.ts +++ b/test/peer-score.spec.ts @@ -3,15 +3,18 @@ import { expect } from 'chai' import PeerId from 'peer-id' import delay from 'delay' import ConnectionManager from 'libp2p/src/connection-manager' -import { InMessage } from 'libp2p-interfaces/src/pubsub' import { PeerScore, createPeerScoreParams, createTopicScoreParams, TopicScoreParams } from '../ts/score' import * as computeScoreModule from '../ts/score/compute-score' -import { ERR_TOPIC_VALIDATOR_IGNORE, ERR_TOPIC_VALIDATOR_REJECT } from '../ts/constants' -import { makeTestMessage, getMsgId, getMsgIdStr } from './utils' +import { getMsgIdStr, makeTestMessage } from './utils' +import { RejectReason } from '../ts/types' +import { ScorePenalty } from '../ts/metrics' const connectionManager = new Map() as unknown as ConnectionManager connectionManager.getAll = () => [] +/** Placeholder for some ScorePenalty value, only used for metrics */ +const scorePenaltyAny = ScorePenalty.BrokenPromise + describe('PeerScore', () => { it('should score based on time in mesh', async () => { // Create parameters with reasonable default values @@ -27,7 +30,7 @@ describe('PeerScore', () => { })) const peerA = (await PeerId.create({ keyType: 'secp256k1' })).toB58String() // Peer score should start at 0 - const ps = new PeerScore(params, connectionManager) + const ps = new PeerScore(params, connectionManager, null, { scoreCacheValidityMs: 0 }) ps.addPeer(peerA) let aScore = ps.score(peerA) @@ -38,10 +41,11 @@ describe('PeerScore', () => { const elapsed = tparams.timeInMeshQuantum * 100 await delay(elapsed + 10) - ps._refreshScores() + ps['refreshScores']() aScore = ps.score(peerA) expect(aScore).to.be.gte(((tparams.topicWeight * tparams.timeInMeshWeight) / tparams.timeInMeshQuantum) * elapsed) }) + it('should cap time in mesh score', async () => { // Create parameters with reasonable default values const mytopic = 'mytopic' @@ -55,7 +59,7 @@ describe('PeerScore', () => { })) const peerA = (await PeerId.create({ keyType: 'secp256k1' })).toB58String() // Peer score should start at 0 - const ps = new PeerScore(params, connectionManager) + const ps = new PeerScore(params, connectionManager, null, { scoreCacheValidityMs: 0 }) ps.addPeer(peerA) let aScore = ps.score(peerA) @@ -66,11 +70,12 @@ describe('PeerScore', () => { const elapsed = tparams.timeInMeshQuantum * 40 await delay(elapsed) - ps._refreshScores() + ps['refreshScores']() aScore = ps.score(peerA) expect(aScore).to.be.gt(tparams.topicWeight * tparams.timeInMeshWeight * tparams.timeInMeshCap * 0.5) expect(aScore).to.be.lt(tparams.topicWeight * tparams.timeInMeshWeight * tparams.timeInMeshCap * 1.5) }) + it('should score first message deliveries', async () => { // Create parameters with reasonable default values const mytopic = 'mytopic' @@ -86,25 +91,25 @@ describe('PeerScore', () => { })) const peerA = (await PeerId.create({ keyType: 'secp256k1' })).toB58String() // Peer score should start at 0 - const ps = new PeerScore(params, connectionManager) + const ps = new PeerScore(params, connectionManager, null, { scoreCacheValidityMs: 0 }) ps.addPeer(peerA) ps.graft(peerA, mytopic) // deliver a bunch of messages from peer A const nMessages = 100 for (let i = 0; i < nMessages; i++) { - const msg = makeTestMessage(i, 
[mytopic]) - msg.receivedFrom = peerA + const msg = makeTestMessage(i, mytopic) ps.validateMessage(getMsgIdStr(msg)) - ps.deliverMessage(msg, getMsgIdStr(msg)) + ps.deliverMessage(peerA, getMsgIdStr(msg), msg.topic) } - ps._refreshScores() + ps['refreshScores']() const aScore = ps.score(peerA) expect(aScore).to.be.equal( tparams.topicWeight * tparams.firstMessageDeliveriesWeight * nMessages * tparams.firstMessageDeliveriesDecay ) }) + it('should cap first message deliveries score', async () => { // Create parameters with reasonable default values const mytopic = 'mytopic' @@ -121,7 +126,7 @@ describe('PeerScore', () => { })) const peerA = (await PeerId.create({ keyType: 'secp256k1' })).toB58String() // Peer score should start at 0 - const ps = new PeerScore(params, connectionManager) + const ps = new PeerScore(params, connectionManager, null, { scoreCacheValidityMs: 0 }) ps.addPeer(peerA) let aScore = ps.score(peerA) @@ -133,13 +138,12 @@ describe('PeerScore', () => { // deliver a bunch of messages from peer A const nMessages = 100 for (let i = 0; i < nMessages; i++) { - const msg = makeTestMessage(i, [mytopic]) - msg.receivedFrom = peerA + const msg = makeTestMessage(i, mytopic) ps.validateMessage(getMsgIdStr(msg)) - ps.deliverMessage(msg, getMsgIdStr(msg)) + ps.deliverMessage(peerA, getMsgIdStr(msg), msg.topic) } - ps._refreshScores() + ps['refreshScores']() aScore = ps.score(peerA) expect(aScore).to.be.equal( tparams.topicWeight * @@ -148,6 +152,7 @@ describe('PeerScore', () => { tparams.firstMessageDeliveriesDecay ) }) + it('should decay first message deliveries score', async () => { // Create parameters with reasonable default values const mytopic = 'mytopic' @@ -164,7 +169,7 @@ describe('PeerScore', () => { })) const peerA = (await PeerId.create({ keyType: 'secp256k1' })).toB58String() // Peer score should start at 0 - const ps = new PeerScore(params, connectionManager) + const ps = new PeerScore(params, connectionManager, null, { scoreCacheValidityMs: 0 }) ps.addPeer(peerA) let aScore = ps.score(peerA) @@ -176,13 +181,12 @@ describe('PeerScore', () => { // deliver a bunch of messages from peer A const nMessages = 100 for (let i = 0; i < nMessages; i++) { - const msg = makeTestMessage(i, [mytopic]) - msg.receivedFrom = peerA + const msg = makeTestMessage(i, mytopic) ps.validateMessage(getMsgIdStr(msg)) - ps.deliverMessage(msg, getMsgIdStr(msg)) + ps.deliverMessage(peerA, getMsgIdStr(msg), msg.topic) } - ps._refreshScores() + ps['refreshScores']() aScore = ps.score(peerA) let expected = tparams.topicWeight * @@ -194,12 +198,13 @@ describe('PeerScore', () => { // refreshing the scores applies the decay param const decayInterals = 10 for (let i = 0; i < decayInterals; i++) { - ps._refreshScores() + ps['refreshScores']() expected *= tparams.firstMessageDeliveriesDecay } aScore = ps.score(peerA) expect(aScore).to.be.equal(expected) }) + it('should score mesh message deliveries', async function () { this.timeout(10000) // Create parameters with reasonable default values @@ -227,14 +232,14 @@ describe('PeerScore', () => { const peerC = (await PeerId.create({ keyType: 'secp256k1' })).toB58String() const peers = [peerA, peerB, peerC] // Peer score should start at 0 - const ps = new PeerScore(params, connectionManager) + const ps = new PeerScore(params, connectionManager, null, { scoreCacheValidityMs: 0 }) peers.forEach((p) => { ps.addPeer(p) ps.graft(p, mytopic) }) // assert that nobody has been penalized yet for not delivering messages before activation time - ps._refreshScores() + 
ps['refreshScores']() peers.forEach((p) => { const score = ps.score(p) expect(score, 'expected no mesh delivery penalty before activation time').to.equal(0) @@ -245,20 +250,17 @@ describe('PeerScore', () => { // deliver a bunch of messages from peers const nMessages = 100 for (let i = 0; i < nMessages; i++) { - const msg = makeTestMessage(i, [mytopic]) - msg.receivedFrom = peerA + const msg = makeTestMessage(i, mytopic) ps.validateMessage(getMsgIdStr(msg)) - ps.deliverMessage(msg, getMsgIdStr(msg)) + ps.deliverMessage(peerA, getMsgIdStr(msg), msg.topic) - msg.receivedFrom = peerB - ps.duplicateMessage(msg, getMsgIdStr(msg)) + ps.duplicateMessage(peerB, getMsgIdStr(msg), msg.topic) // deliver duplicate from peer C after the window await delay(tparams.meshMessageDeliveriesWindow + 5) - msg.receivedFrom = peerC - ps.duplicateMessage(msg, getMsgIdStr(msg)) + ps.duplicateMessage(peerC, getMsgIdStr(msg), msg.topic) } - ps._refreshScores() + ps['refreshScores']() const aScore = ps.score(peerA) const bScore = ps.score(peerB) const cScore = ps.score(peerC) @@ -271,6 +273,7 @@ describe('PeerScore', () => { const expected = tparams.topicWeight * tparams.meshMessageDeliveriesWeight * penalty expect(cScore).to.be.equal(expected) }) + it('should decay mesh message deliveries score', async function () { this.timeout(10000) // Create parameters with reasonable default values @@ -290,7 +293,7 @@ describe('PeerScore', () => { })) const peerA = (await PeerId.create({ keyType: 'secp256k1' })).toB58String() // Peer score should start at 0 - const ps = new PeerScore(params, connectionManager) + const ps = new PeerScore(params, connectionManager, null, { scoreCacheValidityMs: 0 }) ps.addPeer(peerA) ps.graft(peerA, mytopic) @@ -300,19 +303,18 @@ describe('PeerScore', () => { // deliver a bunch of messages from peer A const nMessages = 40 for (let i = 0; i < nMessages; i++) { - const msg = makeTestMessage(i, [mytopic]) - msg.receivedFrom = peerA + const msg = makeTestMessage(i, mytopic) ps.validateMessage(getMsgIdStr(msg)) - ps.deliverMessage(msg, getMsgIdStr(msg)) + ps.deliverMessage(peerA, getMsgIdStr(msg), msg.topic) } - ps._refreshScores() + ps['refreshScores']() let aScore = ps.score(peerA) expect(aScore).to.be.gte(0) // we need to refresh enough times for the decay to bring us below the threshold let decayedDeliveryCount = nMessages * tparams.meshMessageDeliveriesDecay for (let i = 0; i < 20; i++) { - ps._refreshScores() + ps['refreshScores']() decayedDeliveryCount *= tparams.meshMessageDeliveriesDecay } aScore = ps.score(peerA) @@ -322,6 +324,7 @@ describe('PeerScore', () => { const expected = tparams.topicWeight * tparams.meshMessageDeliveriesWeight * penalty expect(aScore).to.be.equal(expected) }) + it('should score mesh message failures', async function () { this.timeout(10000) // Create parameters with reasonable default values @@ -350,7 +353,7 @@ describe('PeerScore', () => { const peerA = (await PeerId.create({ keyType: 'secp256k1' })).toB58String() const peerB = (await PeerId.create({ keyType: 'secp256k1' })).toB58String() const peers = [peerA, peerB] - const ps = new PeerScore(params, connectionManager) + const ps = new PeerScore(params, connectionManager, null, { scoreCacheValidityMs: 0 }) peers.forEach((p) => { ps.addPeer(p) @@ -363,13 +366,12 @@ describe('PeerScore', () => { // deliver a bunch of messages from peer A. 
peer B does nothing const nMessages = 100 for (let i = 0; i < nMessages; i++) { - const msg = makeTestMessage(i, [mytopic]) - msg.receivedFrom = peerA + const msg = makeTestMessage(i, mytopic) ps.validateMessage(getMsgIdStr(msg)) - ps.deliverMessage(msg, getMsgIdStr(msg)) + ps.deliverMessage(peerA, getMsgIdStr(msg), msg.topic) } // peers A and B should both have zero scores, since the failure penalty hasn't been applied yet - ps._refreshScores() + ps['refreshScores']() let aScore = ps.score(peerA) let bScore = ps.score(peerB) expect(aScore).to.be.equal(0) @@ -377,7 +379,7 @@ describe('PeerScore', () => { // prune peer B to apply the penalty ps.prune(peerB, mytopic) - ps._refreshScores() + ps['refreshScores']() aScore = ps.score(peerA) bScore = ps.score(peerB) expect(aScore).to.be.equal(0) @@ -388,6 +390,7 @@ describe('PeerScore', () => { const expected = tparams.topicWeight * tparams.meshFailurePenaltyWeight * penalty * tparams.meshFailurePenaltyDecay expect(bScore).to.be.equal(expected) }) + it('should score invalid message deliveries', async function () { // Create parameters with reasonable default values const mytopic = 'mytopic' @@ -399,18 +402,17 @@ describe('PeerScore', () => { timeInMeshWeight: 0 })) const peerA = (await PeerId.create({ keyType: 'secp256k1' })).toB58String() - const ps = new PeerScore(params, connectionManager) + const ps = new PeerScore(params, connectionManager, null, { scoreCacheValidityMs: 0 }) ps.addPeer(peerA) ps.graft(peerA, mytopic) // deliver a bunch of messages from peer A const nMessages = 100 for (let i = 0; i < nMessages; i++) { - const msg = makeTestMessage(i, [mytopic]) - msg.receivedFrom = peerA - await ps.rejectMessage(msg, getMsgIdStr(msg), ERR_TOPIC_VALIDATOR_REJECT) + const msg = makeTestMessage(i, mytopic) + ps.rejectMessage(peerA, getMsgIdStr(msg), msg.topic, RejectReason.Reject) } - ps._refreshScores() + ps['refreshScores']() let aScore = ps.score(peerA) const expected = @@ -419,6 +421,7 @@ describe('PeerScore', () => { (nMessages * tparams.invalidMessageDeliveriesDecay) ** 2 expect(aScore).to.be.equal(expected) }) + it('should decay invalid message deliveries score', async function () { // Create parameters with reasonable default values const mytopic = 'mytopic' @@ -430,18 +433,17 @@ describe('PeerScore', () => { timeInMeshWeight: 0 })) const peerA = (await PeerId.create({ keyType: 'secp256k1' })).toB58String() - const ps = new PeerScore(params, connectionManager) + const ps = new PeerScore(params, connectionManager, null, { scoreCacheValidityMs: 0 }) ps.addPeer(peerA) ps.graft(peerA, mytopic) // deliver a bunch of messages from peer A const nMessages = 100 for (let i = 0; i < nMessages; i++) { - const msg = makeTestMessage(i, [mytopic]) - msg.receivedFrom = peerA - await ps.rejectMessage(msg, getMsgIdStr(msg), ERR_TOPIC_VALIDATOR_REJECT) + const msg = makeTestMessage(i, mytopic) + ps.rejectMessage(peerA, getMsgIdStr(msg), msg.topic, RejectReason.Reject) } - ps._refreshScores() + ps['refreshScores']() let aScore = ps.score(peerA) let expected = @@ -452,12 +454,13 @@ describe('PeerScore', () => { // refresh scores a few times to apply decay for (let i = 0; i < 10; i++) { - ps._refreshScores() + ps['refreshScores']() expected *= tparams.invalidMessageDeliveriesDecay ** 2 } aScore = ps.score(peerA) expect(aScore).to.be.equal(expected) }) + it('should score invalid/ignored messages', async function () { // this test adds coverage for the dark corners of message rejection const mytopic = 'mytopic' @@ -470,20 +473,18 @@ describe('PeerScore', () 
=> { })) const peerA = (await PeerId.create({ keyType: 'secp256k1' })).toB58String() const peerB = (await PeerId.create({ keyType: 'secp256k1' })).toB58String() - const ps = new PeerScore(params, connectionManager) + const ps = new PeerScore(params, connectionManager, null, { scoreCacheValidityMs: 0 }) ps.addPeer(peerA) ps.addPeer(peerB) - const msg = makeTestMessage(0, [mytopic]) - msg.receivedFrom = peerA + const msg = makeTestMessage(0, mytopic) // insert a record - await ps.validateMessage(getMsgIdStr(msg)) + ps.validateMessage(getMsgIdStr(msg)) // this should have no effect in the score, and subsequent duplicate messages should have no effect either - await ps.rejectMessage(msg, getMsgIdStr(msg), ERR_TOPIC_VALIDATOR_IGNORE) - msg.receivedFrom = peerB - await ps.duplicateMessage(msg, getMsgIdStr(msg)) + ps.rejectMessage(peerA, getMsgIdStr(msg), msg.topic, RejectReason.Ignore) + ps.duplicateMessage(peerB, getMsgIdStr(msg), msg.topic) let aScore = ps.score(peerA) let bScore = ps.score(peerB) @@ -497,13 +498,11 @@ describe('PeerScore', () => { ps.deliveryRecords.gc() // insert a new record in the message deliveries - msg.receivedFrom = peerA - await ps.validateMessage(getMsgIdStr(msg)) + ps.validateMessage(getMsgIdStr(msg)) // and reject the message to make sure duplicates are also penalized - await ps.rejectMessage(msg, getMsgIdStr(msg), ERR_TOPIC_VALIDATOR_REJECT) - msg.receivedFrom = peerB - await ps.duplicateMessage(msg, getMsgIdStr(msg)) + ps.rejectMessage(peerA, getMsgIdStr(msg), msg.topic, RejectReason.Reject) + ps.duplicateMessage(peerB, getMsgIdStr(msg), msg.topic) aScore = ps.score(peerA) bScore = ps.score(peerB) @@ -517,14 +516,11 @@ describe('PeerScore', () => { ps.deliveryRecords.gc() // insert a new record in the message deliveries - msg.receivedFrom = peerA - await ps.validateMessage(getMsgIdStr(msg)) + ps.validateMessage(getMsgIdStr(msg)) // and reject the message after a duplicate has arrived - msg.receivedFrom = peerB - await ps.duplicateMessage(msg, getMsgIdStr(msg)) - msg.receivedFrom = peerA - await ps.rejectMessage(msg, getMsgIdStr(msg), ERR_TOPIC_VALIDATOR_REJECT) + ps.duplicateMessage(peerB, getMsgIdStr(msg), msg.topic) + ps.rejectMessage(peerA, getMsgIdStr(msg), msg.topic, RejectReason.Reject) aScore = ps.score(peerA) bScore = ps.score(peerB) @@ -532,6 +528,7 @@ describe('PeerScore', () => { expect(aScore).to.equal(expected) expect(bScore).to.equal(expected) }) + it('should score w/ application score', async function () { const mytopic = 'mytopic' let appScoreValue = 0 @@ -540,18 +537,19 @@ describe('PeerScore', () => { appSpecificWeight: 0.5 }) const peerA = (await PeerId.create({ keyType: 'secp256k1' })).toB58String() - const ps = new PeerScore(params, connectionManager) + const ps = new PeerScore(params, connectionManager, null, { scoreCacheValidityMs: 0 }) ps.addPeer(peerA) ps.graft(peerA, mytopic) for (let i = -100; i < 100; i++) { appScoreValue = i - ps._refreshScores() + ps['refreshScores']() const aScore = ps.score(peerA) const expected = i * params.appSpecificWeight expect(aScore).to.equal(expected) } }) + it('should score w/ IP colocation', async function () { const mytopic = 'mytopic' const params = createPeerScoreParams({ @@ -564,14 +562,14 @@ describe('PeerScore', () => { const peerD = (await PeerId.create({ keyType: 'secp256k1' })).toB58String() const peers = [peerA, peerB, peerC, peerD] - const ps = new PeerScore(params, connectionManager) + const ps = new PeerScore(params, connectionManager, null, { scoreCacheValidityMs: 0 }) peers.forEach((p) => { 
ps.addPeer(p) ps.graft(p, mytopic) }) const setIPsForPeer = (p: string, ips: string[]) => { - ps._setIPs(p, ips, []) + ps['setIPs'](p, ips, []) const pstats = ps.peerStats.get(p) pstats!.ips = ips } @@ -581,7 +579,7 @@ describe('PeerScore', () => { setIPsForPeer(peerC, ['2.3.4.5', '3.4.5.6']) setIPsForPeer(peerD, ['2.3.4.5']) - ps._refreshScores() + ps['refreshScores']() const aScore = ps.score(peerA) const bScore = ps.score(peerB) const cScore = ps.score(peerC) @@ -597,6 +595,7 @@ describe('PeerScore', () => { expect(cScore).to.equal(expected) expect(dScore).to.equal(expected) }) + it('should score w/ behavior penalty', async function () { const params = createPeerScoreParams({ behaviourPenaltyWeight: -1, @@ -604,10 +603,10 @@ describe('PeerScore', () => { }) const peerA = (await PeerId.create({ keyType: 'secp256k1' })).toB58String() - const ps = new PeerScore(params, connectionManager) + const ps = new PeerScore(params, connectionManager, null, { scoreCacheValidityMs: 0 }) // add penalty on a non-existent peer - ps.addPenalty(peerA, 1) + ps.addPenalty(peerA, 1, ScorePenalty.MessageDeficit) let aScore = ps.score(peerA) expect(aScore).to.equal(0) @@ -617,19 +616,20 @@ describe('PeerScore', () => { aScore = ps.score(peerA) expect(aScore).to.equal(0) - ps.addPenalty(peerA, 1) + ps.addPenalty(peerA, 1, scorePenaltyAny) aScore = ps.score(peerA) expect(aScore).to.equal(-1) - ps.addPenalty(peerA, 1) + ps.addPenalty(peerA, 1, scorePenaltyAny) aScore = ps.score(peerA) expect(aScore).to.equal(-4) - ps._refreshScores() + ps['refreshScores']() aScore = ps.score(peerA) expect(aScore).to.equal(-3.9204) }) + it('should handle score retention', async function () { const mytopic = 'mytopic' const params = createPeerScoreParams({ @@ -639,12 +639,12 @@ describe('PeerScore', () => { }) const peerA = (await PeerId.create({ keyType: 'secp256k1' })).toB58String() - const ps = new PeerScore(params, connectionManager) + const ps = new PeerScore(params, connectionManager, null, { scoreCacheValidityMs: 0 }) ps.addPeer(peerA) ps.graft(peerA, mytopic) // score should equal -1000 (app-specific score) const expected = -1000 - ps._refreshScores() + ps['refreshScores']() let aScore = ps.score(peerA) expect(aScore).to.equal(expected) @@ -653,13 +653,13 @@ describe('PeerScore', () => { ps.removePeer(peerA) const _delay = params.retainScore / 2 await delay(_delay) - ps._refreshScores() + ps['refreshScores']() aScore = ps.score(peerA) expect(aScore).to.equal(expected) // wait remaining time (plus a little slop) and the score should reset to 0 await delay(_delay + 5) - ps._refreshScores() + ps['refreshScores']() aScore = ps.score(peerA) expect(aScore).to.equal(0) }) @@ -676,7 +676,7 @@ describe('PeerScore score cache', function () { decayInterval: 1000, topics: { a: { topicWeight: 10 } as TopicScoreParams } }) - const ps2 = new PeerScore(params, connectionManager) + const ps2 = new PeerScore(params, connectionManager, null, { scoreCacheValidityMs: 0 }) beforeEach(() => { sandbox = sinon.createSandbox() @@ -700,22 +700,18 @@ describe('PeerScore score cache', function () { expect(computeStoreStub.calledOnce).to.be.true }) - function toInMessage(topic: string): InMessage { - return { receivedFrom: '', data: new Uint8Array(0), topicIDs: [topic] } - } - const testCases = [ { name: 'decayInterval timeout', fun: () => sandbox.clock.tick(params.decayInterval) }, - { name: '_refreshScores', fun: () => ps2._refreshScores() }, - { name: 'addPenalty', fun: () => ps2.addPenalty(peerA, 10) }, + { name: 'refreshScores', fun: () => 
ps2['refreshScores']() }, + { name: 'addPenalty', fun: () => ps2.addPenalty(peerA, 10, scorePenaltyAny) }, { name: 'graft', fun: () => ps2.graft(peerA, 'a') }, { name: 'prune', fun: () => ps2.prune(peerA, 'a') }, - { name: '_markInvalidMessageDelivery', fun: () => ps2._markInvalidMessageDelivery(peerA, toInMessage('a')) }, - { name: '_markFirstMessageDelivery', fun: () => ps2._markFirstMessageDelivery(peerA, toInMessage('a')) }, - { name: '_markDuplicateMessageDelivery', fun: () => ps2._markDuplicateMessageDelivery(peerA, toInMessage('a')) }, - { name: '_setIPs', fun: () => ps2._setIPs(peerA, [], ['127.0.0.1']) }, - { name: '_removeIPs', fun: () => ps2._removeIPs(peerA, ['127.0.0.1']) }, - { name: '_updateIPs', fun: () => ps2._updateIPs() } + { name: 'markInvalidMessageDelivery', fun: () => ps2['markInvalidMessageDelivery'](peerA, 'a') }, + { name: 'markFirstMessageDelivery', fun: () => ps2['markFirstMessageDelivery'](peerA, 'a') }, + { name: 'markDuplicateMessageDelivery', fun: () => ps2['markDuplicateMessageDelivery'](peerA, 'a') }, + { name: 'setIPs', fun: () => ps2['setIPs'](peerA, [], ['127.0.0.1']) }, + { name: 'removeIPs', fun: () => ps2['removeIPs'](peerA, ['127.0.0.1']) }, + { name: 'updateIPs', fun: () => ps2['updateIPs']() } ] for (const { name, fun } of testCases) { diff --git a/test/scoreMetrics.spec.ts b/test/scoreMetrics.spec.ts new file mode 100644 index 00000000..a50898bb --- /dev/null +++ b/test/scoreMetrics.spec.ts @@ -0,0 +1,50 @@ +import ConnectionManager from 'libp2p/src/connection-manager' +import PeerId from 'peer-id' +import { computeAllPeersScoreWeights } from '../ts/score/scoreMetrics' +import { createPeerScoreParams, createTopicScoreParams, PeerScore } from '../ts/score' +import { ScorePenalty } from '../ts/metrics' +import { expect } from 'chai' + +const connectionManager = new Map() as unknown as ConnectionManager +connectionManager.getAll = () => [] + +describe('score / scoreMetrics', () => { + it('computeScoreWeights', async () => { + // Create parameters with reasonable default values + const topic = 'test_topic' + + const params = createPeerScoreParams({ + topicScoreCap: 1000 + }) + params.topics[topic] = createTopicScoreParams({ + topicWeight: 0.5, + timeInMeshWeight: 1, + timeInMeshQuantum: 1, + timeInMeshCap: 3600 + }) + + // Add Map for metrics + const topicStrToLabel = new Map() + topicStrToLabel.set(topic, topic) + + const peerA = (await PeerId.create({ keyType: 'secp256k1' })).toB58String() + // Peer score should start at 0 + const ps = new PeerScore(params, connectionManager, null, { scoreCacheValidityMs: 0 }) + ps.addPeer(peerA) + + // Do some actions that penalize the peer + const msgId = 'aaaaaaaaaaaaaaaa' + ps.addPenalty(peerA, 1, ScorePenalty.BrokenPromise) + ps.validateMessage(msgId) + ps.deliverMessage(peerA, msgId, topic) + + const sw = computeAllPeersScoreWeights([peerA], ps.peerStats, ps.params, ps.peerIPs, topicStrToLabel) + + // Ensure score is the same + expect(sw.score).to.deep.equal([ps.score(peerA)], 'Score from metrics and actual score not equal') + expect(sw.byTopic.get(topic)).to.deep.equal( + { p1w: [0], p2w: [1], p3w: [0], p3bw: [0], p4w: [0] }, + 'Wrong score weights by topic' + ) + }) +}) diff --git a/test/tracer.spec.ts b/test/tracer.spec.ts index 4ee2ac1b..26500ea1 100644 --- a/test/tracer.spec.ts +++ b/test/tracer.spec.ts @@ -1,6 +1,5 @@ import { expect } from 'chai' import delay from 'delay' -import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' import { IWantTracer } from '../ts/tracer' import * as 
constants from '../ts/constants' import { makeTestMessage, getMsgId, getMsgIdStr } from './utils' @@ -9,14 +8,13 @@ describe('IWantTracer', () => { it('should track broken promises', async function () { // tests that unfullfilled promises are tracked correctly this.timeout(6000) - const t = new IWantTracer() + const t = new IWantTracer(constants.GossipsubIWantFollowupTime, null) const peerA = 'A' const peerB = 'B' const msgIds = [] for (let i = 0; i < 100; i++) { - const m = makeTestMessage(i) - m.from = peerA + const m = makeTestMessage(i, 'test_topic') msgIds.push(getMsgId(m)) } @@ -38,15 +36,14 @@ describe('IWantTracer', () => { it('should track unbroken promises', async function () { // like above, but this time we deliver messages to fullfil the promises this.timeout(6000) - const t = new IWantTracer() + const t = new IWantTracer(constants.GossipsubIWantFollowupTime, null) const peerA = 'A' const peerB = 'B' const msgs = [] const msgIds = [] for (let i = 0; i < 100; i++) { - const m = makeTestMessage(i) - m.from = peerA + const m = makeTestMessage(i, 'test_topic') msgs.push(m) msgIds.push(getMsgId(m)) } diff --git a/test/utils/create-gossipsub.ts b/test/utils/create-gossipsub.ts index 8aca0c25..94b846f0 100644 --- a/test/utils/create-gossipsub.ts +++ b/test/utils/create-gossipsub.ts @@ -1,12 +1,17 @@ -import Gossipsub, { GossipInputOptions } from '../../ts' +import { EventEmitter } from 'events' +import PubsubBaseProtocol from 'libp2p-interfaces/src/pubsub' +import Gossipsub, { GossipsubOpts } from '../../ts' import { fastMsgIdFn } from './msgId' import { createPeers } from './create-peer' -import PubsubBaseProtocol from 'libp2p-interfaces/src/pubsub' +import { FloodsubID } from '../../ts/constants' + +export type PubsubBaseMinimal = EventEmitter & + Pick /** * Start node - gossipsub + libp2p */ -export async function startNode(gs: PubsubBaseProtocol) { +export async function startNode(gs: PubsubBaseMinimal) { await gs._libp2p.start() await gs.start() } @@ -14,12 +19,12 @@ export async function startNode(gs: PubsubBaseProtocol) { /** * Stop node - gossipsub + libp2p */ -export async function stopNode(gs: PubsubBaseProtocol) { +export async function stopNode(gs: PubsubBaseMinimal) { await gs._libp2p.stop() await gs.stop() } -export async function connectGossipsub(gs1: PubsubBaseProtocol, gs2: PubsubBaseProtocol) { +export async function connectGossipsub(gs1: PubsubBaseMinimal, gs2: PubsubBaseMinimal) { await gs1._libp2p.dialProtocol(gs2._libp2p.peerId, gs1.multicodecs) } @@ -33,7 +38,7 @@ export async function createGossipsubs({ }: { number?: number started?: boolean - options?: Partial + options?: Partial } = {}) { const libp2ps = await createPeers({ number, started }) const gss = libp2ps.map((libp2p) => new Gossipsub(libp2p, { ...options, fastMsgIdFn: fastMsgIdFn })) @@ -45,10 +50,36 @@ export async function createGossipsubs({ return gss } +export async function createPubsubs({ + number = 1, + started = true, + options = {} +}: { + number?: number + started?: boolean + options?: Partial +} = {}) { + const libp2ps = await createPeers({ number, started }) + const pubsubs = libp2ps.map( + (libp2p) => + new PubsubBaseProtocol({ + debugName: 'pubsub', + multicodecs: FloodsubID, + libp2p + }) + ) + + if (started) { + await Promise.all(pubsubs.map((gs) => gs.start())) + } + + return pubsubs +} + /** * Stop gossipsub nodes */ -export async function tearDownGossipsubs(gss: PubsubBaseProtocol[]) { +export async function tearDownGossipsubs(gss: PubsubBaseMinimal[]) { await Promise.all( 
gss.map(async (p) => { await p.stop() @@ -62,7 +93,7 @@ export async function tearDownGossipsubs(gss: PubsubBaseProtocol[]) { * @param {Gossipsub[]} gss * @param {number} num number of peers to connect */ -export async function connectSome(gss: PubsubBaseProtocol[], num: number) { +export async function connectSome(gss: PubsubBaseMinimal[], num: number) { for (let i = 0; i < gss.length; i++) { for (let j = 0; j < num; j++) { const n = Math.floor(Math.random() * gss.length) @@ -75,11 +106,11 @@ export async function connectSome(gss: PubsubBaseProtocol[], num: number) { } } -export async function sparseConnect(gss: PubsubBaseProtocol[]) { +export async function sparseConnect(gss: PubsubBaseMinimal[]) { await connectSome(gss, 3) } -export async function denseConnect(gss: PubsubBaseProtocol[]) { +export async function denseConnect(gss: PubsubBaseMinimal[]) { await connectSome(gss, 10) } @@ -87,7 +118,7 @@ export async function denseConnect(gss: PubsubBaseProtocol[]) { * Connect every gossipsub node to every other * @param {Gossipsub[]} gss */ -export async function connectGossipsubs(gss: PubsubBaseProtocol[]) { +export async function connectGossipsubs(gss: PubsubBaseMinimal[]) { for (let i = 0; i < gss.length; i++) { for (let j = i + 1; j < gss.length; j++) { await connectGossipsub(gss[i], gss[j]) @@ -101,7 +132,7 @@ export async function connectGossipsubs(gss: PubsubBaseProtocol[]) { export async function createConnectedGossipsubs({ number = 2, options = {} -}: { number?: number; options?: Partial } = {}) { +}: { number?: number; options?: Partial } = {}) { const gss = await createGossipsubs({ number, started: true, options }) await connectGossipsubs(gss) return gss diff --git a/test/utils/create-peer.ts b/test/utils/create-peer.ts index c92f360a..98b981ff 100644 --- a/test/utils/create-peer.ts +++ b/test/utils/create-peer.ts @@ -64,6 +64,10 @@ function getListenAddress(peerId: PeerId) { } } +export async function createPeerId() { + return await PeerId.createFromJSON(Peers[0]) +} + /** * Create libp2p node, selectively determining the listen address based on the operating environment * If no peerId is given, default to the first peer in the fixtures peer list diff --git a/test/utils/index.ts b/test/utils/index.ts index eed73efe..565e7829 100644 --- a/test/utils/index.ts +++ b/test/utils/index.ts @@ -4,10 +4,10 @@ import PeerId from 'peer-id' import delay from 'delay' import Libp2p from 'libp2p' import Gossipsub from '../../ts' +import { GossipsubMessage, TopicStr } from '../../ts/types' export * from './create-peer' export * from './create-gossipsub' -export * from './make-test-message' export * from './msgId' export const first = (map: Map | Set | undefined): T => { @@ -44,14 +44,14 @@ export const createFloodsubNode = async (libp2p: Libp2p, shouldStart = false) => } export const waitForAllNodesToBePeered = async (peers: Gossipsub[], attempts = 10, delayMs = 100) => { - const nodeIds = peers.map((peer) => peer.peerId.toB58String()) + const nodeIds = peers.map((peer) => peer.peerId!.toB58String()) for (let i = 0; i < attempts; i++) { for (const node of peers) { - const nodeId = node.peerId.toB58String() + const nodeId = node.peerId!.toB58String() const others = nodeIds.filter((peerId) => peerId !== nodeId) - const missing = others.some((other) => !node.peers.has(other)) + const missing = others.some((other) => !node['peers'].has(other)) if (!missing) { return @@ -61,3 +61,12 @@ export const waitForAllNodesToBePeered = async (peers: Gossipsub[], attempts = 1 await delay(delayMs) } } + +export 
function makeTestMessage(i: number, topic: TopicStr): GossipsubMessage { + return { + seqno: Uint8Array.from(new Array(8).fill(i)), + data: Uint8Array.from([i]), + from: new Uint8Array(0), + topic + } +} diff --git a/test/utils/make-test-message.ts b/test/utils/make-test-message.ts deleted file mode 100644 index 4d5b7bc5..00000000 --- a/test/utils/make-test-message.ts +++ /dev/null @@ -1,11 +0,0 @@ -import { InMessage } from 'libp2p-interfaces/src/pubsub' - -export const makeTestMessage = (i: number, topicIDs: string[] = []): InMessage => { - return { - receivedFrom: '', - seqno: Uint8Array.from(new Array(8).fill(i)), - data: Uint8Array.from([i]), - from: 'test', - topicIDs - } -} diff --git a/test/utils/msgId.ts b/test/utils/msgId.ts index 2d03f29b..05ac9b76 100644 --- a/test/utils/msgId.ts +++ b/test/utils/msgId.ts @@ -1,10 +1,10 @@ import SHA256 from '@chainsafe/as-sha256' -import { InMessage } from 'libp2p-interfaces/src/pubsub' +import { RPC } from '../../ts/message/rpc' import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string' import { messageIdToString } from '../../ts/utils/messageIdToString' -export const getMsgId = (msg: InMessage) => { - const from = uint8ArrayFromString(msg.from ?? '') +export const getMsgId = (msg: RPC.IMessage) => { + const from = msg.from ? msg.from : new Uint8Array(0) const seqno = msg.seqno instanceof Uint8Array ? msg.seqno : uint8ArrayFromString(msg.seqno ?? '') const result = new Uint8Array(from.length + seqno.length) result.set(from, 0) @@ -12,6 +12,6 @@ export const getMsgId = (msg: InMessage) => { return result } -export const getMsgIdStr = (msg: InMessage) => messageIdToString(getMsgId(msg)) +export const getMsgIdStr = (msg: RPC.IMessage) => messageIdToString(getMsgId(msg)) -export const fastMsgIdFn = (msg: InMessage) => messageIdToString(SHA256.digest(msg.data)) +export const fastMsgIdFn = (msg: RPC.IMessage) => (msg.data ? messageIdToString(SHA256.digest(msg.data)) : '0') diff --git a/ts/config.ts b/ts/config.ts new file mode 100644 index 00000000..e754352f --- /dev/null +++ b/ts/config.ts @@ -0,0 +1,28 @@ +export type GossipsubOptsSpec = { + /** D sets the optimal degree for a Gossipsub topic mesh. */ + D: number + /** Dlo sets the lower bound on the number of peers we keep in a Gossipsub topic mesh. */ + Dlo: number + /** Dhi sets the upper bound on the number of peers we keep in a Gossipsub topic mesh. */ + Dhi: number + /** Dscore affects how peers are selected when pruning a mesh due to over subscription. */ + Dscore: number + /** Dout sets the quota for the number of outbound connections to maintain in a topic mesh. */ + Dout: number + /** Dlazy affects how many peers we will emit gossip to at each heartbeat. */ + Dlazy: number + /** heartbeatInterval is the time between heartbeats in milliseconds */ + heartbeatInterval: number + /** + * fanoutTTL controls how long we keep track of the fanout state. If it's been + * fanoutTTL milliseconds since we've published to a topic that we're not subscribed to, + * we'll delete the fanout map for that topic. 
+ */ + fanoutTTL: number + /** mcacheLength is the number of windows to retain full messages for IWANT responses */ + mcacheLength: number + /** mcacheGossip is the number of windows to gossip about */ + mcacheGossip: number + /** seenTTL is the number of milliseconds to retain message IDs in the seen cache */ + seenTTL: number +} diff --git a/ts/constants.ts b/ts/constants.ts index 18127433..78873b51 100644 --- a/ts/constants.ts +++ b/ts/constants.ts @@ -238,3 +238,8 @@ export const ACCEPT_FROM_WHITELIST_MAX_MESSAGES = 128 * this peer up to this time duration. */ export const ACCEPT_FROM_WHITELIST_DURATION_MS = 1000 + +/** + * The default MeshMessageDeliveriesWindow to be used in metrics. + */ +export const DEFAULT_METRIC_MESH_MESSAGE_DELIVERIES_WINDOWS = 1000 diff --git a/ts/index.ts b/ts/index.ts index b2255c94..f6798327 100644 --- a/ts/index.ts +++ b/ts/index.ts @@ -1,4 +1,12 @@ -import Pubsub, { InMessage, utils } from 'libp2p-interfaces/src/pubsub' +import pipe from 'it-pipe' +import Libp2p, { Connection, EventEmitter } from 'libp2p' +import Envelope from 'libp2p/src/record/envelope' +import Registrar from 'libp2p/src/registrar' +import PeerId, { createFromB58String, createFromBytes } from 'peer-id' +import debug, { Debugger } from 'debug' +import MulticodecTopology from 'libp2p-interfaces/src/topology/multicodec-topology' +import PeerStreams from 'libp2p-interfaces/src/pubsub/peer-streams' + import { MessageCache } from './message-cache' import { RPC, IRPC } from './message/rpc' import * as constants from './constants' @@ -8,101 +16,143 @@ import { PeerScoreParams, PeerScoreThresholds, createPeerScoreParams, - createPeerScoreThresholds + createPeerScoreThresholds, + PeerScoreStatsDump } from './score' import { IWantTracer } from './tracer' -import { AddrInfo, MessageIdFunction } from './interfaces' import { SimpleTimeCache } from './utils/time-cache' -import { Debugger } from 'debug' -import Libp2p from 'libp2p' - -import PeerStreams from 'libp2p-interfaces/src/pubsub/peer-streams' -import PeerId = require('peer-id') -// eslint-disable-next-line @typescript-eslint/ban-ts-comment -// @ts-ignore -import Envelope = require('libp2p/src/record/envelope') import { ACCEPT_FROM_WHITELIST_DURATION_MS, ACCEPT_FROM_WHITELIST_MAX_MESSAGES, ACCEPT_FROM_WHITELIST_THRESHOLD_SCORE } from './constants' +import { + ChurnReason, + getMetrics, + IHaveIgnoreReason, + InclusionReason, + Metrics, + MetricsRegister, + ScorePenalty, + TopicStrToLabel, + ToSendGroupCount +} from './metrics' +import { + GossipsubMessage, + MessageAcceptance, + MsgIdFn, + PublishConfig, + SignaturePolicy, + TopicStr, + MsgIdStr, + ValidateError, + PeerIdStr, + MessageStatus, + RejectReason, + RejectReasonObj, + FastMsgIdFn, + AddrInfo, + DataTransform, + TopicValidatorFn, + rejectReasonFromAcceptance +} from './types' +import { buildRawMessage, validateToRawMessage } from './utils/buildRawMessage' +import { msgIdFnStrictNoSign, msgIdFnStrictSign } from './utils/msgIdFn' +import { computeAllPeersScoreWeights } from './score/scoreMetrics' +import { getPublishConfigFromPeerId } from './utils/publishConfig' +import { GossipsubOptsSpec } from './config' + +// From 'bl' library +interface BufferList { + slice(): Buffer +} + +type ConnectionDirection = 'inbound' | 'outbound' + +type ReceivedMessageResult = + | { code: MessageStatus.duplicate; msgId: MsgIdStr } + | ({ code: MessageStatus.invalid; msgId?: MsgIdStr } & RejectReasonObj) + | { code: MessageStatus.valid; msgIdStr: MsgIdStr; msg: GossipsubMessage } export const multicodec: 
string = constants.GossipsubIDv11 -export interface GossipInputOptions { +export type GossipsubOpts = GossipsubOptsSpec & { + // Behaviour emitSelf: boolean + /** if can relay messages not subscribed */ canRelayMessage: boolean + /** if incoming messages on a subscribed topic should be automatically gossiped */ gossipIncoming: boolean + /** if dial should fallback to floodsub */ fallbackToFloodsub: boolean + /** if self-published messages should be sent to all peers */ floodPublish: boolean + /** whether PX is enabled; this should be enabled in bootstrappers and other well connected/trusted nodes. */ doPX: boolean - msgIdFn: MessageIdFunction + /** peers with which we will maintain direct connections */ + directPeers: AddrInfo[] + /** + * If true will not forward messages to mesh peers until reportMessageValidationResult() is called. + * Messages will be cached in mcache for some time after which they are evicted. Calling + * reportMessageValidationResult() after the message is dropped from mcache won't forward the message. + */ + asyncValidation: boolean + /** Do not throw `InsufficientPeers` error if publishing to zero peers */ + allowPublishToZeroPeers: boolean + /** For a single stream, await processing each RPC before processing the next */ + awaitRpcHandler: boolean + /** For a single RPC, await processing each message before processing the next */ + awaitRpcMessageHandler: boolean + + // Extra modules, config + msgIdFn: MsgIdFn + /** fast message id function */ fastMsgIdFn: FastMsgIdFn + /** override the default MessageCache */ messageCache: MessageCache - globalSignaturePolicy: 'StrictSign' | 'StrictNoSign' | undefined + /** signing policy to apply across all messages */ + globalSignaturePolicy: SignaturePolicy | undefined + /** peer score parameters */ scoreParams: Partial + /** peer score thresholds */ scoreThresholds: Partial - directPeers: AddrInfo[] - - /** - * D sets the optimal degree for a Gossipsub topic mesh. - */ - D: number - - /** - * Dlo sets the lower bound on the number of peers we keep in a Gossipsub topic mesh. - */ - Dlo: number - - /** - * Dhi sets the upper bound on the number of peers we keep in a Gossipsub topic mesh. - */ - Dhi: number - - /** - * Dscore affects how peers are selected when pruning a mesh due to over subscription. - */ - Dscore: number + /** customize GossipsubIWantFollowupTime in order not to apply IWANT penalties */ + gossipsubIWantFollowupMs: number - /** - * Dout sets the quota for the number of outbound connections to maintain in a topic mesh. - */ - Dout: number - - /** - * Dlazy affects how many peers we will emit gossip to at each heartbeat. - */ - Dlazy: number - - /** - * heartbeatInterval is the time between heartbeats in milliseconds - */ - heartbeatInterval: number + dataTransform?: DataTransform + metricsRegister?: MetricsRegister | null + metricsTopicStrToLabel?: TopicStrToLabel - /** - * fanoutTTL controls how long we keep track of the fanout state. If it's been - * fanoutTTL milliseconds since we've published to a topic that we're not subscribed to, - * we'll delete the fanout map for that topic. 
- */ - fanoutTTL: number - - /** - * mcacheLength is the number of windows to retain full messages for IWANT responses - */ - mcacheLength: number + // Debug + /** Prefix tag for debug logs */ + debugName?: string +} - /** - * mcacheGossip is the number of windows to gossip about - */ - mcacheGossip: number +export type GossipsubEvents = { + 'gossipsub:message': { + propagationSource: PeerId + msgId: MsgIdStr + msg: GossipsubMessage + } +} - /** - * seenTTL is the number of milliseconds to retain message IDs in the seen cache - */ - seenTTL: number +enum GossipStatusCode { + started, + stopped } -interface GossipOptions extends GossipInputOptions { +type GossipStatus = + | { + code: GossipStatusCode.started + registrarTopologyId: string + heartbeatTimeout: NodeJS.Timeout + hearbeatStartMs: number + } + | { + code: GossipStatusCode.stopped + } + +interface GossipOptions extends GossipsubOpts { scoreParams: PeerScoreParams scoreThresholds: PeerScoreThresholds } @@ -114,138 +164,151 @@ interface AcceptFromWhitelistEntry { acceptUntil: number } -type FastMsgIdFn = (msg: InMessage) => string +export default class Gossipsub extends EventEmitter { + /** + * The signature policy to follow by default + */ + private readonly globalSignaturePolicy: SignaturePolicy + + private readonly publishConfig: PublishConfig + + private readonly dataTransform: DataTransform | undefined + + // State -export default class Gossipsub extends Pubsub { - peers: Map + private readonly peers = new Map() /** Direct peers */ - direct: Set + private readonly direct = new Set() + + /** Floodsub peers */ + private readonly floodsubPeers = new Set() /** Cache of seen messages */ - seenCache: SimpleTimeCache + private readonly seenCache: SimpleTimeCache /** * Map of peer id and AcceptRequestWhileListEntry */ - acceptFromWhitelist = new Map() + private readonly acceptFromWhitelist = new Map() - topics: Map> + /** + * Map of topics to which peers are subscribed to + */ + private readonly topics = new Map>() + + /** + * List of our subscriptions + */ + private readonly subscriptions = new Set() /** * Map of topic meshes * topic => peer id set */ - mesh = new Map>() + private readonly mesh = new Map>() /** * Map of topics to set of peers. 
These mesh peers are the ones to which we are publishing without a topic membership * topic => peer id set */ - fanout = new Map>() + private readonly fanout = new Map>() /** * Map of last publish time for fanout topics * topic => last publish time */ - lastpub = new Map() + private readonly fanoutLastpub = new Map() /** * Map of pending messages to gossip * peer id => control messages */ - gossip = new Map() + private readonly gossip = new Map() /** * Map of control messages * peer id => control message */ - control = new Map() + private readonly control = new Map() /** * Number of IHAVEs received from peer in the last heartbeat */ - peerhave = new Map() + private readonly peerhave = new Map() /** Number of messages we have asked from peer in the last heartbeat */ - iasked = new Map() + private readonly iasked = new Map() /** Prune backoff map */ - backoff = new Map>() + private readonly backoff = new Map>() /** * Connection direction cache, marks peers with outbound connections * peer id => direction */ - outbound = new Map() - defaultMsgIdFn: MessageIdFunction + private readonly outbound = new Map() + private readonly msgIdFn: MsgIdFn /** * A fast message id function used for internal message de-duplication */ - getFastMsgIdStr: FastMsgIdFn | undefined + private readonly fastMsgIdFn: FastMsgIdFn | undefined /** Maps fast message-id to canonical message-id */ - fastMsgIdCache: SimpleTimeCache | undefined + private readonly fastMsgIdCache: SimpleTimeCache | undefined /** - * A message cache that contains the messages for last few hearbeat ticks + * Short term cache for published message ids. This is used for penalizing peers sending + * our own messages back if the messages are anonymous or use a random author. */ - messageCache: MessageCache + private readonly publishedMessageIds: SimpleTimeCache + + /** + * A message cache that contains the messages for last few heartbeat ticks + */ + private readonly mcache: MessageCache /** Peer score tracking */ - score: PeerScore + private readonly score: PeerScore + + private readonly topicValidators = new Map() /** * Number of heartbeats since the beginning of time * This allows us to amortize some resource cleanup -- eg: backoff cleanup */ - heartbeatTicks = 0 + private heartbeatTicks = 0 /** * Tracks IHAVE/IWANT promises broken by peers */ - gossipTracer = new IWantTracer() + readonly gossipTracer: IWantTracer - multicodecs: string[] - started: boolean - peerId: PeerId - subscriptions: Set - _libp2p: Libp2p - _options: GossipOptions - _directPeerInitial: NodeJS.Timeout - log: Debugger & { err: Debugger } - // eslint-disable-next-line @typescript-eslint/ban-types - emit: (event: string | symbol, ...args: any[]) => boolean + // Public for go-gossipsub tests + readonly _libp2p: Libp2p + readonly peerId: PeerId + readonly multicodecs: string[] = [constants.GossipsubIDv11, constants.GossipsubIDv10] + + private directPeerInitial: NodeJS.Timeout | null = null + private log: Debugger public static multicodec: string = constants.GossipsubIDv11 - _heartbeatTimer: { + readonly opts: GossipOptions + private readonly registrar: Registrar + private readonly metrics: Metrics | null + private status: GossipStatus = { code: GossipStatusCode.stopped } + + private heartbeatTimer: { _intervalId: NodeJS.Timeout | undefined runPeriodically(fn: () => void, period: number): void cancel(): void - } | null - - // TODO: add remaining props - /** - * @param {Libp2p} libp2p - * @param {Object} [options] - * @param {boolean} [options.emitSelf = false] if publish should emit 
to self, if subscribed - * @param {boolean} [options.canRelayMessage = false] - if can relay messages not subscribed - * @param {boolean} [options.gossipIncoming = true] if incoming messages on a subscribed topic should be automatically gossiped - * @param {boolean} [options.fallbackToFloodsub = true] if dial should fallback to floodsub - * @param {boolean} [options.floodPublish = true] if self-published messages should be sent to all peers - * @param {boolean} [options.doPX = false] whether PX is enabled; this should be enabled in bootstrappers and other well connected/trusted nodes. - * @param {Object} [options.messageCache] override the default MessageCache - * @param {FastMsgIdFn} [options.fastMsgIdFn] fast message id function - * @param {string} [options.globalSignaturePolicy = "StrictSign"] signing policy to apply across all messages - * @param {Object} [options.scoreParams] peer score parameters - * @param {Object} [options.scoreThresholds] peer score thresholds - * @param {AddrInfo[]} [options.directPeers] peers with which we will maintain direct connections - * @constructor - */ - constructor(libp2p: Libp2p, options: Partial = {}) { - const multicodecs = [constants.GossipsubIDv11, constants.GossipsubIDv10] + } | null = null + + constructor(libp2p: Libp2p, options: Partial = {}) { + super() + const opts = { gossipIncoming: true, fallbackToFloodsub: true, @@ -263,103 +326,335 @@ export default class Gossipsub extends Pubsub { mcacheLength: constants.GossipsubHistoryLength, mcacheGossip: constants.GossipsubHistoryGossip, seenTTL: constants.GossipsubSeenTTL, + gossipsubIWantFollowupMs: constants.GossipsubIWantFollowupTime, ...options, scoreParams: createPeerScoreParams(options.scoreParams), scoreThresholds: createPeerScoreThresholds(options.scoreThresholds) } as GossipOptions + this.globalSignaturePolicy = opts.globalSignaturePolicy ?? SignaturePolicy.StrictSign + this.publishConfig = getPublishConfigFromPeerId(this.globalSignaturePolicy, libp2p.peerId) + this.peerId = libp2p.peerId + // Also wants to get notified of peers connected using floodsub if (opts.fallbackToFloodsub) { - multicodecs.push(constants.FloodsubID) + this.multicodecs.push(constants.FloodsubID) } - super({ - debugName: 'libp2p:gossipsub', - multicodecs, - libp2p, - ...opts - }) + // From pubsub + this.log = debug(opts.debugName ?? 
'libp2p:gossipsub') - this._options = opts + // Gossipsub + + this.opts = opts this.direct = new Set(opts.directPeers.map((p) => p.id.toB58String())) // set direct peer addresses in the address book opts.directPeers.forEach((p) => { - libp2p.peerStore.addressBook.add(p.id, p.addrs) + libp2p.peerStore.addressBook.add( + p.id as unknown as Parameters[0], + p.addrs + ) }) this.seenCache = new SimpleTimeCache({ validityMs: opts.seenTTL }) + this.publishedMessageIds = new SimpleTimeCache({ validityMs: opts.seenTTL }) + + this.mcache = options.messageCache || new MessageCache(opts.mcacheGossip, opts.mcacheLength) + + if (options.msgIdFn) { + // Use custom function + this.msgIdFn = options.msgIdFn + } else { + switch (this.globalSignaturePolicy) { + case SignaturePolicy.StrictSign: + this.msgIdFn = msgIdFnStrictSign + break + case SignaturePolicy.StrictNoSign: + this.msgIdFn = msgIdFnStrictNoSign + break + } + } + + if (options.fastMsgIdFn) { + this.fastMsgIdFn = options.fastMsgIdFn + this.fastMsgIdCache = new SimpleTimeCache({ validityMs: opts.seenTTL }) + } + + if (options.dataTransform) { + this.dataTransform = options.dataTransform + } + + if (options.metricsRegister) { + if (!options.metricsTopicStrToLabel) { + throw Error('Must set metricsTopicStrToLabel with metrics') + } + + // in theory, each topic has its own meshMessageDeliveriesWindow param + // however in lodestar, we configure it mostly the same so just pick the max of positive ones + // (some topics have meshMessageDeliveriesWindow as 0) + const maxMeshMessageDeliveriesWindowMs = Math.max( + ...Object.values(opts.scoreParams.topics).map((topicParam) => topicParam.meshMessageDeliveriesWindow), + constants.DEFAULT_METRIC_MESH_MESSAGE_DELIVERIES_WINDOWS + ) + + const metrics = getMetrics(options.metricsRegister, options.metricsTopicStrToLabel, { + gossipPromiseExpireSec: this.opts.gossipsubIWantFollowupMs / 1000, + behaviourPenaltyThreshold: opts.scoreParams.behaviourPenaltyThreshold, + maxMeshMessageDeliveriesWindowSec: maxMeshMessageDeliveriesWindowMs / 1000 + }) - this.messageCache = options.messageCache || new MessageCache(opts.mcacheGossip, opts.mcacheLength) + metrics.mcacheSize.addCollect(() => this.onScrapeMetrics(metrics)) + for (const protocol of this.multicodecs) { + metrics.protocolsEnabled.set({ protocol }, 1) + } - this.getFastMsgIdStr = options.fastMsgIdFn ?? undefined + this.metrics = metrics + } else { + this.metrics = null + } - this.fastMsgIdCache = options.fastMsgIdFn ? new SimpleTimeCache({ validityMs: opts.seenTTL }) : undefined + this.gossipTracer = new IWantTracer(this.opts.gossipsubIWantFollowupMs, this.metrics) /** * libp2p */ this._libp2p = libp2p + this.registrar = libp2p.registrar + this.score = new PeerScore(this.opts.scoreParams, libp2p.connectionManager, this.metrics, { + scoreCacheValidityMs: opts.heartbeatInterval + }) + } + + // LIFECYCLE METHODS + + /** + * Mounts the gossipsub protocol onto the libp2p node and sends our + * our subscriptions to every peer connected + */ + async start(): Promise { + // From pubsub + if (this.status.code === GossipStatusCode.started) { + return + } + + this.log('starting') + + // Incoming streams + // Called after a peer dials us + this.registrar.handle(this.multicodecs, this.onIncomingStream.bind(this)) + + // # How does Gossipsub interact with libp2p? 
Rough guide from Mar 2022 + // + // ## Setup: + // Gossipsub requests libp2p to callback, TBD + // + // `this.libp2p.handle()` registers a handler for `/meshsub/1.1.0` and other Gossipsub protocols + // The handler callback is registered in libp2p Upgrader.protocols map. + // + // Upgrader receives an inbound connection from some transport and (`Upgrader.upgradeInbound`): + // - Adds encryption (NOISE in our case) + // - Multiplex stream + // - Create a muxer and register that for each new stream call Upgrader.protocols handler + // + // ## Topology + // - new instance of Topology (unlinked to libp2p) with handlers + // - registar.register(topology) + + // register protocol with topology + // Topology callbacks called on connection manager changes + const topology = new MulticodecTopology({ + multicodecs: this.multicodecs, + handlers: { + onConnect: this.onPeerConnected.bind(this), + onDisconnect: this.onPeerDisconnected.bind(this) + } + }) + const registrarTopologyId = await this.registrar.register(topology) + + // Schedule to start heartbeat after `GossipsubHeartbeatInitialDelay` + const heartbeatTimeout = setTimeout(this.runHeartbeat, constants.GossipsubHeartbeatInitialDelay) + // Then, run heartbeat every `heartbeatInterval` offset by `GossipsubHeartbeatInitialDelay` + + this.status = { + code: GossipStatusCode.started, + registrarTopologyId, + heartbeatTimeout: heartbeatTimeout, + hearbeatStartMs: Date.now() + constants.GossipsubHeartbeatInitialDelay + } + + this.log('started') + + this.score.start() + // connect to direct peers + this.directPeerInitial = setTimeout(() => { + this.direct.forEach((id) => { + this.connect(id) + }) + }, constants.GossipsubDirectConnectInitialDelay) + } + + /** + * Unmounts the gossipsub protocol and shuts down every connection + */ + async stop(): Promise { + // From pubsub + + if (this.status.code !== GossipStatusCode.started) { + return + } + + const { registrarTopologyId } = this.status + this.status = { code: GossipStatusCode.stopped } + + // unregister protocol and handlers + this.registrar.unregister(registrarTopologyId) - this.score = new PeerScore(this._options.scoreParams, libp2p.connectionManager) + this.log('stopping') + for (const peerStreams of this.peers.values()) { + peerStreams.close() + } + + this.peers.clear() + this.subscriptions.clear() + this.log('stopped') + + // Gossipsub + + if (this.heartbeatTimer) { + this.heartbeatTimer.cancel() + this.heartbeatTimer = null + } + + this.score.stop() + + this.mesh.clear() + this.fanout.clear() + this.fanoutLastpub.clear() + this.gossip.clear() + this.control.clear() + this.peerhave.clear() + this.iasked.clear() + this.backoff.clear() + this.outbound.clear() + this.gossipTracer.clear() + this.seenCache.clear() + if (this.fastMsgIdCache) this.fastMsgIdCache.clear() + if (this.directPeerInitial) clearTimeout(this.directPeerInitial) + } + + /** FOR DEBUG ONLY - Dump peer stats for all peers. 
Data is cloned, safe to mutate */ + dumpPeerScoreStats(): PeerScoreStatsDump { + return this.score.dumpPeerScoreStats() + } + + /** + * On an inbound stream opened + */ + private onIncomingStream({ protocol, stream, connection }: any) { + const peerId = connection.remotePeer + const peer = this.addPeer(peerId, protocol, connection.stat.direction) + const inboundStream = peer.attachInboundStream(stream) + + this.pipePeerReadStream(peerId, inboundStream).catch((err) => this.log(err)) } /** - * Decode a Uint8Array into an RPC object - * Overrided to use an extended protocol-specific protobuf decoder - * @override + * Registrar notifies an established connection with pubsub protocol */ - _decodeRpc(bytes: Uint8Array) { - return RPC.decode(bytes) + protected async onPeerConnected(peerId: PeerId, conn: Connection): Promise { + this.log('connected %s %s', peerId.toB58String(), conn.stat.direction) + + try { + const { stream, protocol } = await conn.newStream(this.multicodecs) + const peer = this.addPeer(peerId, protocol, conn.stat.direction) + await peer.attachOutboundStream(stream) + } catch (err) { + this.log(err) + } + + // Immediately send my own subscriptions to the newly established conn + if (this.subscriptions.size > 0) { + this.sendSubscriptions(peerId.toB58String(), Array.from(this.subscriptions), true) + } } /** - * Encode an RPC object into a Uint8Array - * Overrided to use an extended protocol-specific protobuf encoder - * @override + * Registrar notifies a closing connection with pubsub protocol */ - _encodeRpc(rpc: RPC) { - return RPC.encode(rpc).finish() + protected onPeerDisconnected(peerId: PeerId, err?: Error): void { + const idB58Str = peerId.toB58String() + + this.log('connection ended', idB58Str, err) + this.removePeer(peerId) } /** * Add a peer to the router - * @override */ - _addPeer(peerId: PeerId, protocol: string): PeerStreams { - const p = super._addPeer(peerId, protocol) + private addPeer(peerId: PeerId, protocol: string, direction: ConnectionDirection): PeerStreams { + const peerIdStr = peerId.toB58String() + let peerStreams = this.peers.get(peerIdStr) + + // If peer streams already exists, do nothing + if (peerStreams === undefined) { + // else create a new peer streams + this.log('new peer %s', peerIdStr) + + peerStreams = new PeerStreams({ + id: peerId, + protocol + }) + + this.peers.set(peerIdStr, peerStreams) + peerStreams.addListener('close', () => this.removePeer(peerId)) + } // Add to peer scoring - this.score.addPeer(peerId.toB58String()) - - // track the connection direction - let outbound = false - for (const c of this._libp2p.connectionManager.getAll(peerId)) { - if (c.stat.direction === 'outbound') { - if (Array.from(c.registry.values()).some((rvalue) => protocol === rvalue.protocol)) { - outbound = true - break - } - } + this.score.addPeer(peerIdStr) + if (protocol === constants.FloodsubID) { + this.floodsubPeers.add(peerIdStr) + } + this.metrics?.peersPerProtocol.inc({ protocol }, 1) + + // track the connection direction. 
Don't allow to unset outbound + if (!this.outbound.get(peerIdStr)) { + this.outbound.set(peerIdStr, direction === 'outbound') } - this.outbound.set(p.id.toB58String(), outbound) - return p + return peerStreams } /** * Removes a peer from the router - * @override */ - _removePeer(peerId: PeerId): PeerStreams | undefined { - const peerStreams = super._removePeer(peerId) + private removePeer(peerId: PeerId): PeerStreams | undefined { const id = peerId.toB58String() + const peerStreams = this.peers.get(id) + + if (peerStreams != null) { + this.metrics?.peersPerProtocol.inc({ protocol: peerStreams.protocol }, -1) + + // delete peer streams. Must delete first to prevent re-entracy loop in .close() + this.log('delete peer %s', id) + this.peers.delete(id) + + // close peer streams + peerStreams.close() + + // remove peer from topics map + for (const peers of this.topics.values()) { + peers.delete(id) + } + } // Remove this peer from the mesh // eslint-disable-next-line no-unused-vars - for (const peers of this.mesh.values()) { - peers.delete(id) + for (const [topicStr, peers] of this.mesh) { + if (peers.delete(id) === true) { + this.metrics?.onRemoveFromMesh(topicStr, ChurnReason.Dc, 1) + } } // Remove this peer from the fanout @@ -368,6 +663,8 @@ export default class Gossipsub extends Pubsub { peers.delete(id) } + // Remove from floodsubPeers + this.floodsubPeers.delete(id) // Remove from gossip mapping this.gossip.delete(id) // Remove from control mapping @@ -383,81 +680,373 @@ export default class Gossipsub extends Pubsub { return peerStreams } + // API METHODS + + get started(): boolean { + return this.status.code === GossipStatusCode.started + } + + /** + * Get a the peer-ids in a topic mesh + */ + getMeshPeers(topic: TopicStr): PeerIdStr[] { + const peersInTopic = this.mesh.get(topic) + return peersInTopic ? Array.from(peersInTopic) : [] + } + + /** + * Get a list of the peer-ids that are subscribed to one topic. + */ + getSubscribers(topic: TopicStr): PeerIdStr[] { + const peersInTopic = this.topics.get(topic) + return peersInTopic ? Array.from(peersInTopic) : [] + } + + /** + * Get the list of topics which the peer is subscribed to. + */ + getTopics(): TopicStr[] { + return Array.from(this.subscriptions) + } + + // TODO: Reviewing Pubsub API + + // MESSAGE METHODS + + /** + * Responsible for processing each RPC message received by other peers. + */ + async pipePeerReadStream(peerId: PeerId, stream: AsyncIterable): Promise { + try { + await pipe(stream, async (source) => { + for await (const data of source) { + try { + // TODO: Check max gossip message size, before decodeRpc() + + // Note: `stream` maybe a BufferList which requires calling .slice to concat all the chunks into + // a single Buffer instance that protobuf js can deal with. + // Otherwise it will throw: + // ``` + // Error: illegal buffer + // at create_typed_array (js-libp2p-gossipsub/node_modules/protobufjs/src/reader.js:47:15) + const rpcBytes = data instanceof Uint8Array ? data : data.slice() + + // Note: This function may throw, it must be wrapped in a try {} catch {} to prevent closing the stream. + // TODO: What should we do if the entire RPC is invalid? 
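// A minimal sketch of one way to resolve the two TODOs above (cap the payload size before
// decoding, and drop an undecodable RPC without closing the peer stream). MAX_RPC_SIZE_BYTES
// and tryDecodeRpc are illustrative names only, not part of this codebase; RPC and IRPC are
// the protobuf types already imported at the top of this file.
const MAX_RPC_SIZE_BYTES = 1 << 20 // assumption: 1 MiB cap, tune per deployment

function tryDecodeRpc(rpcBytes: Uint8Array, log: (e: Error) => void): IRPC | null {
  if (rpcBytes.length > MAX_RPC_SIZE_BYTES) {
    log(new Error(`ignoring oversized RPC (${rpcBytes.length} bytes)`))
    return null
  }
  try {
    return RPC.decode(rpcBytes)
  } catch (e) {
    // the whole RPC is undecodable: drop it instead of tearing down the peer stream
    log(e as Error)
    return null
  }
}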
+ const rpc = RPC.decode(rpcBytes) + + this.metrics?.onRpcRecv(rpc, rpcBytes.length) + + // Since processRpc may be overridden entirely in unsafe ways, + // the simplest/safest option here is to wrap in a function and capture all errors + // to prevent a top-level unhandled exception + // This processing of rpc messages should happen without awaiting full validation/execution of prior messages + if (this.opts.awaitRpcHandler) { + await this.handleReceivedRpc(peerId, rpc) + } else { + this.handleReceivedRpc(peerId, rpc).catch((err) => this.log(err)) + } + } catch (e) { + this.log(e as Error) + } + } + }) + } catch (err) { + this.onPeerDisconnected(peerId, err as Error) + } + } + /** * Handles an rpc request from a peer - * - * @override */ - async _processRpc(id: string, peerStreams: PeerStreams, rpc: RPC): Promise { - if (await super._processRpc(id, peerStreams, rpc)) { - if (rpc.control) { - await this._processRpcControlMessage(id, rpc.control) + async handleReceivedRpc(from: PeerId, rpc: IRPC): Promise { + // Check if peer is graylisted in which case we ignore the event + if (!this.acceptFrom(from.toB58String())) { + this.log('received message from unacceptable peer %s', from.toB58String()) + this.metrics?.rpcRecvNotAccepted.inc() + return + } + + this.log('rpc from %s', from.toB58String()) + + // Handle received subscriptions + if (rpc.subscriptions && rpc.subscriptions.length > 0) { + // update peer subscriptions + rpc.subscriptions.forEach((subOpt) => { + this.handleReceivedSubscription(from, subOpt) + }) + + this.emit('pubsub:subscription-change', from, rpc.subscriptions) + } + + // Handle messages + // TODO: (up to limit) + if (rpc.messages) { + for (const message of rpc.messages) { + const handleReceivedMessagePromise = this.handleReceivedMessage(from, message) + // Should never throw, but handle just in case + .catch((err) => this.log(err)) + + if (this.opts.awaitRpcMessageHandler) { + await handleReceivedMessagePromise + } } - return true } - return false + + // Handle control messages + if (rpc.control) { + await this.handleControlMessage(from.toB58String(), rpc.control) + } } /** - * Handles an rpc control message from a peer + * Handles a subscription change from a peer */ - async _processRpcControlMessage(id: string, controlMsg: RPC.IControlMessage): Promise { - if (controlMsg === undefined) { + handleReceivedSubscription(from: PeerId, subOpt: RPC.ISubOpts): void { + if (subOpt.topicID == null) { return } - const iwant = controlMsg.ihave ? this._handleIHave(id, controlMsg.ihave) : [] - const ihave = controlMsg.iwant ? this._handleIWant(id, controlMsg.iwant) : [] - const prune = controlMsg.graft ? 
await this._handleGraft(id, controlMsg.graft) : [] - controlMsg.prune && this._handlePrune(id, controlMsg.prune) + this.log('subscription update from %s topic %s', from.toB58String(), subOpt.topicID) - if (!iwant.length && !ihave.length && !prune.length) { - return + let topicSet = this.topics.get(subOpt.topicID) + if (topicSet == null) { + topicSet = new Set() + this.topics.set(subOpt.topicID, topicSet) } - const outRpc = createGossipRpc(ihave, { iwant, prune }) - this._sendRpc(id, outRpc) + if (subOpt.subscribe) { + // subscribe peer to new topic + topicSet.add(from.toB58String()) + } else { + // unsubscribe from existing topic + topicSet.delete(from.toB58String()) + } + + // TODO: rust-libp2p has A LOT more logic here } /** - * Process incoming message, - * emitting locally and forwarding on to relevant floodsub and gossipsub peers - * @override + * Handles a newly received message from an RPC. + * May forward to all peers in the mesh. */ - async _processRpcMessage(msg: InMessage): Promise { - let canonicalMsgIdStr - if (this.getFastMsgIdStr && this.fastMsgIdCache) { - // check duplicate - const fastMsgIdStr = this.getFastMsgIdStr(msg) - canonicalMsgIdStr = this.fastMsgIdCache.get(fastMsgIdStr) - if (canonicalMsgIdStr !== undefined) { - this.score.duplicateMessage(msg, canonicalMsgIdStr) + async handleReceivedMessage(from: PeerId, rpcMsg: RPC.IMessage): Promise { + this.metrics?.onMsgRecvPreValidation(rpcMsg.topic) + + const validationResult = await this.validateReceivedMessage(from, rpcMsg) + + this.metrics?.onMsgRecvResult(rpcMsg.topic, validationResult.code) + + switch (validationResult.code) { + case MessageStatus.duplicate: + // Report the duplicate + this.score.duplicateMessage(from.toB58String(), validationResult.msgId, rpcMsg.topic) + this.mcache.observeDuplicate(validationResult.msgId, from.toB58String()) + return + + case MessageStatus.invalid: + // invalid messages received + // metrics.register_invalid_message(&raw_message.topic) + // Tell peer_score about reject + // Reject the original source, and any duplicates we've seen from other peers. + if (validationResult.msgId) { + this.score.rejectMessage(from.toB58String(), validationResult.msgId, rpcMsg.topic, validationResult.reason) + this.gossipTracer.rejectMessage(validationResult.msgId, validationResult.reason) + } else { + this.score.rejectInvalidMessage(from.toB58String(), rpcMsg.topic) + } + + this.metrics?.onMsgRecvInvalid(rpcMsg.topic, validationResult) return + + case MessageStatus.valid: { + const { msgIdStr, msg } = validationResult + // Tells score that message arrived (but is maybe not fully validated yet). + // Consider the message as delivered for gossip promises. 
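// A minimal sketch of the application side of the asyncValidation flow referenced in the
// options above: when asyncValidation is true, the forwardMessage() call further below is
// skipped and the application reports the validation outcome itself. `gossipsub` and
// `applicationValidate` are assumptions, and only the name reportMessageValidationResult()
// is documented in the GossipsubOpts comment; its exact signature here is assumed.
declare const gossipsub: Gossipsub // an already-constructed router (assumption)
declare function applicationValidate(topic: TopicStr, data: Uint8Array): Promise<boolean> // app hook (assumption)

gossipsub.on('gossipsub:message', async ({ propagationSource, msgId, msg }) => {
  const ok = await applicationValidate(msg.topic, msg.data)
  gossipsub.reportMessageValidationResult(
    msgId,
    propagationSource,
    ok ? MessageAcceptance.Accept : MessageAcceptance.Reject
  )
})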
+ this.score.validateMessage(msgIdStr) + this.gossipTracer.deliverMessage(msgIdStr) + + // Add the message to our memcache + this.mcache.put(msgIdStr, rpcMsg) + + // Dispatch the message to the user if we are subscribed to the topic + if (this.subscriptions.has(rpcMsg.topic)) { + const isFromSelf = this.peerId !== undefined && this.peerId.equals(from) + + if (!isFromSelf || this.opts.emitSelf) { + super.emit('gossipsub:message', { + propagationSource: from, + msgId: msgIdStr, + msg + }) + // TODO: Add option to switch between emit per topic or all messages in one + super.emit(rpcMsg.topic, msg) + } + } + + // Forward the message to mesh peers, if no validation is required + // If asyncValidation is ON, expect the app layer to call reportMessageValidationResult(), then forward + if (!this.opts.asyncValidation) { + // TODO: in rust-libp2p + // .forward_msg(&msg_id, raw_message, Some(propagation_source)) + this.forwardMessage(msgIdStr, rpcMsg, from.toB58String()) + } } - canonicalMsgIdStr = messageIdToString(await this.getMsgId(msg)) + } + } + + // # Ethereum consensus message-id function + // + // ## phase0 + // + // The message-id of a gossipsub message MUST be the following 20 byte value computed from the message data: + // + // - If message.data has a valid snappy decompression, set message-id to the first 20 bytes of the SHA256 hash of + // the concatenation of MESSAGE_DOMAIN_VALID_SNAPPY with the snappy decompressed message data, + // i.e. SHA256(MESSAGE_DOMAIN_VALID_SNAPPY + snappy_decompress(message.data))[:20]. + // + // - Otherwise, set message-id to the first 20 bytes of the SHA256 hash of the concatenation of + // MESSAGE_DOMAIN_INVALID_SNAPPY with the raw message data, + // i.e. SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + message.data)[:20]. + // + // ## altair + // + // The derivation of the message-id has changed starting with Altair to incorporate the message topic along with the + // message data. These are fields of the Message Protobuf, and interpreted as empty byte strings if missing. The + // message-id MUST be the following 20 byte value computed from the message: + // + // - If message.data has a valid snappy decompression, set message-id to the first 20 bytes of the SHA256 hash of + // the concatenation of the following data: MESSAGE_DOMAIN_VALID_SNAPPY, the length of the topic byte string + // (encoded as little-endian uint64), the topic byte string, and the snappy decompressed message data: + // i.e. SHA256(MESSAGE_DOMAIN_VALID_SNAPPY + uint_to_bytes(uint64(len(message.topic))) + message.topic + snappy_decompress(message.data))[:20]. + // + // - Otherwise, set message-id to the first 20 bytes of the SHA256 hash of the concatenation of the following data: + // MESSAGE_DOMAIN_INVALID_SNAPPY, the length of the topic byte string (encoded as little-endian uint64),the topic + // byte string, and the raw message data: + // i.e. SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + uint_to_bytes(uint64(len(message.topic))) + message.topic + message.data)[:20]. + + /** + * Handles a newly received message from an RPC. + * May forward to all peers in the mesh. + */ + async validateReceivedMessage(propagationSource: PeerId, rpcMsg: RPC.IMessage): Promise { + this.metrics?.onMsgRecvPreValidation(rpcMsg.topic) + + // Fast message ID stuff + const fastMsgIdStr = this.fastMsgIdFn?.(rpcMsg) + const msgIdCached = fastMsgIdStr && this.fastMsgIdCache?.get(fastMsgIdStr) + + if (msgIdCached) { + // This message has been seen previously. 
Ignore it + return { code: MessageStatus.duplicate, msgId: msgIdCached } + } + + // Perform basic validation on message and convert to RawGossipsubMessage for fastMsgIdFn() + const validationResult = await validateToRawMessage(this.globalSignaturePolicy, rpcMsg) + + if (!validationResult.valid) { + return { code: MessageStatus.invalid, reason: RejectReason.Error, error: validationResult.error } + } + + // Try and perform the data transform to the message. If it fails, consider it invalid. + let data: Uint8Array + try { + const transformedData = rpcMsg.data ?? new Uint8Array(0) + data = this.dataTransform ? this.dataTransform.inboundTransform(rpcMsg.topic, transformedData) : transformedData + } catch (e) { + this.log('Invalid message, transform failed', e) + return { code: MessageStatus.invalid, reason: RejectReason.Error, error: ValidateError.TransformFailed } + } + + const msg: GossipsubMessage = { + from: rpcMsg.from === null ? undefined : rpcMsg.from, + data: data, + seqno: rpcMsg.seqno === null ? undefined : rpcMsg.seqno, + topic: rpcMsg.topic + } + + // TODO: Check if message is from a blacklisted source or propagation origin + // - Reject any message from a blacklisted peer + // - Also reject any message that originated from a blacklisted peer + // - reject messages claiming to be from ourselves but not locally published + + // Calculate the message id on the transformed data. + const msgIdStr = msgIdCached ?? messageIdToString(await this.msgIdFn(msg)) - this.fastMsgIdCache.put(fastMsgIdStr, canonicalMsgIdStr) + // Add the message to the duplicate caches + if (fastMsgIdStr) this.fastMsgIdCache?.put(fastMsgIdStr, msgIdStr) + + if (this.seenCache.has(msgIdStr)) { + return { code: MessageStatus.duplicate, msgId: msgIdStr } } else { - // check duplicate - canonicalMsgIdStr = messageIdToString(await this.getMsgId(msg)) - if (this.seenCache.has(canonicalMsgIdStr)) { - this.score.duplicateMessage(msg, canonicalMsgIdStr) - return + this.seenCache.put(msgIdStr) + } + + // (Optional) Provide custom validation here with dynamic validators per topic + // NOTE: This custom topicValidator() must resolve fast (< 100ms) to allow scores + // to not penalize peers for long validation times. + const topicValidator = this.topicValidators.get(rpcMsg.topic) + if (topicValidator != null) { + let acceptance: MessageAcceptance + // Use try {} catch {} in case topicValidator() is syncronous + try { + acceptance = await topicValidator(msg.topic, msg, propagationSource) + } catch (e) { + const errCode = (e as { code: string }).code + if (errCode === constants.ERR_TOPIC_VALIDATOR_IGNORE) acceptance = MessageAcceptance.Ignore + if (errCode === constants.ERR_TOPIC_VALIDATOR_REJECT) acceptance = MessageAcceptance.Reject + else acceptance = MessageAcceptance.Ignore } + + if (acceptance !== MessageAcceptance.Accept) { + return { code: MessageStatus.invalid, reason: rejectReasonFromAcceptance(acceptance), msgId: msgIdStr } + } + } + + return { code: MessageStatus.valid, msgIdStr, msg } + } + + /** + * Return score of a peer. 
+ */ + getScore(peerId: PeerIdStr): number { + return this.score.score(peerId) + } + + /** + * Send an rpc object to a peer with subscriptions + */ + private sendSubscriptions(toPeer: PeerIdStr, topics: string[], subscribe: boolean): void { + this.sendRpc(toPeer, { + subscriptions: topics.map((topic) => ({ topicID: topic, subscribe })), + messages: [] + }) + } + + /** + * Handles an rpc control message from a peer + */ + private async handleControlMessage(id: PeerIdStr, controlMsg: RPC.IControlMessage): Promise { + if (controlMsg === undefined) { + return } - // put in cache - this.seenCache.put(canonicalMsgIdStr) + const iwant = controlMsg.ihave ? this.handleIHave(id, controlMsg.ihave) : [] + const ihave = controlMsg.iwant ? this.handleIWant(id, controlMsg.iwant) : [] + const prune = controlMsg.graft ? await this.handleGraft(id, controlMsg.graft) : [] + controlMsg.prune && this.handlePrune(id, controlMsg.prune) - await this.score.validateMessage(canonicalMsgIdStr) - await super._processRpcMessage(msg) + if (!iwant.length && !ihave.length && !prune.length) { + return + } + + this.sendRpc(id, createGossipRpc(ihave, { iwant, prune })) } /** * Whether to accept a message from a peer - * @override */ - _acceptFrom(id: string): boolean { + private acceptFrom(id: PeerIdStr): boolean { if (this.direct.has(id)) { return true } @@ -482,35 +1071,22 @@ export default class Gossipsub extends Pubsub { this.acceptFromWhitelist.delete(id) } - return score >= this._options.scoreThresholds.graylistThreshold - } - - /** - * Validate incoming message - * @override - */ - async validate(msg: InMessage): Promise { - try { - await super.validate(msg) - } catch (e) { - const canonicalMsgIdStr = await this.getCanonicalMsgIdStr(msg) - this.score.rejectMessage(msg, canonicalMsgIdStr, e.code) - this.gossipTracer.rejectMessage(canonicalMsgIdStr, e.code) - throw e - } + return score >= this.opts.scoreThresholds.graylistThreshold } /** * Handles IHAVE messages */ - _handleIHave(id: string, ihave: RPC.IControlIHave[]): RPC.IControlIWant[] { + private handleIHave(id: PeerIdStr, ihave: RPC.IControlIHave[]): RPC.IControlIWant[] { if (!ihave.length) { return [] } + // we ignore IHAVE gossip from any peer whose score is below the gossips threshold const score = this.score.score(id) - if (score < this._options.scoreThresholds.gossipThreshold) { + if (score < this.opts.scoreThresholds.gossipThreshold) { this.log('IHAVE: ignoring peer %s with score below threshold [ score = %d ]', id, score) + this.metrics?.ihaveRcvIgnored.inc({ reason: IHaveIgnoreReason.LowScore }) return [] } @@ -523,30 +1099,36 @@ export default class Gossipsub extends Pubsub { id, peerhave ) + this.metrics?.ihaveRcvIgnored.inc({ reason: IHaveIgnoreReason.MaxIhave }) return [] } const iasked = this.iasked.get(id) ?? 
0 if (iasked >= constants.GossipsubMaxIHaveLength) { this.log('IHAVE: peer %s has already advertised too many messages (%d); ignoring', id, iasked) + this.metrics?.ihaveRcvIgnored.inc({ reason: IHaveIgnoreReason.MaxIasked }) return [] } // string msgId => msgId - const iwant = new Map() + const iwant = new Map() ihave.forEach(({ topicID, messageIDs }) => { if (!topicID || !messageIDs || !this.mesh.has(topicID)) { return } + let idonthave = 0 + messageIDs.forEach((msgId) => { const msgIdStr = messageIdToString(msgId) - if (this.seenCache.has(msgIdStr)) { - return + if (!this.seenCache.has(msgIdStr)) { + iwant.set(msgIdStr, msgId) + idonthave++ } - iwant.set(msgIdStr, msgId) }) + + this.metrics?.onIhaveRcv(topicID, messageIDs.length, idonthave) }) if (!iwant.size) { @@ -581,53 +1163,62 @@ export default class Gossipsub extends Pubsub { * Handles IWANT messages * Returns messages to send back to peer */ - _handleIWant(id: string, iwant: RPC.IControlIWant[]): RPC.IMessage[] { + private handleIWant(id: PeerIdStr, iwant: RPC.IControlIWant[]): RPC.IMessage[] { if (!iwant.length) { return [] } + // we don't respond to IWANT requests from any per whose score is below the gossip threshold const score = this.score.score(id) - if (score < this._options.scoreThresholds.gossipThreshold) { + if (score < this.opts.scoreThresholds.gossipThreshold) { this.log('IWANT: ignoring peer %s with score below threshold [score = %d]', id, score) return [] } - // @type {Map} - const ihave = new Map() + + const ihave = new Map() + const iwantByTopic = new Map() + let iwantDonthave = 0 iwant.forEach(({ messageIDs }) => { messageIDs && messageIDs.forEach((msgId) => { const msgIdStr = messageIdToString(msgId) - const [msg, count] = this.messageCache.getForPeer(msgIdStr, id) - if (!msg) { + const entry = this.mcache.getWithIWantCount(msgIdStr, id) + if (!entry) { + iwantDonthave++ return } - if (count > constants.GossipsubGossipRetransmission) { + iwantByTopic.set(entry.msg.topic, 1 + (iwantByTopic.get(entry.msg.topic) ?? 0)) + + if (entry.count > constants.GossipsubGossipRetransmission) { this.log('IWANT: Peer %s has asked for message %s too many times: ignoring request', id, msgId) return } - ihave.set(msgIdStr, msg) + + ihave.set(msgIdStr, entry.msg) }) }) + this.metrics?.onIwantRcv(iwantByTopic, iwantDonthave) + if (!ihave.size) { return [] } this.log('IWANT: Sending %d messages to %s', ihave.size, id) - return Array.from(ihave.values()).map(utils.normalizeOutRpcMessage) + return Array.from(ihave.values()) } /** * Handles Graft messages */ - async _handleGraft(id: string, graft: RPC.IControlGraft[]): Promise { - const prune: string[] = [] + private async handleGraft(id: PeerIdStr, graft: RPC.IControlGraft[]): Promise { + const prune: TopicStr[] = [] const score = this.score.score(id) const now = this._now() - let doPX = this._options.doPX + let doPX = this.opts.doPX graft.forEach(({ topicID }) => { if (!topicID) { @@ -661,17 +1252,17 @@ export default class Gossipsub extends Pubsub { if (typeof expire === 'number' && now < expire) { this.log('GRAFT: ignoring backed off peer %s', id) // add behavioral penalty - this.score.addPenalty(id, 1) + this.score.addPenalty(id, 1, ScorePenalty.GraftBackoff) // no PX doPX = false // check the flood cutoff -- is the GRAFT coming too fast? 
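// Worked example of the flood cutoff computed below, assuming the library defaults of
// GossipsubPruneBackoff = 60_000 ms and GossipsubGraftFloodThreshold = 10_000 ms:
// a peer PRUNEd at t = 0 is backed off until expire = 60_000, so
// floodCutoff = 60_000 + 10_000 - 60_000 = 10_000. A GRAFT arriving within 10 s of the
// PRUNE therefore earns this extra behavioural penalty on top of the one added above.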
const floodCutoff = expire + constants.GossipsubGraftFloodThreshold - constants.GossipsubPruneBackoff if (now < floodCutoff) { // extra penalty - this.score.addPenalty(id, 1) + this.score.addPenalty(id, 1, ScorePenalty.GraftBackoff) } // refresh the backoff - this._addBackoff(id, topicID) + this.addBackoff(id, topicID) prune.push(topicID) return } @@ -685,58 +1276,65 @@ export default class Gossipsub extends Pubsub { // but we won't PX to them doPX = false // add/refresh backoff so that we don't reGRAFT too early even if the score decays - this._addBackoff(id, topicID) + this.addBackoff(id, topicID) return } // check the number of mesh peers; if it is at (or over) Dhi, we only accept grafts // from peers with outbound connections; this is a defensive check to restrict potential // mesh takeover attacks combined with love bombing - if (peersInMesh.size >= this._options.Dhi && !this.outbound.get(id)) { + if (peersInMesh.size >= this.opts.Dhi && !this.outbound.get(id)) { prune.push(topicID) - this._addBackoff(id, topicID) + this.addBackoff(id, topicID) return } this.log('GRAFT: Add mesh link from %s in %s', id, topicID) this.score.graft(id, topicID) peersInMesh.add(id) + + this.metrics?.onAddToMesh(topicID, InclusionReason.Subscribed, 1) }) if (!prune.length) { return [] } - return Promise.all(prune.map((topic) => this._makePrune(id, topic, doPX))) + return Promise.all(prune.map((topic) => this.makePrune(id, topic, doPX))) } /** * Handles Prune messages */ - _handlePrune(id: string, prune: RPC.IControlPrune[]): void { + private handlePrune(id: PeerIdStr, prune: RPC.IControlPrune[]): void { const score = this.score.score(id) prune.forEach(({ topicID, backoff, peers }) => { if (!topicID) { return } + const peersInMesh = this.mesh.get(topicID) if (!peersInMesh) { return } + this.log('PRUNE: Remove mesh link to %s in %s', id, topicID) this.score.prune(id, topicID) - peersInMesh.delete(id) + if (peersInMesh.delete(id) === true) { + this.metrics?.onRemoveFromMesh(topicID, ChurnReason.Unsub, 1) + } + // is there a backoff specified by the peer? 
if so obey it if (typeof backoff === 'number' && backoff > 0) { - this._doAddBackoff(id, topicID, backoff * 1000) + this.doAddBackoff(id, topicID, backoff * 1000) } else { - this._addBackoff(id, topicID) + this.addBackoff(id, topicID) } // PX if (peers && peers.length) { // we ignore PX from peers with insufficient scores - if (score < this._options.scoreThresholds.acceptPXThreshold) { + if (score < this.opts.scoreThresholds.acceptPXThreshold) { this.log( 'PRUNE: ignoring PX from peer %s with insufficient score [score = %d, topic = %s]', id, @@ -745,7 +1343,7 @@ export default class Gossipsub extends Pubsub { ) return } - this._pxConnect(peers) + this.pxConnect(peers) } }) } @@ -753,15 +1351,15 @@ export default class Gossipsub extends Pubsub { /** * Add standard backoff log for a peer in a topic */ - _addBackoff(id: string, topic: string): void { - this._doAddBackoff(id, topic, constants.GossipsubPruneBackoff) + private addBackoff(id: PeerIdStr, topic: TopicStr): void { + this.doAddBackoff(id, topic, constants.GossipsubPruneBackoff) } /** * Add backoff expiry interval for a peer in a topic * @param interval backoff duration in milliseconds */ - _doAddBackoff(id: string, topic: string, interval: number): void { + private doAddBackoff(id: PeerIdStr, topic: TopicStr, interval: number): void { let backoff = this.backoff.get(topic) if (!backoff) { backoff = new Map() @@ -777,17 +1375,17 @@ export default class Gossipsub extends Pubsub { /** * Apply penalties from broken IHAVE/IWANT promises */ - _applyIwantPenalties(): void { + private applyIwantPenalties(): void { this.gossipTracer.getBrokenPromises().forEach((count, p) => { this.log("peer %s didn't follow up in %d IWANT requests; adding penalty", p, count) - this.score.addPenalty(p, count) + this.score.addPenalty(p, count, ScorePenalty.BrokenPromise) }) } /** * Clear expired backoff expiries */ - _clearBackoff(): void { + private clearBackoff(): void { // we only clear once every GossipsubPruneBackoffTicks ticks to avoid iterating over the maps too much if (this.heartbeatTicks % constants.GossipsubPruneBackoffTicks !== 0) { return @@ -809,23 +1407,18 @@ export default class Gossipsub extends Pubsub { /** * Maybe reconnect to direct peers */ - _directConnect(): void { - // we only do this every few ticks to allow pending connections to complete and account for - // restarts/downtime - if (this.heartbeatTicks % constants.GossipsubDirectConnectTicks !== 0) { - return - } - - const toconnect: string[] = [] + private directConnect(): void { + const toconnect: PeerIdStr[] = [] this.direct.forEach((id) => { const peer = this.peers.get(id) if (!peer || !peer.isWritable) { toconnect.push(id) } }) + if (toconnect.length) { toconnect.forEach((id) => { - this._connect(id) + this.connect(id) }) } } @@ -833,12 +1426,12 @@ export default class Gossipsub extends Pubsub { /** * Maybe attempt connection given signed peer records */ - async _pxConnect(peers: RPC.IPeerInfo[]): Promise { + private async pxConnect(peers: RPC.IPeerInfo[]): Promise { if (peers.length > constants.GossipsubPrunePeers) { shuffle(peers) peers = peers.slice(0, constants.GossipsubPrunePeers) } - const toconnect: string[] = [] + const toconnect: PeerIdStr[] = [] await Promise.all( peers.map(async (pi) => { @@ -846,7 +1439,7 @@ export default class Gossipsub extends Pubsub { return } - const p = PeerId.createFromBytes(pi.peerID) + const p = createFromBytes(pi.peerID) const id = p.toB58String() if (this.peers.has(id)) { @@ -883,281 +1476,437 @@ export default class Gossipsub extends Pubsub 
{ return } - toconnect.forEach((id) => this._connect(id)) + toconnect.forEach((id) => this.connect(id)) } /** - * Mounts the gossipsub protocol onto the libp2p node and sends our - * our subscriptions to every peer connected - * @override + * Connect to a peer using the gossipsub protocol */ - async start(): Promise { - await super.start() - this.score.start() - // connect to direct peers - this._directPeerInitial = setTimeout(() => { - this.direct.forEach((id) => { - this._connect(id) - }) - }, constants.GossipsubDirectConnectInitialDelay) - - if (!this._heartbeatTimer) { - const heartbeat = this._heartbeat.bind(this) - - const timeout = setTimeout(() => { - heartbeat() - this._heartbeatTimer!.runPeriodically(heartbeat, this._options.heartbeatInterval) - }, constants.GossipsubHeartbeatInitialDelay) - - this._heartbeatTimer = { - _intervalId: undefined, - runPeriodically: (fn, period) => { - this._heartbeatTimer!._intervalId = setInterval(fn, period) - }, - cancel: () => { - clearTimeout(timeout) - clearInterval(this._heartbeatTimer!._intervalId as NodeJS.Timeout) - } - } - } + private connect(id: PeerIdStr): void { + this.log('Initiating connection with %s', id) + this._libp2p.dialProtocol(createFromB58String(id), this.multicodecs) } /** - * Unmounts the gossipsub protocol and shuts down every connection - * @override + * Subscribes to a topic */ - async stop(): Promise { - await super.stop() - - if (this._heartbeatTimer) { - this._heartbeatTimer.cancel() - this._heartbeatTimer = null + subscribe(topic: TopicStr): void { + if (this.status.code !== GossipStatusCode.started) { + throw new Error('Pubsub has not started') } - this.score.stop() - - this.mesh = new Map() - this.fanout = new Map() - this.lastpub = new Map() - this.gossip = new Map() - this.control = new Map() - this.peerhave = new Map() - this.iasked = new Map() - this.backoff = new Map() - this.outbound = new Map() - this.gossipTracer.clear() - this.seenCache.clear() - if (this.fastMsgIdCache) this.fastMsgIdCache.clear() - clearTimeout(this._directPeerInitial) - } + if (!this.subscriptions.has(topic)) { + this.subscriptions.add(topic) - /** - * Connect to a peer using the gossipsub protocol - */ - _connect(id: string): void { - this.log('Initiating connection with %s', id) - this._libp2p.dialProtocol(PeerId.createFromB58String(id), this.multicodecs) - } + for (const peerId of this.peers.keys()) { + this.sendSubscriptions(peerId, [topic], true) + } + } - /** - * Subscribes to a topic - * @override - */ - subscribe(topic: string): void { - super.subscribe(topic) this.join(topic) } /** * Unsubscribe to a topic - * @override */ - unsubscribe(topic: string): void { - super.unsubscribe(topic) + unsubscribe(topic: TopicStr): void { + if (this.status.code !== GossipStatusCode.started) { + throw new Error('Pubsub is not started') + } + + const wasSubscribed = this.subscriptions.delete(topic) + + this.log('unsubscribe from %s - am subscribed %s', topic, wasSubscribed) + + if (wasSubscribed) { + for (const peerId of this.peers.keys()) { + this.sendSubscriptions(peerId, [topic], false) + } + } + this.leave(topic) } /** * Join topic */ - join(topic: string): void { - if (!this.started) { + join(topic: TopicStr): void { + if (this.status.code !== GossipStatusCode.started) { throw new Error('Gossipsub has not started') } + + // if we are already in the mesh, return + if (this.mesh.has(topic)) { + return + } + this.log('JOIN %s', topic) + this.metrics?.onJoin(topic) + + const toAdd = new Set() + // check if we have mesh_n peers in 
fanout[topic] and add them to the mesh if we do, + // removing the fanout entry. const fanoutPeers = this.fanout.get(topic) if (fanoutPeers) { - // these peers have a score above the publish threshold, which may be negative - // so drop the ones with a negative score + // Remove fanout entry and the last published time + this.fanout.delete(topic) + this.fanoutLastpub.delete(topic) + + // remove explicit peers, peers with negative scores, and backoffed peers fanoutPeers.forEach((id) => { - if (this.score.score(id) < 0) { - fanoutPeers.delete(id) + // TODO:rust-libp2p checks `self.backoffs.is_backoff_with_slack()` + if (!this.direct.has(id) && this.score.score(id) >= 0) { + toAdd.add(id) } }) - if (fanoutPeers.size < this._options.D) { - // we need more peers; eager, as this would get fixed in the next heartbeat - this.getGossipPeers(topic, this._options.D - fanoutPeers.size, (id: string): boolean => { - // filter our current peers, direct peers, and peers with negative scores - return !fanoutPeers.has(id) && !this.direct.has(id) && this.score.score(id) >= 0 - }).forEach((id) => fanoutPeers.add(id)) - } - this.mesh.set(topic, fanoutPeers) - this.fanout.delete(topic) - this.lastpub.delete(topic) - } else { - const peers = this.getGossipPeers(topic, this._options.D, (id: string): boolean => { - // filter direct peers and peers with negative score - return !this.direct.has(id) && this.score.score(id) >= 0 + + this.metrics?.onAddToMesh(topic, InclusionReason.Fanout, toAdd.size) + } + + // check if we need to get more peers, which we randomly select + if (toAdd.size < this.opts.D) { + const fanoutCount = toAdd.size + const newPeers = this.getRandomGossipPeers( + topic, + this.opts.D, + (id: PeerIdStr): boolean => + // filter direct peers and peers with negative score + !toAdd.has(id) && !this.direct.has(id) && this.score.score(id) >= 0 + ) + + newPeers.forEach((peer) => { + toAdd.add(peer) }) - this.mesh.set(topic, peers) + + this.metrics?.onAddToMesh(topic, InclusionReason.Random, toAdd.size - fanoutCount) } + + this.mesh.set(topic, toAdd) + this.mesh.get(topic)!.forEach((id) => { this.log('JOIN: Add mesh link to %s in %s', id, topic) - this._sendGraft(id, topic) + this.sendGraft(id, topic) + + // rust-libp2p + // - peer_score.graft() + // - Self::control_pool_add() + // - peer_added_to_mesh() }) } /** * Leave topic */ - leave(topic: string): void { - if (!this.started) { + leave(topic: TopicStr): void { + if (this.status.code !== GossipStatusCode.started) { throw new Error('Gossipsub has not started') } + this.log('LEAVE %s', topic) + this.metrics?.onLeave(topic) // Send PRUNE to mesh peers const meshPeers = this.mesh.get(topic) if (meshPeers) { meshPeers.forEach((id) => { this.log('LEAVE: Remove mesh link to %s in %s', id, topic) - this._sendPrune(id, topic) + this.sendPrune(id, topic) }) this.mesh.delete(topic) } } - /** - * Return the canonical message-id of a message as a string - * - * If a fast message-id is set: Try 1. the application cache 2. the fast cache 3. `getMsgId()` - * If a fast message-id is NOT set: Just `getMsgId()` - */ - async getCanonicalMsgIdStr(msg: InMessage): Promise { - return this.fastMsgIdCache && this.getFastMsgIdStr - ? this.getCachedMsgIdStr(msg) ?? - this.fastMsgIdCache.get(this.getFastMsgIdStr(msg)) ?? 
- messageIdToString(await this.getMsgId(msg)) - : messageIdToString(await this.getMsgId(msg)) - } + private selectPeersToForward(topic: TopicStr, propagationSource?: PeerIdStr, excludePeers?: Set) { + const tosend = new Set() - /** - * An application should override this function to return its cached message id string without computing it. - * Return undefined if message id is not found. - * If a fast message id function is not defined, this function is ignored. - */ - getCachedMsgIdStr(msg: InMessage): string | undefined { - return undefined - } + // Add explicit peers + const peersInTopic = this.topics.get(topic) + if (peersInTopic) { + this.direct.forEach((peer) => { + if (peersInTopic.has(peer) && propagationSource !== peer && !excludePeers?.has(peer)) { + tosend.add(peer) + } + }) - /** - * Publish messages - * - * @override - */ - async _publish(msg: InMessage): Promise { - const msgIdStr = await this.getCanonicalMsgIdStr(msg) - if (msg.receivedFrom !== this.peerId.toB58String()) { - this.score.deliverMessage(msg, msgIdStr) - this.gossipTracer.deliverMessage(msgIdStr) + // As of Mar 2022, spec + golang-libp2p include this while rust-libp2p does not + // rust-libp2p: https://github.com/libp2p/rust-libp2p/blob/6cc3b4ec52c922bfcf562a29b5805c3150e37c75/protocols/gossipsub/src/behaviour.rs#L2693 + // spec: https://github.com/libp2p/specs/blob/10712c55ab309086a52eec7d25f294df4fa96528/pubsub/gossipsub/gossipsub-v1.0.md?plain=1#L361 + this.floodsubPeers.forEach((peer) => { + if ( + peersInTopic.has(peer) && + propagationSource !== peer && + !excludePeers?.has(peer) && + this.score.score(peer) >= this.opts.scoreThresholds.publishThreshold + ) { + tosend.add(peer) + } + }) } - // put in seen cache - this.seenCache.put(msgIdStr) + // add mesh peers + const meshPeers = this.mesh.get(topic) + if (meshPeers && meshPeers.size > 0) { + meshPeers.forEach((peer) => { + if (propagationSource !== peer && !excludePeers?.has(peer)) { + tosend.add(peer) + } + }) + } - this.messageCache.put(msg, msgIdStr) + return tosend + } - const tosend = new Set() - msg.topicIDs.forEach((topic) => { - const peersInTopic = this.topics.get(topic) - if (!peersInTopic) { - return - } + private selectPeersToPublish(topic: TopicStr): { + tosend: Set + tosendCount: ToSendGroupCount + } { + const tosend = new Set() + const tosendCount: ToSendGroupCount = { + direct: 0, + floodsub: 0, + mesh: 0, + fanout: 0 + } - if (this._options.floodPublish && msg.receivedFrom === this.peerId.toB58String()) { - // flood-publish behavior - // send to direct peers and _all_ peers meeting the publishThreshold + const peersInTopic = this.topics.get(topic) + if (peersInTopic) { + // flood-publish behavior + // send to direct peers and _all_ peers meeting the publishThreshold + if (this.opts.floodPublish) { peersInTopic.forEach((id) => { - if (this.direct.has(id) || this.score.score(id) >= this._options.scoreThresholds.publishThreshold) { + if (this.direct.has(id)) { tosend.add(id) + tosendCount.direct++ + } else if (this.score.score(id) >= this.opts.scoreThresholds.publishThreshold) { + tosend.add(id) + tosendCount.floodsub++ } }) - } else { - // non-flood-publish behavior - // send to direct peers, subscribed floodsub peers - // and some mesh peers above publishThreshold + } - // direct peers + // non-flood-publish behavior + // send to direct peers, subscribed floodsub peers + // and some mesh peers above publishThreshold + else { + // direct peers (if subscribed) this.direct.forEach((id) => { - tosend.add(id) + if (peersInTopic.has(id)) { + 
tosend.add(id) + tosendCount.direct++ + } }) // floodsub peers - peersInTopic.forEach((id) => { - const peerStreams = this.peers.get(id) - if (!peerStreams) { - return - } - if ( - peerStreams.protocol === constants.FloodsubID && - this.score.score(id) >= this._options.scoreThresholds.publishThreshold - ) { + // Note: if there are no floodsub peers, we save a loop through peersInTopic Map + this.floodsubPeers.forEach((id) => { + if (peersInTopic.has(id) && this.score.score(id) >= this.opts.scoreThresholds.publishThreshold) { tosend.add(id) + tosendCount.floodsub++ } }) // Gossipsub peers handling - let meshPeers = this.mesh.get(topic) - if (!meshPeers || !meshPeers.size) { - // We are not in the mesh for topic, use fanout peers - meshPeers = this.fanout.get(topic) - if (!meshPeers) { + const meshPeers = this.mesh.get(topic) + if (meshPeers && meshPeers.size > 0) { + meshPeers.forEach((peer) => { + tosend.add(peer) + tosendCount.mesh++ + }) + } + + // We are not in the mesh for topic, use fanout peers + else { + const fanoutPeers = this.fanout.get(topic) + if (fanoutPeers && fanoutPeers.size > 0) { + fanoutPeers.forEach((peer) => { + tosend.add(peer) + tosendCount.fanout++ + }) + } + + // We have no fanout peers, select mesh_n of them and add them to the fanout + else { // If we are not in the fanout, then pick peers in topic above the publishThreshold - const peers = this.getGossipPeers(topic, this._options.D, (id) => { - return this.score.score(id) >= this._options.scoreThresholds.publishThreshold + const newFanoutPeers = this.getRandomGossipPeers(topic, this.opts.D, (id) => { + return this.score.score(id) >= this.opts.scoreThresholds.publishThreshold }) - if (peers.size > 0) { - meshPeers = peers - this.fanout.set(topic, peers) - } else { - meshPeers = new Set() + if (newFanoutPeers.size > 0) { + this.fanout.set(topic, newFanoutPeers) + + newFanoutPeers.forEach((peer) => { + tosend.add(peer) + tosendCount.fanout++ + }) } } - // Store the latest publishing time - this.lastpub.set(topic, this._now()) - } - meshPeers!.forEach((peer) => { - tosend.add(peer) - }) + // We are publishing to fanout peers - update the time we published + this.fanoutLastpub.set(topic, this._now()) + } } + } + + return { tosend, tosendCount } + } + + /** + * Forwards a message from our peers. + * + * For messages published by us (the app layer), this class uses `publish` + */ + private forwardMessage( + msgIdStr: string, + rawMsg: RPC.IMessage, + propagationSource?: PeerIdStr, + excludePeers?: Set + ): void { + // message is fully validated inform peer_score + if (propagationSource) { + this.score.deliverMessage(propagationSource, msgIdStr, rawMsg.topic) + } + + const tosend = this.selectPeersToForward(rawMsg.topic, propagationSource, excludePeers) + + // Note: Don't throw if tosend is empty, we can have a mesh with a single peer + + // forward the message to peers + const rpc = createGossipRpc([rawMsg]) + tosend.forEach((id) => { + // self.send_message(*peer_id, event.clone())?; + this.sendRpc(id, rpc) }) - // Publish messages to peers - const rpc = createGossipRpc([utils.normalizeOutRpcMessage(msg)]) + + this.metrics?.onForwardMsg(rawMsg.topic, tosend.size) + } + + /** + * App layer publishes a message to peers, return number of peers this message is published to + * Note: `async` due to crypto only if `StrictSign`, otherwise it's a sync fn. + * + * For messages not from us, this class uses `forwardMessage`. 
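 *
 * A call-site sketch (the topic string and payload below are illustrative only, not part of this change):
 *
 *   const peersSentTo = await gossipsub.publish('my-topic', new TextEncoder().encode('hello'))
 *   // rejects with 'PublishError.Duplicate' for an already-seen message, and with
 *   // 'PublishError.InsufficientPeers' when there is nobody to send to (unless allowPublishToZeroPeers is set)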
+ */ + async publish(topic: TopicStr, data: Uint8Array): Promise { + const transformedData = this.dataTransform ? this.dataTransform.outboundTransform(topic, data) : data + + // Prepare raw message with user's publishConfig + const rawMsg = await buildRawMessage(this.publishConfig, topic, transformedData) + + // calculate the message id from the un-transformed data + const msg: GossipsubMessage = { + from: rawMsg.from === null ? undefined : rawMsg.from, + data, // the uncompressed form + seqno: rawMsg.seqno === null ? undefined : rawMsg.seqno, + topic + } + const msgId = await this.msgIdFn(msg) + const msgIdStr = messageIdToString(msgId) + + if (this.seenCache.has(msgIdStr)) { + // This message has already been seen. We don't re-publish messages that have already + // been published on the network. + throw Error('PublishError.Duplicate') + } + + const { tosend, tosendCount } = this.selectPeersToPublish(rawMsg.topic) + + if (tosend.size === 0 && !this.opts.allowPublishToZeroPeers) { + throw Error('PublishError.InsufficientPeers') + } + + // If the message isn't a duplicate and we have sent it to some peers add it to the + // duplicate cache and memcache. + this.seenCache.put(msgIdStr) + this.mcache.put(msgIdStr, rawMsg) + + // If the message is anonymous or has a random author add it to the published message ids cache. + this.publishedMessageIds.put(msgIdStr) + + // Send to set of peers aggregated from direct, mesh, fanout + const rpc = createGossipRpc([rawMsg]) tosend.forEach((id) => { - if (id === msg.receivedFrom || id === msg.from) { - return - } - this._sendRpc(id, rpc) + // self.send_message(*peer_id, event.clone())?; + this.sendRpc(id, rpc) }) + + this.metrics?.onPublishMsg(topic, tosendCount, tosend.size, rawMsg.data ? rawMsg.data.length : 0) + + // Dispatch the message to the user if we are subscribed to the topic + if (this.opts.emitSelf && this.subscriptions.has(topic)) { + super.emit('gossipsub:message', { + propagationSource: this.peerId.toB58String(), + msgId: msgIdStr, + msg + }) + // TODO: Add option to switch between emit per topic or all messages in one + super.emit(topic, msg) + } + + return tosend.size + } + + /// This function should be called when [`GossipsubConfig::validate_messages()`] is `true` after + /// the message got validated by the caller. Messages are stored in the ['Memcache'] and + /// validation is expected to be fast enough that the messages should still exist in the cache. + /// There are three possible validation outcomes and the outcome is given in acceptance. + /// + /// If acceptance = [`MessageAcceptance::Accept`] the message will get propagated to the + /// network. The `propagation_source` parameter indicates who the message was received by and + /// will not be forwarded back to that peer. + /// + /// If acceptance = [`MessageAcceptance::Reject`] the message will be deleted from the memcache + /// and the P₄ penalty will be applied to the `propagation_source`. + // + /// If acceptance = [`MessageAcceptance::Ignore`] the message will be deleted from the memcache + /// but no P₄ penalty will be applied. + /// + /// This function will return true if the message was found in the cache and false if was not + /// in the cache anymore. + /// + /// This should only be called once per message. 
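///
/// A hedged usage sketch: `isValidForApp` is an application-defined check (not part of this
/// library), and `msg`, `msgIdStr` and `propagationSource` are assumed to come from the
/// application's receive hook:
///
///   const acceptance = (await isValidForApp(msg)) ? MessageAcceptance.Accept : MessageAcceptance.Reject
///   gossipsub.reportMessageValidationResult(msgIdStr, propagationSource, acceptance)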
+ reportMessageValidationResult(msgId: MsgIdStr, propagationSource: PeerId, acceptance: MessageAcceptance): void { + if (acceptance === MessageAcceptance.Accept) { + const cacheEntry = this.mcache.validate(msgId) + this.metrics?.onReportValidationMcacheHit(cacheEntry !== null) + + if (cacheEntry) { + const { message: rawMsg, originatingPeers } = cacheEntry + // message is fully validated inform peer_score + this.score.deliverMessage(propagationSource.toB58String(), msgId, rawMsg.topic) + + this.forwardMessage(msgId, cacheEntry.message, propagationSource.toB58String(), originatingPeers) + this.metrics?.onReportValidation(rawMsg.topic, acceptance) + } + // else, Message not in cache. Ignoring forwarding + } + + // Not valid + else { + const cacheEntry = this.mcache.remove(msgId) + this.metrics?.onReportValidationMcacheHit(cacheEntry !== null) + + if (cacheEntry) { + const rejectReason = rejectReasonFromAcceptance(acceptance) + const { message: rawMsg, originatingPeers } = cacheEntry + + // Tell peer_score about reject + // Reject the original source, and any duplicates we've seen from other peers. + this.score.rejectMessage(propagationSource.toB58String(), msgId, rawMsg.topic, rejectReason) + for (const peer of originatingPeers) { + this.score.rejectMessage(peer, msgId, rawMsg.topic, rejectReason) + } + + this.metrics?.onReportValidation(rawMsg.topic, acceptance) + } + // else, Message not in cache. Ignoring forwarding + } } /** * Sends a GRAFT message to a peer */ - _sendGraft(id: string, topic: string): void { + private sendGraft(id: PeerIdStr, topic: string): void { const graft = [ { topicID: topic @@ -1165,46 +1914,50 @@ export default class Gossipsub extends Pubsub { ] const out = createGossipRpc([], { graft }) - this._sendRpc(id, out) + this.sendRpc(id, out) } /** * Sends a PRUNE message to a peer */ - async _sendPrune(id: string, topic: string): Promise { - const prune = [await this._makePrune(id, topic, this._options.doPX)] + private async sendPrune(id: PeerIdStr, topic: string): Promise { + const prune = [await this.makePrune(id, topic, this.opts.doPX)] const out = createGossipRpc([], { prune }) - this._sendRpc(id, out) + this.sendRpc(id, out) } /** - * @override + * Send an rpc object to a peer */ - _sendRpc(id: string, outRpc: IRPC): void { + private sendRpc(id: PeerIdStr, rpc: IRPC): void { const peerStreams = this.peers.get(id) if (!peerStreams || !peerStreams.isWritable) { + this.log(`Cannot send RPC to ${id} as there is no open stream to it available`) return } // piggyback control message retries const ctrl = this.control.get(id) if (ctrl) { - this._piggybackControl(id, outRpc, ctrl) + this.piggybackControl(id, rpc, ctrl) this.control.delete(id) } // piggyback gossip const ihave = this.gossip.get(id) if (ihave) { - this._piggybackGossip(id, outRpc, ihave) + this.piggybackGossip(id, rpc, ihave) this.gossip.delete(id) } - peerStreams.write(RPC.encode(outRpc).finish()) + const rpcBytes = RPC.encode(rpc).finish() + peerStreams.write(rpcBytes) + + this.metrics?.onRpcSent(rpc, rpcBytes.length) } - _piggybackControl(id: string, outRpc: IRPC, ctrl: RPC.IControlMessage): void { + private piggybackControl(id: PeerIdStr, outRpc: IRPC, ctrl: RPC.IControlMessage): void { const tograft = (ctrl.graft || []).filter(({ topicID }) => ((topicID && this.mesh.get(topicID)) || new Set()).has(id) ) @@ -1224,7 +1977,7 @@ export default class Gossipsub extends Pubsub { } } - _piggybackGossip(id: string, outRpc: IRPC, ihave: RPC.IControlIHave[]): void { + private piggybackGossip(id: PeerIdStr, outRpc: 
IRPC, ihave: RPC.IControlIHave[]): void { if (!outRpc.control) { outRpc.control = { ihave: [], iwant: [], graft: [], prune: [] } } @@ -1236,29 +1989,29 @@ export default class Gossipsub extends Pubsub { * @param tograft peer id => topic[] * @param toprune peer id => topic[] */ - async _sendGraftPrune( + private async sendGraftPrune( tograft: Map, toprune: Map, noPX: Map ): Promise { - const doPX = this._options.doPX + const doPX = this.opts.doPX for (const [id, topics] of tograft) { const graft = topics.map((topicID) => ({ topicID })) let prune: RPC.IControlPrune[] = [] // If a peer also has prunes, process them now const pruning = toprune.get(id) if (pruning) { - prune = await Promise.all(pruning.map((topicID) => this._makePrune(id, topicID, doPX && !noPX.get(id)))) + prune = await Promise.all(pruning.map((topicID) => this.makePrune(id, topicID, doPX && !noPX.get(id)))) toprune.delete(id) } const outRpc = createGossipRpc([], { graft, prune }) - this._sendRpc(id, outRpc) + this.sendRpc(id, outRpc) } for (const [id, topics] of toprune) { - const prune = await Promise.all(topics.map((topicID) => this._makePrune(id, topicID, doPX && !noPX.get(id)))) + const prune = await Promise.all(topics.map((topicID) => this.makePrune(id, topicID, doPX && !noPX.get(id)))) const outRpc = createGossipRpc([], { prune }) - this._sendRpc(id, outRpc) + this.sendRpc(id, outRpc) } } @@ -1266,8 +2019,8 @@ export default class Gossipsub extends Pubsub { * Emits gossip to peers in a particular topic * @param exclude peers to exclude */ - _emitGossip(topic: string, exclude: Set): void { - const messageIDs = this.messageCache.getGossipIDs(topic) + private emitGossip(topic: string, exclude: Set): void { + const messageIDs = this.mcache.getGossipIDs(topic) if (!messageIDs.length) { return } @@ -1300,13 +2053,13 @@ export default class Gossipsub extends Pubsub { !exclude.has(id) && !this.direct.has(id) && hasGossipProtocol(peerStreams.protocol) && - this.score.score(id) >= this._options.scoreThresholds.gossipThreshold + this.score.score(id) >= this.opts.scoreThresholds.gossipThreshold ) { peersToGossip.push(id) } }) - let target = this._options.Dlazy + let target = this.opts.Dlazy const factor = constants.GossipsubGossipFactor * peersToGossip.length if (factor > target) { target = factor @@ -1325,7 +2078,7 @@ export default class Gossipsub extends Pubsub { // coverage when we do truncate peerMessageIDs = shuffle(peerMessageIDs.slice()).slice(0, constants.GossipsubMaxIHaveLength) } - this._pushGossip(id, { + this.pushGossip(id, { topicID: topic, messageIDs: peerMessageIDs }) @@ -1335,25 +2088,23 @@ export default class Gossipsub extends Pubsub { /** * Flush gossip and control messages */ - _flush(): void { + private flush(): void { // send gossip first, which will also piggyback control for (const [peer, ihave] of this.gossip.entries()) { this.gossip.delete(peer) - const out = createGossipRpc([], { ihave }) - this._sendRpc(peer, out) + this.sendRpc(peer, createGossipRpc([], { ihave })) } // send the remaining control messages for (const [peer, control] of this.control.entries()) { this.control.delete(peer) - const out = createGossipRpc([], { graft: control.graft, prune: control.prune }) - this._sendRpc(peer, out) + this.sendRpc(peer, createGossipRpc([], { graft: control.graft, prune: control.prune })) } } /** * Adds new IHAVE messages to pending gossip */ - _pushGossip(id: string, controlIHaveMsgs: RPC.IControlIHave): void { + private pushGossip(id: PeerIdStr, controlIHaveMsgs: RPC.IControlIHave): void { this.log('Add 
gossip to %s', id) const gossip = this.gossip.get(id) || [] this.gossip.set(id, gossip.concat(controlIHaveMsgs)) @@ -1369,7 +2120,8 @@ export default class Gossipsub extends Pubsub { /** * Make a PRUNE control message for a peer in a topic */ - async _makePrune(id: string, topic: string, doPX: boolean): Promise { + private async makePrune(id: PeerIdStr, topic: string, doPX: boolean): Promise { + this.score.prune(id, topic) if (this.peers.get(id)!.protocol === constants.GossipsubIDv10) { // Gossipsub v1.0 -- no backoff, the peer won't be able to parse it anyway return { @@ -1388,7 +2140,7 @@ export default class Gossipsub extends Pubsub { } } // select peers for Peer eXchange - const peers = this.getGossipPeers(topic, constants.GossipsubPrunePeers, (xid: string): boolean => { + const peers = this.getRandomGossipPeers(topic, constants.GossipsubPrunePeers, (xid) => { return xid !== id && this.score.score(xid) >= 0 }) const px = await Promise.all( @@ -1397,7 +2149,7 @@ export default class Gossipsub extends Pubsub { // the peer ID and let the pruned peer find them in the DHT -- we can't trust // unsigned address records through PX anyways // Finding signed records in the DHT is not supported at the time of writing in js-libp2p - const peerId = PeerId.createFromB58String(p) + const peerId = createFromB58String(p) return { peerID: peerId.toBytes(), signedPeerRecord: await this._libp2p.peerStore.addressBook.getRawEnvelope(peerId) @@ -1411,11 +2163,40 @@ export default class Gossipsub extends Pubsub { } } + private runHeartbeat = () => { + const timer = this.metrics?.heartbeatDuration.startTimer() + try { + this.heartbeat() + } catch (e) { + this.log('Error running heartbeat', e as Error) + } + if (timer) timer() + + // Schedule the next run if still in started status + if (this.status.code === GossipStatusCode.started) { + // Clear previous timeout before overwriting `status.heartbeatTimeout`, it should be completed though. + clearTimeout(this.status.heartbeatTimeout) + + // NodeJS setInterval function is inexact; calls drift by a few milliseconds on each call. + // To run the heartbeat precisely, setTimeout() must be used, recomputing the delay on every loop. + let msToNextHeartbeat = (Date.now() - this.status.hearbeatStartMs) % this.opts.heartbeatInterval + + // If too close to next heartbeat, skip one + if (msToNextHeartbeat < this.opts.heartbeatInterval * 0.25) { + msToNextHeartbeat += this.opts.heartbeatInterval + this.metrics?.heartbeatSkipped.inc() + } + + this.status.heartbeatTimeout = setTimeout(this.runHeartbeat, msToNextHeartbeat) + } + } + /** * Maintains the mesh and fanout maps in gossipsub.
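 *
 * Roughly, each run: clears expired backoffs and the IHAVE/IASKED counters, applies penalties for
 * broken IWANT promises, re-dials direct peers every GossipsubDirectConnectTicks, prunes
 * negative-score mesh peers, grafts up towards D when a mesh falls below Dlo, prunes back down to D
 * (preferring to keep high-score and outbound peers) when it grows above Dhi, opportunistically
 * grafts when the median mesh score is too low, emits IHAVE gossip, and expires stale fanout state.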
*/ - _heartbeat(): void { - const { D, Dlo, Dhi, Dscore, Dout, fanoutTTL } = this._options + private heartbeat(): void { + const { D, Dlo, Dhi, Dscore, Dout, fanoutTTL } = this.opts + this.heartbeatTicks++ // cache scores throught the heartbeat @@ -1437,29 +2218,39 @@ export default class Gossipsub extends Pubsub { const noPX = new Map() // clean up expired backoffs - this._clearBackoff() + this.clearBackoff() // clean up peerhave/iasked counters this.peerhave.clear() + this.metrics?.cacheSize.set({ cache: 'iasked' }, this.iasked.size) this.iasked.clear() // apply IWANT request penalties - this._applyIwantPenalties() + this.applyIwantPenalties() // ensure direct peers are connected - this._directConnect() + if (this.heartbeatTicks % constants.GossipsubDirectConnectTicks === 0) { + // we only do this every few ticks to allow pending connections to complete and account for restarts/downtime + this.directConnect() + } + + // EXTRA: Prune caches + this.fastMsgIdCache?.prune() + this.seenCache.prune() + this.gossipTracer.prune() + this.publishedMessageIds.prune() // maintain the mesh for topics we have joined this.mesh.forEach((peers, topic) => { // prune/graft helper functions (defined per topic) - const prunePeer = (id: string): void => { + const prunePeer = (id: PeerIdStr, reason: ChurnReason): void => { this.log('HEARTBEAT: Remove mesh link to %s in %s', id, topic) - // update peer score - this.score.prune(id, topic) + // no need to update peer score here as we do it in makePrune // add prune backoff record - this._addBackoff(id, topic) + this.addBackoff(id, topic) // remove peer from mesh peers.delete(id) + this.metrics?.onRemoveFromMesh(topic, reason, 1) // add to toprune const topics = toprune.get(id) if (!topics) { @@ -1468,12 +2259,14 @@ export default class Gossipsub extends Pubsub { topics.push(topic) } } - const graftPeer = (id: string): void => { + + const graftPeer = (id: PeerIdStr, reason: InclusionReason): void => { this.log('HEARTBEAT: Add mesh link to %s in %s', id, topic) // update peer score this.score.graft(id, topic) // add peer to mesh peers.add(id) + this.metrics?.onAddToMesh(topic, reason, 1) // add to tograft const topics = tograft.get(id) if (!topics) { @@ -1486,9 +2279,12 @@ export default class Gossipsub extends Pubsub { // drop all peers with negative score, without PX peers.forEach((id) => { const score = getScore(id) + + // Record the score + if (score < 0) { this.log('HEARTBEAT: Prune peer %s with negative score: score=%d, topic=%s', id, score, topic) - prunePeer(id) + prunePeer(id, ChurnReason.BadScore) noPX.set(id, true) } }) @@ -1497,12 +2293,12 @@ export default class Gossipsub extends Pubsub { if (peers.size < Dlo) { const backoff = this.backoff.get(topic) const ineed = D - peers.size - const peersSet = this.getGossipPeers(topic, ineed, (id) => { + const peersSet = this.getRandomGossipPeers(topic, ineed, (id) => { // filter out mesh peers, direct peers, peers we are backing off, peers with negative score return !peers.has(id) && !this.direct.has(id) && (!backoff || !backoff.has(id)) && getScore(id) >= 0 }) - peersSet.forEach(graftPeer) + peersSet.forEach((p) => graftPeer(p, InclusionReason.NotEnough)) } // do we have to many peers? @@ -1555,7 +2351,7 @@ export default class Gossipsub extends Pubsub { } // prune the excess peers - peersArray.slice(D).forEach(prunePeer) + peersArray.slice(D).forEach((p) => prunePeer(p, ChurnReason.Excess)) } // do we have enough outbound peers? 
@@ -1572,10 +2368,11 @@ export default class Gossipsub extends Pubsub { if (outbound < Dout) { const ineed = Dout - outbound const backoff = this.backoff.get(topic) - this.getGossipPeers(topic, ineed, (id: string): boolean => { + const newPeers = this.getRandomGossipPeers(topic, ineed, (id: string): boolean => { // filter our current mesh peers, direct peers, peers we are backing off, peers with negative score return !peers.has(id) && !this.direct.has(id) && (!backoff || !backoff.has(id)) && getScore(id) >= 0 - }).forEach(graftPeer) + }) + newPeers.forEach((p) => graftPeer(p, InclusionReason.Outbound)) } } @@ -1594,36 +2391,30 @@ export default class Gossipsub extends Pubsub { const medianScore = getScore(peersList[medianIndex]) // if the median score is below the threshold, select a better peer (if any) and GRAFT - if (medianScore < this._options.scoreThresholds.opportunisticGraftThreshold) { + if (medianScore < this.opts.scoreThresholds.opportunisticGraftThreshold) { const backoff = this.backoff.get(topic) - const peersToGraft = this.getGossipPeers( - topic, - constants.GossipsubOpportunisticGraftPeers, - (id: string): boolean => { - // filter out current mesh peers, direct peers, peers we are backing off, peers below or at threshold - return ( - peers.has(id) && !this.direct.has(id) && (!backoff || !backoff.has(id)) && getScore(id) > medianScore - ) - } - ) + const peersToGraft = this.getRandomGossipPeers(topic, constants.GossipsubOpportunisticGraftPeers, (id) => { + // filter out current mesh peers, direct peers, peers we are backing off, peers below or at threshold + return peers.has(id) && !this.direct.has(id) && (!backoff || !backoff.has(id)) && getScore(id) > medianScore + }) peersToGraft.forEach((id) => { this.log('HEARTBEAT: Opportunistically graft peer %s on topic %s', id, topic) - graftPeer(id) + graftPeer(id, InclusionReason.Opportunistic) }) } } // 2nd arg are mesh peers excluded from gossip. We have already pushed // messages to them, so its redundant to gossip IHAVEs. - this._emitGossip(topic, peers) + this.emitGossip(topic, peers) }) // expire fanout for topics we haven't published to in a while const now = this._now() - this.lastpub.forEach((lastpb, topic) => { + this.fanoutLastpub.forEach((lastpb, topic) => { if (lastpb + fanoutTTL < now) { this.fanout.delete(topic) - this.lastpub.delete(topic) + this.fanoutLastpub.delete(topic) } }) @@ -1632,7 +2423,7 @@ export default class Gossipsub extends Pubsub { // checks whether our peers are still in the topic and have a score above the publish threshold const topicPeers = this.topics.get(topic) fanoutPeers.forEach((id) => { - if (!topicPeers!.has(id) || getScore(id) < this._options.scoreThresholds.publishThreshold) { + if (!topicPeers!.has(id) || getScore(id) < this.opts.scoreThresholds.publishThreshold) { fanoutPeers.delete(id) } }) @@ -1640,12 +2431,10 @@ export default class Gossipsub extends Pubsub { // do we need more peers? 
if (fanoutPeers.size < D) { const ineed = D - fanoutPeers.size - const peersSet = this.getGossipPeers(topic, ineed, (id: string): boolean => { + const peersSet = this.getRandomGossipPeers(topic, ineed, (id) => { // filter out existing fanout peers, direct peers, and peers with score above the publish threshold return ( - !fanoutPeers.has(id) && - !this.direct.has(id) && - getScore(id) >= this._options.scoreThresholds.publishThreshold + !fanoutPeers.has(id) && !this.direct.has(id) && getScore(id) >= this.opts.scoreThresholds.publishThreshold ) }) peersSet.forEach((id) => { @@ -1655,17 +2444,17 @@ export default class Gossipsub extends Pubsub { // 2nd arg are fanout peers excluded from gossip. // We have already pushed messages to them, so its redundant to gossip IHAVEs - this._emitGossip(topic, fanoutPeers) + this.emitGossip(topic, fanoutPeers) }) // send coalesced GRAFT/PRUNE messages (will piggyback gossip) - this._sendGraftPrune(tograft, toprune, noPX) + this.sendGraftPrune(tograft, toprune, noPX) // flush pending gossip that wasn't piggybacked above - this._flush() + this.flush() // advance the message history window - this.messageCache.shift() + this.mcache.shift() this.emit('gossipsub:heartbeat') } @@ -1676,7 +2465,11 @@ export default class Gossipsub extends Pubsub { * * @param filter a function to filter acceptable peers */ - private getGossipPeers(topic: string, count: number, filter: (id: string) => boolean = () => true): Set { + private getRandomGossipPeers( + topic: string, + count: number, + filter: (id: string) => boolean = () => true + ): Set { const peersInTopic = this.topics.get(topic) if (!peersInTopic) { return new Set() @@ -1703,4 +2496,77 @@ export default class Gossipsub extends Pubsub { return new Set(peers) } + + private onScrapeMetrics(metrics: Metrics): void { + /* Data structure sizes */ + metrics.mcacheSize.set(this.mcache.size) + // Arbitrary size + metrics.cacheSize.set({ cache: 'direct' }, this.direct.size) + metrics.cacheSize.set({ cache: 'seenCache' }, this.seenCache.size) + metrics.cacheSize.set({ cache: 'fastMsgIdCache' }, this.fastMsgIdCache?.size ?? 
0) + metrics.cacheSize.set({ cache: 'publishedMessageIds' }, this.publishedMessageIds.size) + metrics.cacheSize.set({ cache: 'mcache' }, this.mcache.size) + metrics.cacheSize.set({ cache: 'score' }, this.score.size) + metrics.cacheSize.set({ cache: 'gossipTracer.promises' }, this.gossipTracer.size) + metrics.cacheSize.set({ cache: 'gossipTracer.requests' }, this.gossipTracer.requestMsByMsgSize) + // Bounded by topic + metrics.cacheSize.set({ cache: 'topics' }, this.topics.size) + metrics.cacheSize.set({ cache: 'subscriptions' }, this.subscriptions.size) + metrics.cacheSize.set({ cache: 'mesh' }, this.mesh.size) + metrics.cacheSize.set({ cache: 'fanout' }, this.fanout.size) + // Bounded by peer + metrics.cacheSize.set({ cache: 'peers' }, this.peers.size) + metrics.cacheSize.set({ cache: 'acceptFromWhitelist' }, this.acceptFromWhitelist.size) + metrics.cacheSize.set({ cache: 'gossip' }, this.gossip.size) + metrics.cacheSize.set({ cache: 'control' }, this.control.size) + metrics.cacheSize.set({ cache: 'peerhave' }, this.peerhave.size) + metrics.cacheSize.set({ cache: 'outbound' }, this.outbound.size) + // 2D nested data structure + let backoffSize = 0 + for (const backoff of this.backoff.values()) { + backoffSize += backoff.size + } + metrics.cacheSize.set({ cache: 'backoff' }, backoffSize) + + // Peer counts + + for (const [topicStr, peers] of this.topics) { + metrics.topicPeersCount.set({ topicStr }, peers.size) + } + + for (const [topicStr, peers] of this.mesh) { + metrics.meshPeerCounts.set({ topicStr }, peers.size) + } + + // Peer scores + + const scores: number[] = [] + const scoreByPeer = new Map() + metrics.behaviourPenalty.reset() + + for (const peerIdStr of this.peers.keys()) { + const score = this.score.score(peerIdStr) + scores.push(score) + scoreByPeer.set(peerIdStr, score) + metrics.behaviourPenalty.observe(this.score.peerStats.get(peerIdStr)?.behaviourPenalty ?? 
0) + } + + metrics.registerScores(scores, this.opts.scoreThresholds) + + // Breakdown score per mesh topicLabel + + metrics.registerScorePerMesh(this.mesh, scoreByPeer) + + // Breakdown on each score weight + + const sw = computeAllPeersScoreWeights( + this.peers.keys(), + this.score.peerStats, + this.score.params, + this.score.peerIPs, + metrics.topicStrToLabel + ) + + metrics.registerScoreWeights(sw) + } } diff --git a/ts/interfaces.ts b/ts/interfaces.ts deleted file mode 100644 index d59ab582..00000000 --- a/ts/interfaces.ts +++ /dev/null @@ -1,11 +0,0 @@ -import { InMessage } from 'libp2p-interfaces/src/pubsub' -import { Multiaddr } from 'multiaddr' -import PeerId = require('peer-id') - -export interface AddrInfo { - id: PeerId - addrs: Multiaddr[] -} - -export type MessageIdFunction = (msg: InMessage) => Promise | Uint8Array -export type MessageIdStrFunction = (msg: InMessage) => Promise diff --git a/ts/message-cache.ts b/ts/message-cache.ts index 16756d16..773088c5 100644 --- a/ts/message-cache.ts +++ b/ts/message-cache.ts @@ -1,63 +1,111 @@ -import { InMessage } from 'libp2p-interfaces/src/pubsub' -import { MessageIdFunction } from './interfaces' +import { RPC } from './message/rpc' +import { MsgIdStr, PeerIdStr, TopicStr } from './types' import { messageIdFromString, messageIdToString } from './utils' export interface CacheEntry { msgId: Uint8Array - topics: string[] + topic: TopicStr +} + +type MessageCacheEntry = { + message: RPC.IMessage + /** + * Tracks if the message has been validated by the app layer and thus forwarded + */ + validated: boolean + /** + * Tracks peers that sent this message before it has been validated by the app layer + */ + originatingPeers: Set + /** + * For every message and peer the number of times this peer asked for the message + */ + iwantCounts: Map } export class MessageCache { - msgs = new Map() - peertx = new Map>() + msgs = new Map() + history: CacheEntry[][] = [] - gossip: number - msgIdFn: MessageIdFunction - constructor(gossip: number, history: number) { - for (let i = 0; i < history; i++) { + /** + * Holds history of messages in timebounded history arrays + */ + constructor( + /** + * he number of indices in the cache history used for gossiping. That means that a message + * won't get gossiped anymore when shift got called `gossip` many times after inserting the + * message in the cache. + */ + private readonly gossip: number, + historyCapacity: number + ) { + for (let i = 0; i < historyCapacity; i++) { this.history[i] = [] } + } - this.gossip = gossip + get size(): number { + return this.msgs.size } /** * Adds a message to the current window and the cache + * Returns true if the message is not known and is inserted in the cache */ - async put(msg: InMessage, msgIdStr: string): Promise { - this.msgs.set(msgIdStr, msg) + put(msgIdStr: MsgIdStr, msg: RPC.IMessage): boolean { + // Don't add duplicate entries to the cache. 
+ if (this.msgs.has(msgIdStr)) { + return false + } + + this.msgs.set(msgIdStr, { + message: msg, + validated: false, + originatingPeers: new Set(), + iwantCounts: new Map() + }) + const msgId = messageIdFromString(msgIdStr) - this.history[0].push({ msgId: msgId, topics: msg.topicIDs }) + this.history[0].push({ msgId: msgId, topic: msg.topic }) + + return true + } + + observeDuplicate(msgId: MsgIdStr, fromPeerIdStr: PeerIdStr): void { + const entry = this.msgs.get(msgId) + + if ( + entry && + // if the message is already validated, we don't need to store extra peers sending us + // duplicates as the message has already been forwarded + !entry.validated + ) { + entry.originatingPeers.add(fromPeerIdStr) + } } /** * Retrieves a message from the cache by its ID, if it is still present */ - get(msgId: Uint8Array): InMessage | undefined { - return this.msgs.get(messageIdToString(msgId)) + get(msgId: Uint8Array): RPC.IMessage | undefined { + return this.msgs.get(messageIdToString(msgId))?.message } /** - * Retrieves a message from the cache by its ID, if it is present - * for a specific peer. - * Returns the message and the number of times the peer has requested the message + * Increases the iwant count for the given message by one and returns the message together + * with the iwant if the message exists. */ - getForPeer(msgIdStr: string, p: string): [InMessage | undefined, number] { + getWithIWantCount(msgIdStr: string, p: string): { msg: RPC.IMessage; count: number } | null { const msg = this.msgs.get(msgIdStr) if (!msg) { - return [undefined, 0] + return null } - let peertx = this.peertx.get(msgIdStr) - if (!peertx) { - peertx = new Map() - this.peertx.set(msgIdStr, peertx) - } - const count = (peertx.get(p) ?? 0) + 1 - peertx.set(p, count) + const count = (msg.iwantCounts.get(p) ?? 0) + 1 + msg.iwantCounts.set(p, count) - return [msg, count] + return { msg: msg.message, count } } /** @@ -67,11 +115,8 @@ export class MessageCache { const msgIds: Uint8Array[] = [] for (let i = 0; i < this.gossip; i++) { this.history[i].forEach((entry) => { - for (const t of entry.topics) { - if (t === topic) { - msgIds.push(entry.msgId) - break - } + if (entry.topic === topic) { + msgIds.push(entry.msgId) } }) } @@ -79,6 +124,25 @@ export class MessageCache { return msgIds } + /** + * Gets a message with msgId and tags it as validated. + * This function also returns the known peers that have sent us this message. This is used to + * prevent us sending redundant messages to peers who have already propagated it. + */ + validate(msgId: MsgIdStr): { message: RPC.IMessage; originatingPeers: Set } | null { + const entry = this.msgs.get(msgId) + if (!entry) { + return null + } + + const { message, originatingPeers } = entry + entry.validated = true + // Clear the known peers list (after a message is validated, it is forwarded and we no + // longer need to store the originating peers). 
+ entry.originatingPeers = new Set() + return { message, originatingPeers } + } + /** * Shifts the current window, discarding messages older than this.history.length of the cache */ @@ -87,10 +151,20 @@ export class MessageCache { last.forEach((entry) => { const msgIdStr = messageIdToString(entry.msgId) this.msgs.delete(msgIdStr) - this.peertx.delete(msgIdStr) }) this.history.pop() this.history.unshift([]) } + + remove(msgId: MsgIdStr): MessageCacheEntry | null { + const entry = this.msgs.get(msgId) + if (!entry) { + return null + } + + // Keep the message on the history vector, it will be dropped on a shift() + this.msgs.delete(msgId) + return entry + } } diff --git a/ts/message/rpc.d.ts b/ts/message/rpc.d.ts index c71d632d..c744223d 100644 --- a/ts/message/rpc.d.ts +++ b/ts/message/rpc.d.ts @@ -5,8 +5,8 @@ export interface IRPC { /** RPC subscriptions */ subscriptions?: (RPC.ISubOpts[]|null); - /** RPC msgs */ - msgs?: (RPC.IMessage[]|null); + /** RPC messages */ + messages?: (RPC.IMessage[]|null); /** RPC control */ control?: (RPC.IControlMessage|null); @@ -24,8 +24,8 @@ export class RPC implements IRPC { /** RPC subscriptions. */ public subscriptions: RPC.ISubOpts[]; - /** RPC msgs. */ - public msgs: RPC.IMessage[]; + /** RPC messages. */ + public messages: RPC.IMessage[]; /** RPC control. */ public control?: (RPC.IControlMessage|null); @@ -158,8 +158,8 @@ export namespace RPC { /** Message seqno */ seqno?: (Uint8Array|null); - /** Message topicIDs */ - topicIDs?: (string[]|null); + /** Message topic */ + topic: string; /** Message signature */ signature?: (Uint8Array|null); @@ -186,8 +186,8 @@ export namespace RPC { /** Message seqno. */ public seqno?: (Uint8Array|null); - /** Message topicIDs. */ - public topicIDs: string[]; + /** Message topic. */ + public topic: string; /** Message signature. */ public signature?: (Uint8Array|null); diff --git a/ts/message/rpc.js b/ts/message/rpc.js index 2131f747..e4882166 100644 --- a/ts/message/rpc.js +++ b/ts/message/rpc.js @@ -23,7 +23,7 @@ * @exports IRPC * @interface IRPC * @property {Array.|null} [subscriptions] RPC subscriptions - * @property {Array.|null} [msgs] RPC msgs + * @property {Array.|null} [messages] RPC messages * @property {RPC.IControlMessage|null} [control] RPC control */ @@ -37,7 +37,7 @@ */ function RPC(p) { this.subscriptions = []; - this.msgs = []; + this.messages = []; if (p) for (var ks = Object.keys(p), i = 0; i < ks.length; ++i) if (p[ks[i]] != null) @@ -53,12 +53,12 @@ RPC.prototype.subscriptions = $util.emptyArray; /** - * RPC msgs. - * @member {Array.} msgs + * RPC messages. + * @member {Array.} messages * @memberof RPC * @instance */ - RPC.prototype.msgs = $util.emptyArray; + RPC.prototype.messages = $util.emptyArray; /** * RPC control. 
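For reference, a hedged sketch of the renamed wire fields in use (the sendRpc changes earlier in this diff do the actual encoding; the topic and payload here are made up):

  import { RPC, IRPC } from './message/rpc'

  const rpc: IRPC = {
    subscriptions: [{ topicID: 'my-topic', subscribe: true }],
    messages: [{ data: new TextEncoder().encode('hello'), topic: 'my-topic' }]
  }
  const bytes = RPC.encode(rpc).finish()
  const decoded = RPC.decode(bytes)
  // Message.decode now throws a ProtocolError if the required `topic` field is missing (see below)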
@@ -98,9 +98,9 @@ for (var i = 0; i < m.subscriptions.length; ++i) $root.RPC.SubOpts.encode(m.subscriptions[i], w.uint32(10).fork()).ldelim(); } - if (m.msgs != null && m.msgs.length) { - for (var i = 0; i < m.msgs.length; ++i) - $root.RPC.Message.encode(m.msgs[i], w.uint32(18).fork()).ldelim(); + if (m.messages != null && m.messages.length) { + for (var i = 0; i < m.messages.length; ++i) + $root.RPC.Message.encode(m.messages[i], w.uint32(18).fork()).ldelim(); } if (m.control != null && Object.hasOwnProperty.call(m, "control")) $root.RPC.ControlMessage.encode(m.control, w.uint32(26).fork()).ldelim(); @@ -131,9 +131,9 @@ m.subscriptions.push($root.RPC.SubOpts.decode(r, r.uint32())); break; case 2: - if (!(m.msgs && m.msgs.length)) - m.msgs = []; - m.msgs.push($root.RPC.Message.decode(r, r.uint32())); + if (!(m.messages && m.messages.length)) + m.messages = []; + m.messages.push($root.RPC.Message.decode(r, r.uint32())); break; case 3: m.control = $root.RPC.ControlMessage.decode(r, r.uint32()); @@ -168,14 +168,14 @@ m.subscriptions[i] = $root.RPC.SubOpts.fromObject(d.subscriptions[i]); } } - if (d.msgs) { - if (!Array.isArray(d.msgs)) - throw TypeError(".RPC.msgs: array expected"); - m.msgs = []; - for (var i = 0; i < d.msgs.length; ++i) { - if (typeof d.msgs[i] !== "object") - throw TypeError(".RPC.msgs: object expected"); - m.msgs[i] = $root.RPC.Message.fromObject(d.msgs[i]); + if (d.messages) { + if (!Array.isArray(d.messages)) + throw TypeError(".RPC.messages: array expected"); + m.messages = []; + for (var i = 0; i < d.messages.length; ++i) { + if (typeof d.messages[i] !== "object") + throw TypeError(".RPC.messages: object expected"); + m.messages[i] = $root.RPC.Message.fromObject(d.messages[i]); } } if (d.control != null) { @@ -201,7 +201,7 @@ var d = {}; if (o.arrays || o.defaults) { d.subscriptions = []; - d.msgs = []; + d.messages = []; } if (m.subscriptions && m.subscriptions.length) { d.subscriptions = []; @@ -209,10 +209,10 @@ d.subscriptions[j] = $root.RPC.SubOpts.toObject(m.subscriptions[j], o); } } - if (m.msgs && m.msgs.length) { - d.msgs = []; - for (var j = 0; j < m.msgs.length; ++j) { - d.msgs[j] = $root.RPC.Message.toObject(m.msgs[j], o); + if (m.messages && m.messages.length) { + d.messages = []; + for (var j = 0; j < m.messages.length; ++j) { + d.messages[j] = $root.RPC.Message.toObject(m.messages[j], o); } } if (m.control != null && m.hasOwnProperty("control")) { @@ -421,7 +421,7 @@ * @property {Uint8Array|null} [from] Message from * @property {Uint8Array|null} [data] Message data * @property {Uint8Array|null} [seqno] Message seqno - * @property {Array.|null} [topicIDs] Message topicIDs + * @property {string} topic Message topic * @property {Uint8Array|null} [signature] Message signature * @property {Uint8Array|null} [key] Message key */ @@ -435,7 +435,6 @@ * @param {RPC.IMessage=} [p] Properties to set */ function Message(p) { - this.topicIDs = []; if (p) for (var ks = Object.keys(p), i = 0; i < ks.length; ++i) if (p[ks[i]] != null) @@ -467,12 +466,12 @@ Message.prototype.seqno = null; /** - * Message topicIDs. - * @member {Array.} topicIDs + * Message topic. + * @member {string} topic * @memberof RPC.Message * @instance */ - Message.prototype.topicIDs = $util.emptyArray; + Message.prototype.topic = ""; /** * Message signature. 
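A small round-trip sketch of the single-topic Message shape via the generated converters (values illustrative):

  const msg = RPC.Message.fromObject({ data: new Uint8Array([1, 2, 3]), topic: 'my-topic' })
  const obj = RPC.Message.toObject(msg, { bytes: Array })
  // obj.topic === 'my-topic'; previously this surfaced as obj.topicIDs: string[]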
@@ -566,10 +565,7 @@ w.uint32(18).bytes(m.data); if (m.seqno != null && Object.hasOwnProperty.call(m, "seqno")) w.uint32(26).bytes(m.seqno); - if (m.topicIDs != null && m.topicIDs.length) { - for (var i = 0; i < m.topicIDs.length; ++i) - w.uint32(34).string(m.topicIDs[i]); - } + w.uint32(34).string(m.topic); if (m.signature != null && Object.hasOwnProperty.call(m, "signature")) w.uint32(42).bytes(m.signature); if (m.key != null && Object.hasOwnProperty.call(m, "key")) @@ -605,9 +601,7 @@ m.seqno = r.bytes(); break; case 4: - if (!(m.topicIDs && m.topicIDs.length)) - m.topicIDs = []; - m.topicIDs.push(r.string()); + m.topic = r.string(); break; case 5: m.signature = r.bytes(); @@ -620,6 +614,8 @@ break; } } + if (!m.hasOwnProperty("topic")) + throw $util.ProtocolError("missing required 'topic'", { instance: m }); return m; }; @@ -653,13 +649,8 @@ else if (d.seqno.length) m.seqno = d.seqno; } - if (d.topicIDs) { - if (!Array.isArray(d.topicIDs)) - throw TypeError(".RPC.Message.topicIDs: array expected"); - m.topicIDs = []; - for (var i = 0; i < d.topicIDs.length; ++i) { - m.topicIDs[i] = String(d.topicIDs[i]); - } + if (d.topic != null) { + m.topic = String(d.topic); } if (d.signature != null) { if (typeof d.signature === "string") @@ -689,8 +680,8 @@ if (!o) o = {}; var d = {}; - if (o.arrays || o.defaults) { - d.topicIDs = []; + if (o.defaults) { + d.topic = ""; } if (m.from != null && m.hasOwnProperty("from")) { d.from = o.bytes === String ? $util.base64.encode(m.from, 0, m.from.length) : o.bytes === Array ? Array.prototype.slice.call(m.from) : m.from; @@ -707,11 +698,8 @@ if (o.oneofs) d._seqno = "seqno"; } - if (m.topicIDs && m.topicIDs.length) { - d.topicIDs = []; - for (var j = 0; j < m.topicIDs.length; ++j) { - d.topicIDs[j] = m.topicIDs[j]; - } + if (m.topic != null && m.hasOwnProperty("topic")) { + d.topic = m.topic; } if (m.signature != null && m.hasOwnProperty("signature")) { d.signature = o.bytes === String ? $util.base64.encode(m.signature, 0, m.signature.length) : o.bytes === Array ? 
Array.prototype.slice.call(m.signature) : m.signature; diff --git a/ts/message/rpc.proto b/ts/message/rpc.proto index 009d8242..7acf8e40 100644 --- a/ts/message/rpc.proto +++ b/ts/message/rpc.proto @@ -2,7 +2,7 @@ syntax = "proto3"; message RPC { repeated SubOpts subscriptions = 1; - repeated Message msgs = 2; + repeated Message messages = 2; optional ControlMessage control = 3; message SubOpts { @@ -14,7 +14,7 @@ message RPC { optional bytes from = 1; optional bytes data = 2; optional bytes seqno = 3; - repeated string topicIDs = 4; + required string topic = 4; optional bytes signature = 5; optional bytes key = 6; } diff --git a/ts/metrics.ts b/ts/metrics.ts new file mode 100644 index 00000000..6e1373db --- /dev/null +++ b/ts/metrics.ts @@ -0,0 +1,702 @@ +import { IRPC } from './message/rpc' +import { PeerScoreThresholds } from './score/peer-score-thresholds' +import { + MessageAcceptance, + MessageStatus, + PeerIdStr, + RejectReason, + RejectReasonObj, + TopicStr, + ValidateError +} from './types' + +/** Topic label as provided in `topicStrToLabel` */ +export type TopicLabel = string +export type TopicStrToLabel = Map + +export enum MessageSource { + forward = 'forward', + publish = 'publish' +} + +type LabelsGeneric = Record +type CollectFn = (metric: Gauge) => void + +interface Gauge { + // Sorry for this mess, `prom-client` API choices are not great + // If the function signature was `inc(value: number, labels?: Labels)`, this would be simpler + inc(value?: number): void + inc(labels: Labels, value?: number): void + inc(arg1?: Labels | number, arg2?: number): void + + set(value: number): void + set(labels: Labels, value: number): void + set(arg1?: Labels | number, arg2?: number): void + + addCollect(collectFn: CollectFn): void +} + +interface Histogram { + startTimer(): () => void + + observe(value: number): void + observe(labels: Labels, values: number): void + observe(arg1: Labels | number, arg2?: number): void + + reset(): void +} + +interface AvgMinMax { + set(values: number[]): void + set(labels: Labels, values: number[]): void + set(arg1?: Labels | number[], arg2?: number[]): void +} + +type GaugeConfig = { + name: string + help: string + labelNames?: keyof Labels extends string ? (keyof Labels)[] : undefined +} + +type HistogramConfig = { + name: string + help: string + labelNames?: (keyof Labels)[] + buckets?: number[] +} + +type AvgMinMaxConfig = GaugeConfig + +export interface MetricsRegister { + gauge(config: GaugeConfig): Gauge + histogram(config: HistogramConfig): Histogram + avgMinMax(config: AvgMinMaxConfig): AvgMinMax +} + +export enum InclusionReason { + /** Peer was a fanaout peer. */ + Fanout = 'fanout', + /** Included from random selection. */ + Random = 'random', + /** Peer subscribed. */ + Subscribed = 'subscribed', + /** On heartbeat, peer was included to fill the outbound quota. */ + Outbound = 'outbound', + /** On heartbeat, not enough peers in mesh */ + NotEnough = 'not_enough', + /** On heartbeat opportunistic grafting due to low mesh score */ + Opportunistic = 'opportunistic' +} + +/// Reasons why a peer was removed from the mesh. +export enum ChurnReason { + /// Peer disconnected. + Dc = 'disconnected', + /// Peer had a bad score. + BadScore = 'bad_score', + /// Peer sent a PRUNE. + Prune = 'prune', + /// Peer unsubscribed. + Unsub = 'unsubscribed', + /// Too many peers. + Excess = 'excess' +} + +/// Kinds of reasons a peer's score has been penalized +export enum ScorePenalty { + /// A peer grafted before waiting the back-off time. 
+ GraftBackoff = 'graft_backoff', + /// A Peer did not respond to an IWANT request in time. + BrokenPromise = 'broken_promise', + /// A Peer did not send enough messages as expected. + MessageDeficit = 'message_deficit', + /// Too many peers under one IP address. + IPColocation = 'IP_colocation' +} + +export enum IHaveIgnoreReason { + LowScore = 'low_score', + MaxIhave = 'max_ihave', + MaxIasked = 'max_iasked' +} + +export enum ScoreThreshold { + graylist = 'graylist', + publish = 'publish', + gossip = 'gossip', + mesh = 'mesh' +} + +export type PeersByScoreThreshold = Record + +export type ToSendGroupCount = { + direct: number + floodsub: number + mesh: number + fanout: number +} + +export type ToAddGroupCount = { + fanout: number + random: number +} + +export type PromiseDeliveredStats = + | { expired: false; requestedCount: number; maxDeliverMs: number } + | { expired: true; maxDeliverMs: number } + +export type TopicScoreWeights = { p1w: T; p2w: T; p3w: T; p3bw: T; p4w: T } +export type ScoreWeights = { + byTopic: Map> + p5w: T + p6w: T + p7w: T + score: T +} + +export type Metrics = ReturnType + +/** + * A collection of metrics used throughout the Gossipsub behaviour. + */ +// eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types +export function getMetrics( + register: MetricsRegister, + topicStrToLabel: TopicStrToLabel, + opts: { gossipPromiseExpireSec: number; behaviourPenaltyThreshold: number; maxMeshMessageDeliveriesWindowSec: number } +) { + // Using function style instead of class to prevent having to re-declare all MetricsPrometheus types. + + return { + /* Metrics for static config */ + protocolsEnabled: register.gauge<{ protocol: string }>({ + name: 'gossipsub_protocol', + help: 'Status of enabled protocols', + labelNames: ['protocol'] + }), + + /* Metrics per known topic */ + /** Status of our subscription to this topic. This metric allows analyzing other topic metrics + * filtered by our current subscription status. + * = rust-libp2p `topic_subscription_status` */ + topicSubscriptionStatus: register.gauge<{ topicStr: TopicStr }>({ + name: 'gossipsub_topic_subscription_status', + help: 'Status of our subscription to this topic', + labelNames: ['topicStr'] + }), + /** Number of peers subscribed to each topic. This allows us to analyze a topic's behaviour + * regardless of our subscription status. */ + topicPeersCount: register.gauge<{ topicStr: TopicStr }>({ + name: 'gossipsub_topic_peer_count', + help: 'Number of peers subscribed to each topic', + labelNames: ['topicStr'] + }), + + /* Metrics regarding mesh state */ + /** Number of peers in our mesh. This metric should be updated with the count of peers for a + * topic in the mesh regardless of inclusion and churn events. + * = rust-libp2p `mesh_peer_counts` */ + meshPeerCounts: register.gauge<{ topicStr: TopicStr }>({ + name: 'gossipsub_mesh_peer_count', + help: 'Number of peers in our mesh', + labelNames: ['topicStr'] + }), + /** Number of times we include peers in a topic mesh for different reasons. + * = rust-libp2p `mesh_peer_inclusion_events` */ + meshPeerInclusionEvents: register.gauge<{ topic: TopicLabel; reason: InclusionReason }>({ + name: 'gossipsub_mesh_peer_inclusion_events_total', + help: 'Number of times we include peers in a topic mesh for different reasons', + labelNames: ['topic', 'reason'] + }), + /** Number of times we remove peers in a topic mesh for different reasons. 
+ * = rust-libp2p `mesh_peer_churn_events` */ + meshPeerChurnEvents: register.gauge<{ topic: TopicLabel; reason: ChurnReason }>({ + name: 'gossipsub_peer_churn_events_total', + help: 'Number of times we remove peers in a topic mesh for different reasons', + labelNames: ['topic', 'reason'] + }), + + /* General Metrics */ + /** Gossipsub supports floodsub, gossipsub v1.0 and gossipsub v1.1. Peers are classified based + * on which protocol they support. This metric keeps track of the number of peers that are + * connected of each type. */ + peersPerProtocol: register.gauge<{ protocol: string }>({ + name: 'gossipsub_peers_per_protocol_count', + help: 'Peers connected for each topic', + labelNames: ['protocol'] + }), + /** The time it takes to complete one iteration of the heartbeat. */ + heartbeatDuration: register.histogram({ + name: 'gossipsub_heartbeat_duration_seconds', + help: 'The time it takes to complete one iteration of the heartbeat', + // Should take <10ms, over 1s it's a huge issue that needs debugging, since a heartbeat will be cancelled + buckets: [0.01, 0.1, 1] + }), + /** Heartbeat run took longer than heartbeat interval so next is skipped */ + heartbeatSkipped: register.gauge({ + name: 'gossipsub_heartbeat_skipped', + help: 'Heartbeat run took longer than heartbeat interval so next is skipped' + }), + + /** Message validation results for each topic. + * Invalid == Reject? + * = rust-libp2p `invalid_messages`, `accepted_messages`, `ignored_messages`, `rejected_messages` */ + asyncValidationResult: register.gauge<{ topic: TopicLabel; acceptance: MessageAcceptance }>({ + name: 'gossipsub_async_validation_result_total', + help: 'Message validation result for each topic', + labelNames: ['topic', 'acceptance'] + }), + /** When the user validates a message, it tries to re propagate it to its mesh peers. If the + * message expires from the memcache before it can be validated, we count this a cache miss + * and it is an indicator that the memcache size should be increased. + * = rust-libp2p `mcache_misses` */ + asyncValidationMcacheHit: register.gauge<{ hit: 'hit' | 'miss' }>({ + name: 'gossipsub_async_validation_mcache_hit_total', + help: 'Async validation result reported by the user layer', + labelNames: ['hit'] + }), + + // RPC outgoing. Track byte length + data structure sizes + rpcRecvBytes: register.gauge({ name: 'gossipsub_rpc_recv_bytes_total', help: 'RPC recv' }), + rpcRecvCount: register.gauge({ name: 'gossipsub_rpc_recv_count_total', help: 'RPC recv' }), + rpcRecvSubscription: register.gauge({ name: 'gossipsub_rpc_recv_subscription_total', help: 'RPC recv' }), + rpcRecvMessage: register.gauge({ name: 'gossipsub_rpc_recv_message_total', help: 'RPC recv' }), + rpcRecvControl: register.gauge({ name: 'gossipsub_rpc_recv_control_total', help: 'RPC recv' }), + rpcRecvIHave: register.gauge({ name: 'gossipsub_rpc_recv_ihave_total', help: 'RPC recv' }), + rpcRecvIWant: register.gauge({ name: 'gossipsub_rpc_recv_iwant_total', help: 'RPC recv' }), + rpcRecvGraft: register.gauge({ name: 'gossipsub_rpc_recv_graft_total', help: 'RPC recv' }), + rpcRecvPrune: register.gauge({ name: 'gossipsub_rpc_recv_prune_total', help: 'RPC recv' }), + + /** Total count of RPC dropped because acceptFrom() == false */ + rpcRecvNotAccepted: register.gauge({ + name: 'gossipsub_rpc_rcv_not_accepted_total', + help: 'Total count of RPC dropped because acceptFrom() == false' + }), + + // RPC incoming. 
Track byte length + data structure sizes + rpcSentBytes: register.gauge({ name: 'gossipsub_rpc_sent_bytes_total', help: 'RPC sent' }), + rpcSentCount: register.gauge({ name: 'gossipsub_rpc_sent_count_total', help: 'RPC sent' }), + rpcSentSubscription: register.gauge({ name: 'gossipsub_rpc_sent_subscription_total', help: 'RPC sent' }), + rpcSentMessage: register.gauge({ name: 'gossipsub_rpc_sent_message_total', help: 'RPC sent' }), + rpcSentControl: register.gauge({ name: 'gossipsub_rpc_sent_control_total', help: 'RPC sent' }), + rpcSentIHave: register.gauge({ name: 'gossipsub_rpc_sent_ihave_total', help: 'RPC sent' }), + rpcSentIWant: register.gauge({ name: 'gossipsub_rpc_sent_iwant_total', help: 'RPC sent' }), + rpcSentGraft: register.gauge({ name: 'gossipsub_rpc_sent_graft_total', help: 'RPC sent' }), + rpcSentPrune: register.gauge({ name: 'gossipsub_rpc_sent_prune_total', help: 'RPC sent' }), + + // publish message. Track peers sent to and bytes + /** Total count of msg published by topic */ + msgPublishCount: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_msg_publish_count_total', + help: 'Total count of msg published by topic', + labelNames: ['topic'] + }), + /** Total count of peers that we publish a msg to */ + msgPublishPeers: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_msg_publish_peers_total', + help: 'Total count of peers that we publish a msg to', + labelNames: ['topic'] + }), + /** Total count of peers (by group) that we publish a msg to */ + // NOTE: Do not use 'group' label since it's a generic already used by Prometheus to group instances + msgPublishPeersByGroup: register.gauge<{ topic: TopicLabel; peerGroup: keyof ToSendGroupCount }>({ + name: 'gossipsub_msg_publish_peers_by_group', + help: 'Total count of peers (by group) that we publish a msg to', + labelNames: ['topic', 'peerGroup'] + }), + /** Total count of msg publish data.length bytes */ + msgPublishBytes: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_msg_publish_bytes_total', + help: 'Total count of msg publish data.length bytes', + labelNames: ['topic'] + }), + + /** Total count of msg forwarded by topic */ + msgForwardCount: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_msg_forward_count_total', + help: 'Total count of msg forwarded by topic', + labelNames: ['topic'] + }), + /** Total count of peers that we forward a msg to */ + msgForwardPeers: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_msg_forward_peers_total', + help: 'Total count of peers that we forward a msg to', + labelNames: ['topic'] + }), + + /** Total count of recv msgs before any validation */ + msgReceivedPreValidation: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_msg_received_prevalidation_total', + help: 'Total count of recv msgs before any validation', + labelNames: ['topic'] + }), + /** Tracks distribution of recv msgs by duplicate, invalid, valid */ + msgReceivedStatus: register.gauge<{ topic: TopicLabel; status: MessageStatus }>({ + name: 'gossipsub_msg_received_status_total', + help: 'Tracks distribution of recv msgs by duplicate, invalid, valid', + labelNames: ['topic', 'status'] + }), + /** Tracks specific reason of invalid */ + msgReceivedInvalid: register.gauge<{ topic: TopicLabel; error: RejectReason | ValidateError }>({ + name: 'gossipsub_msg_received_invalid_total', + help: 'Tracks specific reason of invalid', + labelNames: ['topic', 'error'] + }), + /** Track duplicate message delivery time */ + duplicateMsgDeliveryDelay: register.histogram({ + name: 
'gossisub_duplicate_msg_delivery_delay_seconds', + help: 'Time since the 1st duplicated message validated', + labelNames: ['topic'], + buckets: [ + 0.25 * opts.maxMeshMessageDeliveriesWindowSec, + 0.5 * opts.maxMeshMessageDeliveriesWindowSec, + 1 * opts.maxMeshMessageDeliveriesWindowSec, + 2 * opts.maxMeshMessageDeliveriesWindowSec, + 4 * opts.maxMeshMessageDeliveriesWindowSec + ] + }), + /** Total count of late msg delivery total by topic */ + duplicateMsgLateDelivery: register.gauge<{ topic: TopicLabel }>({ + name: 'gossisub_duplicate_msg_late_delivery_total', + help: 'Total count of late duplicate message delivery by topic, which triggers P3 penalty', + labelNames: ['topic'] + }), + + /* Metrics related to scoring */ + /** Total times score() is called */ + scoreFnCalls: register.gauge({ + name: 'gossipsub_score_fn_calls_total', + help: 'Total times score() is called' + }), + /** Total times score() call actually computed computeScore(), no cache */ + scoreFnRuns: register.gauge({ + name: 'gossipsub_score_fn_runs_total', + help: 'Total times score() call actually computed computeScore(), no cache' + }), + scoreCachedDelta: register.histogram({ + name: 'gossipsub_score_cache_delta', + help: 'Delta of score between cached values that expired', + buckets: [10, 100, 1000] + }), + /** Current count of peers by score threshold */ + peersByScoreThreshold: register.gauge<{ threshold: ScoreThreshold }>({ + name: 'gossipsub_peers_by_score_threshold_count', + help: 'Current count of peers by score threshold', + labelNames: ['threshold'] + }), + score: register.avgMinMax({ + name: 'gossipsub_score', + help: 'Avg min max of gossip scores', + labelNames: ['topic', 'p'] + }), + /** Separate score weights */ + scoreWeights: register.avgMinMax<{ topic?: TopicLabel; p: string }>({ + name: 'gossipsub_score_weights', + help: 'Separate score weights', + labelNames: ['topic', 'p'] + }), + /** Histogram of the scores for each mesh topic. */ + // TODO: Not implemented + scorePerMesh: register.avgMinMax<{ topic: TopicLabel }>({ + name: 'gossipsub_score_per_mesh', + help: 'Histogram of the scores for each mesh topic', + labelNames: ['topic'] + }), + /** A counter of the kind of penalties being applied to peers. */ + // TODO: Not fully implemented + scoringPenalties: register.gauge<{ penalty: ScorePenalty }>({ + name: 'gossipsub_scoring_penalties_total', + help: 'A counter of the kind of penalties being applied to peers', + labelNames: ['penalty'] + }), + behaviourPenalty: register.histogram({ + name: 'gossipsub_peer_stat_behaviour_penalty', + help: 'Current peer stat behaviour_penalty at each scrape', + buckets: [ + 0.25 * opts.behaviourPenaltyThreshold, + 0.5 * opts.behaviourPenaltyThreshold, + 1 * opts.behaviourPenaltyThreshold, + 2 * opts.behaviourPenaltyThreshold, + 4 * opts.behaviourPenaltyThreshold + ] + }), + + // TODO: + // - iasked per peer (on heartbeat) + // - when promise is resolved, track messages from promises + + /** Total received IHAVE messages that we ignore for some reason */ + ihaveRcvIgnored: register.gauge<{ reason: IHaveIgnoreReason }>({ + name: 'gossipsub_ihave_rcv_ignored_total', + help: 'Total received IHAVE messages that we ignore for some reason', + labelNames: ['reason'] + }), + /** Total received IHAVE messages by topic */ + ihaveRcvMsgids: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_ihave_rcv_msgids_total', + help: 'Total received IHAVE messages by topic', + labelNames: ['topic'] + }), + /** Total messages per topic we don't have. Not actual requests. 
+ * The number of times we have decided that an IWANT control message is required for this + * topic. A very high metric might indicate an underperforming network. + * = rust-libp2p `topic_iwant_msgs` */ + ihaveRcvNotSeenMsgids: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_ihave_rcv_not_seen_msgids_total', + help: 'Total messages per topic we do not have, not actual requests', + labelNames: ['topic'] + }), + + /** Total received IWANT messages by topic */ + iwantRcvMsgids: register.gauge<{ topic: TopicLabel }>({ + name: 'gossipsub_iwant_rcv_msgids_total', + help: 'Total received IWANT messages by topic', + labelNames: ['topic'] + }), + /** Total requested messageIDs that we don't have */ + iwantRcvDonthaveMsgids: register.gauge({ + name: 'gossipsub_iwant_rcv_dont_have_msgids_total', + help: 'Total requested messageIDs that we do not have' + }), + iwantPromiseStarted: register.gauge({ + name: 'gossipsub_iwant_promise_sent_total', + help: 'Total count of started IWANT promises' + }), + /** Total count of resolved IWANT promises */ + iwantPromiseResolved: register.gauge({ + name: 'gossipsub_iwant_promise_resolved_total', + help: 'Total count of resolved IWANT promises' + }), + /** Total count of peers we have asked IWANT promises that are resolved */ + iwantPromiseResolvedPeers: register.gauge({ + name: 'gossipsub_iwant_promise_resolved_peers', + help: 'Total count of peers we have asked IWANT promises that are resolved' + }), + iwantPromiseBroken: register.gauge({ + name: 'gossipsub_iwant_promise_broken', + help: 'Total count of broken IWANT promises' + }), + /** Histogram of delivery time of resolved IWANT promises */ + iwantPromiseDeliveryTime: register.histogram({ + name: 'gossipsub_iwant_promise_delivery_seconds', + help: 'Histogram of delivery time of resolved IWANT promises', + buckets: [ + 0.5 * opts.gossipPromiseExpireSec, + 1 * opts.gossipPromiseExpireSec, + 2 * opts.gossipPromiseExpireSec, + 4 * opts.gossipPromiseExpireSec + ] + }), + + /* Data structure sizes */ + /** Unbounded cache sizes */ + cacheSize: register.gauge<{ cache: string }>({ + name: 'gossipsub_cache_size', + help: 'Unbounded cache sizes', + labelNames: ['cache'] + }), + /** Current mcache msg count */ + mcacheSize: register.gauge({ + name: 'gossipsub_mcache_size', + help: 'Current mcache msg count' + }), + + topicStrToLabel: topicStrToLabel, + + toTopic(topicStr: TopicStr): TopicLabel { + return this.topicStrToLabel.get(topicStr) ?? topicStr + }, + + /** We joined a topic */ + onJoin(topicStr: TopicStr): void { + this.topicSubscriptionStatus.set({ topicStr }, 1) + this.meshPeerCounts.set({ topicStr }, 0) // Reset count + }, + + /** We left a topic */ + onLeave(topicStr: TopicStr): void { + this.topicSubscriptionStatus.set({ topicStr }, 0) + this.meshPeerCounts.set({ topicStr }, 0) // Reset count + }, + + /** Register the inclusion of peers in our mesh due to some reason. 
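+   * For example, a heartbeat that pulls three extra peers into an under-populated mesh would
+   * typically call onAddToMesh(topicStr, InclusionReason.NotEnough, 3), incrementing
+   * gossipsub_mesh_peer_inclusion_events_total by 3 for that topic/reason pair.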
*/ + onAddToMesh(topicStr: TopicStr, reason: InclusionReason, count: number): void { + const topic = this.toTopic(topicStr) + this.meshPeerInclusionEvents.inc({ topic, reason }, count) + }, + + /** Register the removal of peers in our mesh due to some reason */ + // - remove_peer_from_mesh() + // - heartbeat() Churn::BadScore + // - heartbeat() Churn::Excess + // - on_disconnect() Churn::Ds + onRemoveFromMesh(topicStr: TopicStr, reason: ChurnReason, count: number): void { + const topic = this.toTopic(topicStr) + this.meshPeerChurnEvents.inc({ topic, reason }, count) + }, + + onReportValidationMcacheHit(hit: boolean): void { + this.asyncValidationMcacheHit.inc({ hit: hit ? 'hit' : 'miss' }) + }, + + onReportValidation(topicStr: TopicStr, acceptance: MessageAcceptance): void { + const topic = this.toTopic(topicStr) + this.asyncValidationResult.inc({ topic: topic, acceptance }) + }, + + /** + * - in handle_graft() Penalty::GraftBackoff + * - in apply_iwant_penalties() Penalty::BrokenPromise + * - in metric_score() P3 Penalty::MessageDeficit + * - in metric_score() P6 Penalty::IPColocation + */ + onScorePenalty(penalty: ScorePenalty): void { + // Can this be labeled by topic too? + this.scoringPenalties.inc({ penalty }, 1) + }, + + onIhaveRcv(topicStr: TopicStr, ihave: number, idonthave: number): void { + const topic = this.toTopic(topicStr) + this.ihaveRcvMsgids.inc({ topic }, ihave) + this.ihaveRcvNotSeenMsgids.inc({ topic }, idonthave) + }, + + onIwantRcv(iwantByTopic: Map, iwantDonthave: number): void { + for (const [topicStr, iwant] of iwantByTopic) { + const topic = this.toTopic(topicStr) + this.iwantRcvMsgids.inc({ topic }, iwant) + } + + this.iwantRcvDonthaveMsgids.inc(iwantDonthave) + }, + + onForwardMsg(topicStr: TopicStr, tosendCount: number): void { + const topic = this.toTopic(topicStr) + this.msgForwardCount.inc({ topic }, 1) + this.msgForwardPeers.inc({ topic }, tosendCount) + }, + + onPublishMsg(topicStr: TopicStr, tosendGroupCount: ToSendGroupCount, tosendCount: number, dataLen: number): void { + const topic = this.toTopic(topicStr) + this.msgPublishCount.inc({ topic }, 1) + this.msgPublishBytes.inc({ topic }, tosendCount * dataLen) + this.msgPublishPeers.inc({ topic }, tosendCount) + this.msgPublishPeersByGroup.inc({ topic, peerGroup: 'direct' }, tosendGroupCount.direct) + this.msgPublishPeersByGroup.inc({ topic, peerGroup: 'floodsub' }, tosendGroupCount.floodsub) + this.msgPublishPeersByGroup.inc({ topic, peerGroup: 'mesh' }, tosendGroupCount.mesh) + this.msgPublishPeersByGroup.inc({ topic, peerGroup: 'fanout' }, tosendGroupCount.fanout) + }, + + onMsgRecvPreValidation(topicStr: TopicStr): void { + const topic = this.toTopic(topicStr) + this.msgReceivedPreValidation.inc({ topic }, 1) + }, + + onMsgRecvResult(topicStr: TopicStr, status: MessageStatus): void { + const topic = this.toTopic(topicStr) + this.msgReceivedStatus.inc({ topic, status }) + }, + + onMsgRecvInvalid(topicStr: TopicStr, reason: RejectReasonObj): void { + const topic = this.toTopic(topicStr) + + const error = reason.reason === RejectReason.Error ? 
reason.error : reason.reason + this.msgReceivedInvalid.inc({ topic, error }, 1) + }, + + onDuplicateMsgDelivery(topicStr: TopicStr, deliveryDelayMs: number, isLateDelivery: boolean): void { + this.duplicateMsgDeliveryDelay.observe(deliveryDelayMs / 1000) + if (isLateDelivery) { + const topic = this.toTopic(topicStr) + this.duplicateMsgLateDelivery.inc({ topic }, 1) + } + }, + + onRpcRecv(rpc: IRPC, rpcBytes: number): void { + this.rpcRecvBytes.inc(rpcBytes) + this.rpcRecvCount.inc(1) + if (rpc.subscriptions) this.rpcRecvSubscription.inc(rpc.subscriptions.length) + if (rpc.messages) this.rpcRecvMessage.inc(rpc.messages.length) + if (rpc.control) { + this.rpcRecvControl.inc(1) + if (rpc.control.ihave) this.rpcRecvIHave.inc(rpc.control.ihave.length) + if (rpc.control.iwant) this.rpcRecvIWant.inc(rpc.control.iwant.length) + if (rpc.control.graft) this.rpcRecvGraft.inc(rpc.control.graft.length) + if (rpc.control.prune) this.rpcRecvPrune.inc(rpc.control.prune.length) + } + }, + + onRpcSent(rpc: IRPC, rpcBytes: number): void { + this.rpcSentBytes.inc(rpcBytes) + this.rpcSentCount.inc(1) + if (rpc.subscriptions) this.rpcSentSubscription.inc(rpc.subscriptions.length) + if (rpc.messages) this.rpcSentMessage.inc(rpc.messages.length) + if (rpc.control) { + this.rpcSentControl.inc(1) + if (rpc.control.ihave) this.rpcSentIHave.inc(rpc.control.ihave.length) + if (rpc.control.iwant) this.rpcSentIWant.inc(rpc.control.iwant.length) + if (rpc.control.graft) this.rpcSentGraft.inc(rpc.control.graft.length) + if (rpc.control.prune) this.rpcSentPrune.inc(rpc.control.prune.length) + } + }, + + registerScores(scores: number[], scoreThresholds: PeerScoreThresholds): void { + let graylist = 0 + let publish = 0 + let gossip = 0 + let mesh = 0 + + for (const score of scores) { + if (score >= scoreThresholds.graylistThreshold) graylist++ + if (score >= scoreThresholds.publishThreshold) publish++ + if (score >= scoreThresholds.gossipThreshold) gossip++ + if (score >= 0) mesh++ + } + + this.peersByScoreThreshold.set({ threshold: ScoreThreshold.graylist }, graylist) + this.peersByScoreThreshold.set({ threshold: ScoreThreshold.publish }, publish) + this.peersByScoreThreshold.set({ threshold: ScoreThreshold.gossip }, gossip) + this.peersByScoreThreshold.set({ threshold: ScoreThreshold.mesh }, mesh) + + // Register full score too + this.score.set(scores) + }, + + registerScoreWeights(sw: ScoreWeights): void { + for (const [topic, wsTopic] of sw.byTopic) { + this.scoreWeights.set({ topic, p: 'p1' }, wsTopic.p1w) + this.scoreWeights.set({ topic, p: 'p2' }, wsTopic.p2w) + this.scoreWeights.set({ topic, p: 'p3' }, wsTopic.p3w) + this.scoreWeights.set({ topic, p: 'p3b' }, wsTopic.p3bw) + this.scoreWeights.set({ topic, p: 'p4' }, wsTopic.p4w) + } + + this.scoreWeights.set({ p: 'p5' }, sw.p5w) + this.scoreWeights.set({ p: 'p6' }, sw.p6w) + this.scoreWeights.set({ p: 'p7' }, sw.p7w) + }, + + registerScorePerMesh(mesh: Map>, scoreByPeer: Map): void { + const peersPerTopicLabel = new Map>() + + mesh.forEach((peers, topicStr) => { + // Aggregate by known topicLabel or throw to 'unknown'. This prevent too high cardinality + const topicLabel = this.topicStrToLabel.get(topicStr) ?? 
'unknown' + let peersInMesh = peersPerTopicLabel.get(topicLabel) + if (!peersInMesh) { + peersInMesh = new Set() + peersPerTopicLabel.set(topicLabel, peersInMesh) + } + peers.forEach((p) => peersInMesh?.add(p)) + }) + + for (const [topic, peers] of peersPerTopicLabel) { + const meshScores: number[] = [] + peers.forEach((peer) => { + meshScores.push(scoreByPeer.get(peer) ?? 0) + }) + this.scorePerMesh.set({ topic }, meshScores) + } + } + } +} diff --git a/ts/score/compute-score.ts b/ts/score/compute-score.ts index 7f88c7a9..e804ba15 100644 --- a/ts/score/compute-score.ts +++ b/ts/score/compute-score.ts @@ -30,16 +30,20 @@ export function computeScore( } // P2: first message deliveries - const p2 = tstats.firstMessageDeliveries + let p2 = tstats.firstMessageDeliveries + if (p2 > topicParams.firstMessageDeliveriesCap) { + p2 = topicParams.firstMessageDeliveriesCap + } topicScore += p2 * topicParams.firstMessageDeliveriesWeight // P3: mesh message deliveries - if (tstats.meshMessageDeliveriesActive) { - if (tstats.meshMessageDeliveries < topicParams.meshMessageDeliveriesThreshold) { - const deficit = topicParams.meshMessageDeliveriesThreshold - tstats.meshMessageDeliveries - const p3 = deficit * deficit - topicScore += p3 * topicParams.meshMessageDeliveriesWeight - } + if ( + tstats.meshMessageDeliveriesActive && + tstats.meshMessageDeliveries < topicParams.meshMessageDeliveriesThreshold + ) { + const deficit = topicParams.meshMessageDeliveriesThreshold - tstats.meshMessageDeliveries + const p3 = deficit * deficit + topicScore += p3 * topicParams.meshMessageDeliveriesWeight } // P3b: @@ -84,8 +88,11 @@ export function computeScore( }) // P7: behavioural pattern penalty - const p7 = pstats.behaviourPenalty * pstats.behaviourPenalty - score += p7 * params.behaviourPenaltyWeight + if (pstats.behaviourPenalty > params.behaviourPenaltyThreshold) { + const excess = pstats.behaviourPenalty - params.behaviourPenaltyThreshold + const p7 = excess * excess + score += p7 * params.behaviourPenaltyWeight + } return score } diff --git a/ts/score/peer-score-params.ts b/ts/score/peer-score-params.ts index f4477a69..bdf7195b 100644 --- a/ts/score/peer-score-params.ts +++ b/ts/score/peer-score-params.ts @@ -1,7 +1,7 @@ import { ERR_INVALID_PEER_SCORE_PARAMS } from './constants' // eslint-disable-next-line @typescript-eslint/ban-ts-comment // @ts-ignore -import errcode = require('err-code') +import errcode from 'err-code' // This file defines PeerScoreParams and TopicScoreParams interfaces // as well as constructors, default constructors, and validation functions @@ -50,6 +50,7 @@ export interface PeerScoreParams { * The weight of the parameter MUST be negative (or zero to disable). 
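+   * behaviourPenaltyThreshold below sets the point at which P7 starts to apply: per the
+   * compute-score change above, only the excess of the penalty counter over this threshold
+   * is squared and multiplied by the (negative) weight.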
*/ behaviourPenaltyWeight: number + behaviourPenaltyThreshold: number behaviourPenaltyDecay: number /** @@ -140,15 +141,16 @@ export interface TopicScoreParams { export const defaultPeerScoreParams: PeerScoreParams = { topics: {}, - topicScoreCap: 10, - appSpecificScore: () => 0, - appSpecificWeight: 10, - IPColocationFactorWeight: -5, - IPColocationFactorThreshold: 10, + topicScoreCap: 10.0, + appSpecificScore: () => 0.0, + appSpecificWeight: 10.0, + IPColocationFactorWeight: -5.0, + IPColocationFactorThreshold: 10.0, IPColocationFactorWhitelist: new Set(), - behaviourPenaltyWeight: -10, + behaviourPenaltyWeight: -10.0, + behaviourPenaltyThreshold: 0.0, behaviourPenaltyDecay: 0.2, - decayInterval: 1000, + decayInterval: 1000.0, decayToZero: 0.1, retainScore: 3600 * 1000 } @@ -204,7 +206,7 @@ export function validatePeerScoreParams(p: PeerScoreParams): void { validateTopicScoreParams(params) } catch (e) { throw errcode( - new Error(`invalid score parameters for topic ${topic}: ${e.message}`), + new Error(`invalid score parameters for topic ${topic}: ${(e as Error).message}`), ERR_INVALID_PEER_SCORE_PARAMS ) } diff --git a/ts/score/peer-score-thresholds.ts b/ts/score/peer-score-thresholds.ts index 3d364c8a..da4b076f 100644 --- a/ts/score/peer-score-thresholds.ts +++ b/ts/score/peer-score-thresholds.ts @@ -1,7 +1,7 @@ import { ERR_INVALID_PEER_SCORE_THRESHOLDS } from './constants' // eslint-disable-next-line @typescript-eslint/ban-ts-comment // @ts-ignore -import errcode = require('err-code') +import errcode from 'err-code' // This file defines PeerScoreThresholds interface // as well as a constructor, default constructor, and validation function diff --git a/ts/score/peer-score.ts b/ts/score/peer-score.ts index 7b5e5e2b..ce3c135d 100644 --- a/ts/score/peer-score.ts +++ b/ts/score/peer-score.ts @@ -1,57 +1,66 @@ import { PeerScoreParams, validatePeerScoreParams } from './peer-score-params' -import { PeerStats, createPeerStats, ensureTopicStats } from './peer-stats' +import { PeerStats, TopicStats } from './peer-stats' import { computeScore } from './compute-score' import { MessageDeliveries, DeliveryRecordStatus } from './message-deliveries' -import { ERR_TOPIC_VALIDATOR_IGNORE } from '../constants' import PeerId from 'peer-id' import ConnectionManager from 'libp2p/src/connection-manager' -import { InMessage } from 'libp2p-interfaces/src/pubsub' -import debug = require('debug') -import pubsubErrors = require('libp2p-interfaces/src/pubsub/errors') - -const { ERR_INVALID_SIGNATURE, ERR_MISSING_SIGNATURE } = pubsubErrors.codes +import debug from 'debug' +import { MsgIdStr, PeerIdStr, RejectReason, TopicStr } from '../types' +import { Metrics, ScorePenalty } from '../metrics' const log = debug('libp2p:gossipsub:score') +type IPStr = string + +type PeerScoreOpts = { + /** + * Miliseconds to cache computed score per peer + */ + scoreCacheValidityMs: number +} interface ScoreCacheEntry { - /** The cached score, null if not cached */ - score: number | null + /** The cached score */ + score: number /** Unix timestamp in miliseconds, the time after which the cached score for a peer is no longer valid */ cacheUntil: number } +export type PeerScoreStatsDump = Record + export class PeerScore { - /** - * The score parameters - */ - params: PeerScoreParams /** * Per-peer stats for score calculation */ - peerStats: Map + readonly peerStats = new Map() /** * IP colocation tracking; maps IP => set of peers. 
*/ - peerIPs: Map> + readonly peerIPs = new Map>() /** * Cache score up to decayInterval if topic stats are unchanged. */ - scoreCache: Map + readonly scoreCache = new Map() /** * Recent message delivery timing/participants */ - deliveryRecords: MessageDeliveries - _connectionManager: ConnectionManager + readonly deliveryRecords = new MessageDeliveries() + _backgroundInterval?: NodeJS.Timeout - constructor(params: PeerScoreParams, connectionManager: ConnectionManager) { + private readonly scoreCacheValidityMs: number + + constructor( + readonly params: PeerScoreParams, + private readonly connectionManager: ConnectionManager, + private readonly metrics: Metrics | null, + opts: PeerScoreOpts + ) { validatePeerScoreParams(params) - this.params = params - this._connectionManager = connectionManager - this.peerStats = new Map() - this.peerIPs = new Map() - this.scoreCache = new Map() - this.deliveryRecords = new MessageDeliveries() + this.scoreCacheValidityMs = opts.scoreCacheValidityMs + } + + get size(): number { + return this.peerStats.size } /** @@ -86,15 +95,19 @@ export class PeerScore { * Periodic maintenance */ background(): void { - this._refreshScores() - this._updateIPs() + this.refreshScores() + this.updateIPs() this.deliveryRecords.gc() } + dumpPeerScoreStats(): PeerScoreStatsDump { + return Object.fromEntries(Array.from(this.peerStats.entries()).map(([peer, stats]) => [peer, stats])) + } + /** * Decays scores, and purges score records for disconnected peers once their expiry has elapsed. */ - _refreshScores(): void { + private refreshScores(): void { const now = Date.now() const decayToZero = this.params.decayToZero @@ -103,8 +116,9 @@ export class PeerScore { // has the retention perious expired? if (now > pstats.expire) { // yes, throw it away (but clean up the IP tracking first) - this._removeIPs(id, pstats.ips) + this.removeIPs(id, pstats.ips) this.peerStats.delete(id) + this.scoreCache.delete(id) } // we don't decay retained scores, as the peer is not active. 
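The decay pass in the next hunk multiplies each per-topic counter by its decay factor on every background tick and zeroes it once it falls below params.decayToZero. A short illustration of that arithmetic, using made-up factors rather than the library defaults:

// after each background tick: value = value * decay, then clamp below decayToZero
const decay = 0.9 // e.g. a topic's firstMessageDeliveriesDecay
const decayToZero = 0.01 // params.decayToZero
let firstMessageDeliveries = 5

for (let tick = 1; tick <= 60; tick++) {
  firstMessageDeliveries *= decay
  if (firstMessageDeliveries < decayToZero) {
    firstMessageDeliveries = 0 // 5 * 0.9^59 ≈ 0.0099 < 0.01, so the counter zeroes out after ~59 ticks
  }
}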
@@ -127,18 +141,22 @@ export class PeerScore { if (tstats.firstMessageDeliveries < decayToZero) { tstats.firstMessageDeliveries = 0 } + tstats.meshMessageDeliveries *= tparams.meshMessageDeliveriesDecay if (tstats.meshMessageDeliveries < decayToZero) { tstats.meshMessageDeliveries = 0 } + tstats.meshFailurePenalty *= tparams.meshFailurePenaltyDecay if (tstats.meshFailurePenalty < decayToZero) { tstats.meshFailurePenalty = 0 } + tstats.invalidMessageDeliveries *= tparams.invalidMessageDeliveriesDecay if (tstats.invalidMessageDeliveries < decayToZero) { tstats.invalidMessageDeliveries = 0 } + // update mesh time and activate mesh message delivery parameter if need be if (tstats.inMesh) { tstats.meshTime = now - tstats.graftTime @@ -147,68 +165,80 @@ export class PeerScore { } } }) + // decay P7 counter pstats.behaviourPenalty *= this.params.behaviourPenaltyDecay if (pstats.behaviourPenalty < decayToZero) { pstats.behaviourPenalty = 0 } - - this.scoreCache.set(id, { score: null, cacheUntil: 0 }) }) } /** * Return the score for a peer */ - score(id: string): number { + score(id: PeerIdStr): number { + this.metrics?.scoreFnCalls.inc() + const pstats = this.peerStats.get(id) if (!pstats) { return 0 } const now = Date.now() - let cacheEntry = this.scoreCache.get(id) - if (cacheEntry === undefined) { - cacheEntry = { score: null, cacheUntil: 0 } - this.scoreCache.set(id, cacheEntry) + const cacheEntry = this.scoreCache.get(id) + + // Found cached score within validity period + if (cacheEntry && cacheEntry.cacheUntil > now) { + return cacheEntry.score } - const { score, cacheUntil } = cacheEntry - if (cacheUntil > now && score !== null) return score + this.metrics?.scoreFnRuns.inc() + + const score = computeScore(id, pstats, this.params, this.peerIPs) + const cacheUntil = now + this.scoreCacheValidityMs - cacheEntry.score = computeScore(id, pstats, this.params, this.peerIPs) - // decayInterval is used to refresh score so we don't want to cache more than that - cacheEntry.cacheUntil = now + this.params.decayInterval - return cacheEntry.score + if (cacheEntry) { + this.metrics?.scoreCachedDelta.observe(Math.abs(score - cacheEntry.score)) + cacheEntry.score = score + cacheEntry.cacheUntil = cacheUntil + } else { + this.scoreCache.set(id, { score, cacheUntil }) + } + + return score } /** * Apply a behavioural penalty to a peer */ - addPenalty(id: string, penalty: number): void { + addPenalty(id: PeerIdStr, penalty: number, penaltyLabel: ScorePenalty): void { const pstats = this.peerStats.get(id) - if (!pstats) { - return + if (pstats) { + pstats.behaviourPenalty += penalty + this.metrics?.onScorePenalty(penaltyLabel) } - pstats.behaviourPenalty += penalty - this.scoreCache.set(id, { score: null, cacheUntil: 0 }) } - addPeer(id: string): void { + addPeer(id: PeerIdStr): void { // create peer stats (not including topic stats for each topic to be scored) // topic stats will be added as needed - const pstats = createPeerStats({ - connected: true - }) + const pstats: PeerStats = { + connected: true, + expire: 0, + topics: {}, + ips: [], + behaviourPenalty: 0 + } this.peerStats.set(id, pstats) // get + update peer IPs - const ips = this._getIPs(id) - this._setIPs(id, ips, pstats.ips) + const ips = this.getIPs(id) + this.setIPs(id, ips, pstats.ips) pstats.ips = ips } - removePeer(id: string): void { + removePeer(id: PeerIdStr): void { const pstats = this.peerStats.get(id) if (!pstats) { return @@ -217,14 +247,11 @@ export class PeerScore { // decide whether to retain the score; this currently only retains 
non-positive scores // to dissuade attacks on the score function. if (this.score(id) > 0) { - this._removeIPs(id, pstats.ips) + this.removeIPs(id, pstats.ips) this.peerStats.delete(id) return } - // delete score cache - this.scoreCache.delete(id) - // furthermore, when we decide to retain the score, the firstMessageDelivery counters are // reset to 0 and mesh delivery penalties applied. Object.entries(pstats.topics).forEach(([topic, tstats]) => { @@ -244,57 +271,48 @@ export class PeerScore { pstats.expire = Date.now() + this.params.retainScore } - graft(id: string, topic: string): void { + /** Handles scoring functionality as a peer GRAFTs to a topic. */ + graft(id: PeerIdStr, topic: TopicStr): void { const pstats = this.peerStats.get(id) - if (!pstats) { - return - } - - const tstats = ensureTopicStats(topic, pstats, this.params) - if (!tstats) { - return + if (pstats) { + const tstats = this.getPtopicStats(pstats, topic) + if (tstats) { + // if we are scoring the topic, update the mesh status. + tstats.inMesh = true + tstats.graftTime = Date.now() + tstats.meshTime = 0 + tstats.meshMessageDeliveriesActive = false + } } - - tstats.inMesh = true - tstats.graftTime = Date.now() - tstats.meshTime = 0 - tstats.meshMessageDeliveriesActive = false - this.scoreCache.set(id, { score: null, cacheUntil: 0 }) } - prune(id: string, topic: string): void { + /** Handles scoring functionality as a peer PRUNEs from a topic. */ + prune(id: PeerIdStr, topic: TopicStr): void { const pstats = this.peerStats.get(id) - if (!pstats) { - return - } - - const tstats = ensureTopicStats(topic, pstats, this.params) - if (!tstats) { - return - } + if (pstats) { + const tstats = this.getPtopicStats(pstats, topic) + if (tstats) { + // sticky mesh delivery rate failure penalty + const threshold = this.params.topics[topic].meshMessageDeliveriesThreshold + if (tstats.meshMessageDeliveriesActive && tstats.meshMessageDeliveries < threshold) { + const deficit = threshold - tstats.meshMessageDeliveries + tstats.meshFailurePenalty += deficit * deficit + } + tstats.meshMessageDeliveriesActive = false + tstats.inMesh = false - // sticky mesh delivery rate failure penalty - const threshold = this.params.topics[topic].meshMessageDeliveriesThreshold - if (tstats.meshMessageDeliveriesActive && tstats.meshMessageDeliveries < threshold) { - const deficit = threshold - tstats.meshMessageDeliveries - tstats.meshFailurePenalty += deficit * deficit + // TODO: Consider clearing score cache on important penalties + // this.scoreCache.delete(id) + } } - tstats.inMesh = false - tstats.meshMessageDeliveriesActive = false - this.scoreCache.set(id, { score: null, cacheUntil: 0 }) } - /** - * @param {InMessage} message - * @returns {Promise} - */ - async validateMessage(msgIdStr: string): Promise { + validateMessage(msgIdStr: MsgIdStr): void { this.deliveryRecords.ensureRecord(msgIdStr) } - async deliverMessage(msg: InMessage, msgIdStr: string): Promise { - const id = msg.receivedFrom - this._markFirstMessageDelivery(id, msg) + deliverMessage(from: PeerIdStr, msgIdStr: MsgIdStr, topic: TopicStr): void { + this.markFirstMessageDelivery(from, topic) const drec = this.deliveryRecords.ensureRecord(msgIdStr) const now = Date.now() @@ -303,7 +321,7 @@ export class PeerScore { if (drec.status !== DeliveryRecordStatus.unknown) { log( 'unexpected delivery: message from %s was first seen %s ago and has delivery status %d', - id, + from, now - drec.firstSeen, DeliveryRecordStatus[drec.status] ) @@ -316,19 +334,31 @@ export class PeerScore { 
drec.peers.forEach((p) => { // this check is to make sure a peer can't send us a message twice and get a double count // if it is a first delivery. - if (p !== id) { - this._markDuplicateMessageDelivery(p, msg) + if (p !== from) { + this.markDuplicateMessageDelivery(p, topic) } }) } - async rejectMessage(msg: InMessage, msgIdStr: string, reason: string): Promise { - const id = msg.receivedFrom + /** + * Similar to `rejectMessage` except does not require the message id or reason for an invalid message. + */ + rejectInvalidMessage(from: PeerIdStr, topic: TopicStr): void { + this.markInvalidMessageDelivery(from, topic) + } + + rejectMessage(from: PeerIdStr, msgIdStr: MsgIdStr, topic: TopicStr, reason: RejectReason): void { switch (reason) { - case ERR_MISSING_SIGNATURE: - case ERR_INVALID_SIGNATURE: - this._markInvalidMessageDelivery(id, msg) + // these messages are not tracked, but the peer is penalized as they are invalid + case RejectReason.Error: + this.markInvalidMessageDelivery(from, topic) + return + + // we ignore those messages, so do nothing. + case RejectReason.Blacklisted: return + + // the rest are handled after record creation } const drec = this.deliveryRecords.ensureRecord(msgIdStr) @@ -337,34 +367,36 @@ export class PeerScore { if (drec.status !== DeliveryRecordStatus.unknown) { log( 'unexpected rejection: message from %s was first seen %s ago and has delivery status %d', - id, + from, Date.now() - drec.firstSeen, DeliveryRecordStatus[drec.status] ) return } - switch (reason) { - case ERR_TOPIC_VALIDATOR_IGNORE: - // we were explicitly instructed by the validator to ignore the message but not penalize the peer - drec.status = DeliveryRecordStatus.ignored - return + if (reason === RejectReason.Ignore) { + // we were explicitly instructed by the validator to ignore the message but not penalize the peer + drec.status = DeliveryRecordStatus.ignored + drec.peers.clear() + return } // mark the message as invalid and penalize peers that have already forwarded it. drec.status = DeliveryRecordStatus.invalid - this._markInvalidMessageDelivery(id, msg) + this.markInvalidMessageDelivery(from, topic) drec.peers.forEach((p) => { - this._markInvalidMessageDelivery(p, msg) + this.markInvalidMessageDelivery(p, topic) }) + + // release the delivery time tracking map to free some memory early + drec.peers.clear() } - async duplicateMessage(msg: InMessage, msgIdStr: string): Promise { - const id = msg.receivedFrom + duplicateMessage(from: PeerIdStr, msgIdStr: MsgIdStr, topic: TopicStr): void { const drec = this.deliveryRecords.ensureRecord(msgIdStr) - if (drec.peers.has(id)) { + if (drec.peers.has(from)) { // we have already seen this duplicate return } @@ -373,16 +405,22 @@ export class PeerScore { case DeliveryRecordStatus.unknown: // the message is being validated; track the peer delivery and wait for // the Deliver/Reject/Ignore notification. - drec.peers.add(id) + drec.peers.add(from) break + case DeliveryRecordStatus.valid: // mark the peer delivery time to only count a duplicate delivery once. 
- drec.peers.add(id) - this._markDuplicateMessageDelivery(id, msg, drec.validated) + drec.peers.add(from) + this.markDuplicateMessageDelivery(from, topic, drec.validated) break + case DeliveryRecordStatus.invalid: // we no longer track delivery time - this._markInvalidMessageDelivery(id, msg) + this.markInvalidMessageDelivery(from, topic) + break + + case DeliveryRecordStatus.ignored: + // the message was ignored; do nothing (we don't know if it was valid) break } } @@ -390,109 +428,85 @@ export class PeerScore { /** * Increments the "invalid message deliveries" counter for all scored topics the message is published in. */ - _markInvalidMessageDelivery(id: string, msg: InMessage): void { - const pstats = this.peerStats.get(id) - if (!pstats) { - return - } - - msg.topicIDs.forEach((topic) => { - const tstats = ensureTopicStats(topic, pstats, this.params) - if (!tstats) { - return + private markInvalidMessageDelivery(from: PeerIdStr, topic: TopicStr): void { + const pstats = this.peerStats.get(from) + if (pstats) { + const tstats = this.getPtopicStats(pstats, topic) + if (tstats) { + tstats.invalidMessageDeliveries += 1 } - - tstats.invalidMessageDeliveries += 1 - }) - this.scoreCache.set(id, { score: null, cacheUntil: 0 }) + } } /** * Increments the "first message deliveries" counter for all scored topics the message is published in, * as well as the "mesh message deliveries" counter, if the peer is in the mesh for the topic. + * Messages already known (with the seenCache) are counted with markDuplicateMessageDelivery() */ - _markFirstMessageDelivery(id: string, msg: InMessage): void { - const pstats = this.peerStats.get(id) - if (!pstats) { - return - } + private markFirstMessageDelivery(from: PeerIdStr, topic: TopicStr): void { + const pstats = this.peerStats.get(from) + if (pstats) { + const tstats = this.getPtopicStats(pstats, topic) + if (tstats) { + let cap = this.params.topics[topic].firstMessageDeliveriesCap + tstats.firstMessageDeliveries = Math.min(cap, tstats.firstMessageDeliveries + 1) - msg.topicIDs.forEach((topic) => { - const tstats = ensureTopicStats(topic, pstats, this.params) - if (!tstats) { - return - } - - let cap = this.params.topics[topic].firstMessageDeliveriesCap - tstats.firstMessageDeliveries += 1 - if (tstats.firstMessageDeliveries > cap) { - tstats.firstMessageDeliveries = cap - } - - if (!tstats.inMesh) { - return - } - - cap = this.params.topics[topic].meshMessageDeliveriesCap - tstats.meshMessageDeliveries += 1 - if (tstats.meshMessageDeliveries > cap) { - tstats.meshMessageDeliveries = cap + if (tstats.inMesh) { + cap = this.params.topics[topic].meshMessageDeliveriesCap + tstats.meshMessageDeliveries = Math.min(cap, tstats.meshMessageDeliveries + 1) + } } - }) - this.scoreCache.set(id, { score: null, cacheUntil: 0 }) + } } /** * Increments the "mesh message deliveries" counter for messages we've seen before, * as long the message was received within the P3 window. */ - _markDuplicateMessageDelivery(id: string, msg: InMessage, validatedTime = 0): void { - const pstats = this.peerStats.get(id) - if (!pstats) { - return - } - - const now = validatedTime ? Date.now() : 0 - - msg.topicIDs.forEach((topic) => { - const tstats = ensureTopicStats(topic, pstats, this.params) - if (!tstats) { - return - } + private markDuplicateMessageDelivery(from: PeerIdStr, topic: TopicStr, validatedTime?: number): void { + const pstats = this.peerStats.get(from) + if (pstats) { + const now = validatedTime !== undefined ? 
Date.now() : 0 - if (!tstats.inMesh) { - return - } + const tstats = this.getPtopicStats(pstats, topic) + if (tstats && tstats.inMesh) { + const tparams = this.params.topics[topic] - const tparams = this.params.topics[topic] + // check against the mesh delivery window -- if the validated time is passed as 0, then + // the message was received before we finished validation and thus falls within the mesh + // delivery window. + if (validatedTime !== undefined) { + const deliveryDelayMs = now - validatedTime + const isLateDelivery = deliveryDelayMs > tparams.meshMessageDeliveriesWindow + this.metrics?.onDuplicateMsgDelivery(topic, deliveryDelayMs, isLateDelivery) - // check against the mesh delivery window -- if the validated time is passed as 0, then - // the message was received before we finished validation and thus falls within the mesh - // delivery window. - if (validatedTime && now > validatedTime + tparams.meshMessageDeliveriesWindow) { - return - } + if (isLateDelivery) { + return + } + } - const cap = tparams.meshMessageDeliveriesCap - tstats.meshMessageDeliveries += 1 - if (tstats.meshMessageDeliveries > cap) { - tstats.meshMessageDeliveries = cap + const cap = tparams.meshMessageDeliveriesCap + tstats.meshMessageDeliveries = Math.min(cap, tstats.meshMessageDeliveries + 1) } - }) - this.scoreCache.set(id, { score: null, cacheUntil: 0 }) + } } /** * Gets the current IPs for a peer. */ - _getIPs(id: string): string[] { - return this._connectionManager.getAll(PeerId.createFromB58String(id)).map((c) => c.remoteAddr.toOptions().host) + private getIPs(id: PeerIdStr): IPStr[] { + // TODO: Optimize conversions + const peerId = PeerId.createFromB58String(id) + + // PeerId.createFromB58String(id) + + return this.connectionManager.getAll(peerId).map((c) => c.remoteAddr.toOptions().host) } /** * Adds tracking for the new IPs in the list, and removes tracking from the obsolete IPs. */ - _setIPs(id: string, newIPs: string[], oldIPs: string[]): void { + private setIPs(id: PeerIdStr, newIPs: IPStr[], oldIPs: IPStr[]): void { // add the new IPs to the tracking // eslint-disable-next-line no-labels addNewIPs: for (const ip of newIPs) { @@ -531,14 +545,12 @@ export class PeerScore { this.peerIPs.delete(ip) } } - - this.scoreCache.set(id, { score: null, cacheUntil: 0 }) } /** * Removes an IP list from the tracking list for a peer. */ - _removeIPs(id: string, ips: string[]): void { + private removeIPs(id: PeerIdStr, ips: IPStr[]): void { ips.forEach((ip) => { const peers = this.peerIPs.get(ip) if (!peers) { @@ -550,19 +562,46 @@ export class PeerScore { this.peerIPs.delete(ip) } }) - - this.scoreCache.set(id, { score: null, cacheUntil: 0 }) } /** * Update all peer IPs to currently open connections */ - _updateIPs(): void { + private updateIPs(): void { this.peerStats.forEach((pstats, id) => { - const newIPs = this._getIPs(id) - this._setIPs(id, newIPs, pstats.ips) + const newIPs = this.getIPs(id) + this.setIPs(id, newIPs, pstats.ips) pstats.ips = newIPs - this.scoreCache.set(id, { score: null, cacheUntil: 0 }) }) } + + /** + * Returns topic stats if they exist, otherwise if the supplied parameters score the + * topic, inserts the default stats and returns a reference to those. If neither apply, returns None. 
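+   * (Here "None" means null: topics without score parameters are simply not tracked.)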
+ */ + private getPtopicStats(pstats: PeerStats, topic: TopicStr): TopicStats | null { + let topicStats: TopicStats | undefined = pstats.topics[topic] + + if (topicStats !== undefined) { + return topicStats + } + + if (this.params.topics[topic] !== undefined) { + topicStats = { + inMesh: false, + graftTime: 0, + meshTime: 0, + firstMessageDeliveries: 0, + meshMessageDeliveries: 0, + meshMessageDeliveriesActive: false, + meshFailurePenalty: 0, + invalidMessageDeliveries: 0 + } + pstats.topics[topic] = topicStats + + return topicStats + } + + return null + } } diff --git a/ts/score/peer-stats.ts b/ts/score/peer-stats.ts index 0e35a119..f1796076 100644 --- a/ts/score/peer-stats.ts +++ b/ts/score/peer-stats.ts @@ -1,113 +1,33 @@ -import { PeerScoreParams } from './peer-score-params' +import { TopicStr } from '../types' -export interface PeerStats { - /** - * true if the peer is currently connected - */ +export type PeerStats = { + /** true if the peer is currently connected */ connected: boolean - - /** - * expiration time of the score stats for disconnected peers - */ + /** expiration time of the score stats for disconnected peers */ expire: number - - /** - * per topic stats - */ - topics: Record - - /** - * IP tracking; store as string for easy processing - */ + /** per topic stats */ + topics: Record + /** IP tracking; store as string for easy processing */ ips: string[] - - /** - * behavioural pattern penalties (applied by the router) - */ + /** behavioural pattern penalties (applied by the router) */ behaviourPenalty: number } -export interface TopicStats { - /** - * true if the peer is in the mesh - */ +export type TopicStats = { + /** true if the peer is in the mesh */ inMesh: boolean - - /** - * time when the peer was (last) GRAFTed; valid only when in mesh - */ + /** time when the peer was (last) GRAFTed; valid only when in mesh */ graftTime: number - - /** - * time in mesh (updated during refresh/decay to avoid calling gettimeofday on - * every score invocation) - */ + /** time in mesh (updated during refresh/decay to avoid calling gettimeofday on every score invocation) */ meshTime: number - - /** - * first message deliveries - */ + /** first message deliveries */ firstMessageDeliveries: number - - /** - * mesh message deliveries - */ + /** mesh message deliveries */ meshMessageDeliveries: number - - /** - * true if the peer has been enough time in the mesh to activate mess message deliveries - */ + /** true if the peer has been enough time in the mesh to activate mess message deliveries */ meshMessageDeliveriesActive: boolean - - /** - * sticky mesh rate failure penalty counter - */ + /** sticky mesh rate failure penalty counter */ meshFailurePenalty: number - - /** - * invalid message counter - */ + /** invalid message counter */ invalidMessageDeliveries: number } - -export function createPeerStats(ps: Partial = {}): PeerStats { - return { - connected: false, - expire: 0, - ips: [], - behaviourPenalty: 0, - ...ps, - topics: ps.topics - ? 
Object.entries(ps.topics).reduce((topics, [topic, topicStats]) => { - topics[topic] = createTopicStats(topicStats) - return topics - }, {} as Record) - : {} - } -} - -export function createTopicStats(ts: Partial = {}): TopicStats { - return { - inMesh: false, - graftTime: 0, - meshTime: 0, - firstMessageDeliveries: 0, - meshMessageDeliveries: 0, - meshMessageDeliveriesActive: false, - meshFailurePenalty: 0, - invalidMessageDeliveries: 0, - ...ts - } -} - -export function ensureTopicStats(topic: string, ps: PeerStats, params: PeerScoreParams): TopicStats | undefined { - let ts = ps.topics[topic] - if (ts !== undefined) { - return ts - } - if (params.topics[topic] === undefined) { - return undefined - } - ps.topics[topic] = ts = createTopicStats() - return ts -} diff --git a/ts/score/score-param-decay.ts b/ts/score/score-param-decay.ts deleted file mode 100644 index 4e0efaa0..00000000 --- a/ts/score/score-param-decay.ts +++ /dev/null @@ -1,20 +0,0 @@ -const DefaultDecayInterval = 1000 -const DefaultDecayToZero = 0.01 - -/** - * ScoreParameterDecay computes the decay factor for a parameter, assuming the DecayInterval is 1s - * and that the value decays to zero if it drops below 0.01 - */ -export function scoreParameterDecay(decay: number): number { - return scoreParameterDecayWithBase(decay, DefaultDecayInterval, DefaultDecayToZero) -} - -/** - * ScoreParameterDecay computes the decay factor for a parameter using base as the DecayInterval - */ -export function scoreParameterDecayWithBase(decay: number, base: number, decayToZero: number): number { - // the decay is linear, so after n ticks the value is factor^n - // so factor^n = decayToZero => factor = decayToZero^(1/n) - const ticks = decay / base - return decayToZero ** (1 / ticks) -} diff --git a/ts/score/scoreMetrics.ts b/ts/score/scoreMetrics.ts new file mode 100644 index 00000000..df2bdee4 --- /dev/null +++ b/ts/score/scoreMetrics.ts @@ -0,0 +1,209 @@ +import { PeerScoreParams } from './peer-score-params' +import { PeerStats } from './peer-stats' + +type TopicLabel = string +type TopicStr = string +type TopicStrToLabel = Map + +export type TopicScoreWeights = { p1w: T; p2w: T; p3w: T; p3bw: T; p4w: T } +export type ScoreWeights = { + byTopic: Map> + p5w: T + p6w: T + p7w: T + score: T +} + +export function computeScoreWeights( + peer: string, + pstats: PeerStats, + params: PeerScoreParams, + peerIPs: Map>, + topicStrToLabel: TopicStrToLabel +): ScoreWeights { + let score = 0 + + const byTopic = new Map>() + + // topic stores + Object.entries(pstats.topics).forEach(([topic, tstats]) => { + // the topic parameters + // Aggregate by known topicLabel or throw to 'unknown'. This prevent too high cardinality + const topicLabel = topicStrToLabel.get(topic) ?? 
+    const topicParams = params.topics[topic]
+    if (topicParams === undefined) {
+      // we are not scoring this topic
+      return
+    }
+
+    let topicScores = byTopic.get(topicLabel)
+    if (!topicScores) {
+      topicScores = {
+        p1w: 0,
+        p2w: 0,
+        p3w: 0,
+        p3bw: 0,
+        p4w: 0
+      }
+      byTopic.set(topicLabel, topicScores)
+    }
+
+    let p1w = 0
+    let p2w = 0
+    let p3w = 0
+    let p3bw = 0
+    let p4w = 0
+
+    // P1: time in Mesh
+    if (tstats.inMesh) {
+      // timeInMeshCap is an upper bound, so cap P1 with Math.min
+      const p1 = Math.min(tstats.meshTime / topicParams.timeInMeshQuantum, topicParams.timeInMeshCap)
+      p1w += p1 * topicParams.timeInMeshWeight
+    }
+
+    // P2: first message deliveries
+    let p2 = tstats.firstMessageDeliveries
+    if (p2 > topicParams.firstMessageDeliveriesCap) {
+      p2 = topicParams.firstMessageDeliveriesCap
+    }
+    p2w += p2 * topicParams.firstMessageDeliveriesWeight
+
+    // P3: mesh message deliveries
+    if (
+      tstats.meshMessageDeliveriesActive &&
+      tstats.meshMessageDeliveries < topicParams.meshMessageDeliveriesThreshold
+    ) {
+      const deficit = topicParams.meshMessageDeliveriesThreshold - tstats.meshMessageDeliveries
+      const p3 = deficit * deficit
+      p3w += p3 * topicParams.meshMessageDeliveriesWeight
+    }
+
+    // P3b:
+    // NOTE: the weight of P3b is negative (validated in validateTopicScoreParams) so this detracts
+    const p3b = tstats.meshFailurePenalty
+    p3bw += p3b * topicParams.meshFailurePenaltyWeight
+
+    // P4: invalid messages
+    // NOTE: the weight of P4 is negative (validated in validateTopicScoreParams) so this detracts
+    const p4 = tstats.invalidMessageDeliveries * tstats.invalidMessageDeliveries
+    p4w += p4 * topicParams.invalidMessageDeliveriesWeight
+
+    // update score, mixing with topic weight
+    score += (p1w + p2w + p3w + p3bw + p4w) * topicParams.topicWeight
+
+    topicScores.p1w += p1w
+    topicScores.p2w += p2w
+    topicScores.p3w += p3w
+    topicScores.p3bw += p3bw
+    topicScores.p4w += p4w
+  })
+
+  // apply the topic score cap, if any
+  if (params.topicScoreCap > 0 && score > params.topicScoreCap) {
+    // Proportionally apply cap to all individual contributions.
+    // Compute the scale factor from the uncapped score; capping score first would always yield capF = 1
+    const capF = params.topicScoreCap / score
+    for (const ws of byTopic.values()) {
+      ws.p1w *= capF
+      ws.p2w *= capF
+      ws.p3w *= capF
+      ws.p3bw *= capF
+      ws.p4w *= capF
+    }
+
+    score = params.topicScoreCap
+  }
+
+  let p5w = 0
+  let p6w = 0
+  let p7w = 0
+
+  // P5: application-specific score
+  const p5 = params.appSpecificScore(peer)
+  p5w += p5 * params.appSpecificWeight
+
+  // P6: IP colocation factor
+  pstats.ips.forEach((ip) => {
+    if (params.IPColocationFactorWhitelist.has(ip)) {
+      return
+    }
+
+    // P6 has a cliff (IPColocationFactorThreshold)
+    // It's only applied if at least that many peers are connected to us from that source IP addr.
+    // It is quadratic, and the weight is negative (validated in validatePeerScoreParams)
+    const peersInIP = peerIPs.get(ip)
+    const numPeersInIP = peersInIP ? 
peersInIP.size : 0 + if (numPeersInIP > params.IPColocationFactorThreshold) { + const surplus = numPeersInIP - params.IPColocationFactorThreshold + const p6 = surplus * surplus + p6w += p6 * params.IPColocationFactorWeight + } + }) + + // P7: behavioural pattern penalty + const p7 = pstats.behaviourPenalty * pstats.behaviourPenalty + p7w += p7 * params.behaviourPenaltyWeight + + score += p5w + p6w + p7w + + return { + byTopic, + p5w, + p6w, + p7w, + score + } +} + +export function computeAllPeersScoreWeights( + peerIdStrs: Iterable, + peerStats: Map, + params: PeerScoreParams, + peerIPs: Map>, + topicStrToLabel: TopicStrToLabel +): ScoreWeights { + const sw: ScoreWeights = { + byTopic: new Map(), + p5w: [], + p6w: [], + p7w: [], + score: [] + } + + for (const peerIdStr of peerIdStrs) { + const pstats = peerStats.get(peerIdStr) + if (pstats) { + const swPeer = computeScoreWeights(peerIdStr, pstats, params, peerIPs, topicStrToLabel) + + for (const [topic, swPeerTopic] of swPeer.byTopic) { + let swTopic = sw.byTopic.get(topic) + if (!swTopic) { + swTopic = { + p1w: [], + p2w: [], + p3w: [], + p3bw: [], + p4w: [] + } + sw.byTopic.set(topic, swTopic) + } + + swTopic.p1w.push(swPeerTopic.p1w) + swTopic.p2w.push(swPeerTopic.p2w) + swTopic.p3w.push(swPeerTopic.p3w) + swTopic.p3bw.push(swPeerTopic.p3bw) + swTopic.p4w.push(swPeerTopic.p4w) + } + + sw.p5w.push(swPeer.p5w) + sw.p6w.push(swPeer.p6w) + sw.p7w.push(swPeer.p7w) + sw.score.push(swPeer.score) + } else { + sw.p5w.push(0) + sw.p6w.push(0) + sw.p7w.push(0) + sw.score.push(0) + } + } + + return sw +} diff --git a/ts/tracer.ts b/ts/tracer.ts index dac6c79a..7009b527 100644 --- a/ts/tracer.ts +++ b/ts/tracer.ts @@ -1,8 +1,6 @@ -import { GossipsubIWantFollowupTime } from './constants' import { messageIdToString } from './utils' -import pubsubErrors = require('libp2p-interfaces/src/pubsub/errors') - -const { ERR_INVALID_SIGNATURE, ERR_MISSING_SIGNATURE } = pubsubErrors.codes +import { MsgIdStr, PeerIdStr, RejectReason } from './types' +import { Metrics } from './metrics' /** * IWantTracer is an internal tracer that tracks IWANT requests in order to penalize @@ -17,69 +15,120 @@ export class IWantTracer { * Promises to deliver a message * Map per message id, per peer, promise expiration time */ - promises = new Map>() + private readonly promises = new Map>() + /** + * First request time by msgId. Used for metrics to track expire times. 
+ * Necessary to know if peers are actually breaking promises or simply sending them a bit later + */ + private readonly requestMsByMsg = new Map() + private readonly requestMsByMsgExpire: number + + constructor(private readonly gossipsubIWantFollowupMs: number, private readonly metrics: Metrics | null) { + this.requestMsByMsgExpire = 10 * gossipsubIWantFollowupMs + } + + get size(): number { + return this.promises.size + } + + get requestMsByMsgSize(): number { + return this.requestMsByMsg.size + } /** * Track a promise to deliver a message from a list of msgIds we are requesting */ - addPromise(p: string, msgIds: Uint8Array[]): void { + addPromise(from: PeerIdStr, msgIds: Uint8Array[]): void { // pick msgId randomly from the list const ix = Math.floor(Math.random() * msgIds.length) const msgId = msgIds[ix] const msgIdStr = messageIdToString(msgId) - let peers = this.promises.get(msgIdStr) - if (!peers) { - peers = new Map() - this.promises.set(msgIdStr, peers) + let expireByPeer = this.promises.get(msgIdStr) + if (!expireByPeer) { + expireByPeer = new Map() + this.promises.set(msgIdStr, expireByPeer) } - if (!peers.has(p)) { - peers.set(p, Date.now() + GossipsubIWantFollowupTime) + const now = Date.now() + + // If a promise for this message id and peer already exists we don't update the expiry + if (!expireByPeer.has(from)) { + expireByPeer.set(from, now + this.gossipsubIWantFollowupMs) + + if (this.metrics) { + this.metrics.iwantPromiseStarted.inc(1) + if (!this.requestMsByMsg.has(msgIdStr)) { + this.requestMsByMsg.set(msgIdStr, now) + } + } } } /** * Returns the number of broken promises for each peer who didn't follow up on an IWANT request. + * + * This should be called not too often relative to the expire times, since it iterates over the whole data. */ - getBrokenPromises(): Map { + getBrokenPromises(): Map { const now = Date.now() - const result = new Map() + const result = new Map() - this.promises.forEach((peers, msgId) => { - peers.forEach((expire, p) => { + let brokenPromises = 0 + + this.promises.forEach((expireByPeer, msgId) => { + expireByPeer.forEach((expire, p) => { // the promise has been broken if (expire < now) { // add 1 to result result.set(p, (result.get(p) ?? 0) + 1) // delete from tracked promises - peers.delete(p) + expireByPeer.delete(p) + // for metrics + brokenPromises++ } }) // clean up empty promises for a msgId - if (!peers.size) { + if (!expireByPeer.size) { this.promises.delete(msgId) } }) + this.metrics?.iwantPromiseBroken.inc(brokenPromises) + return result } /** * Someone delivered a message, stop tracking promises for it */ - async deliverMessage(msgIdStr: string): Promise { - this.promises.delete(msgIdStr) + deliverMessage(msgIdStr: MsgIdStr): void { + this.trackMessage(msgIdStr) + + const expireByPeer = this.promises.get(msgIdStr) + + // Expired promise, check requestMsByMsg + if (expireByPeer) { + this.promises.delete(msgIdStr) + + if (this.metrics) { + this.metrics.iwantPromiseResolved.inc(1) + this.metrics.iwantPromiseResolvedPeers.inc(expireByPeer.size) + } + } } /** * A message got rejected, so we can stop tracking promises and let the score penalty apply from invalid message delivery, * unless its an obviously invalid message. */ - async rejectMessage(msgIdStr: string, reason: string): Promise { + rejectMessage(msgIdStr: MsgIdStr, reason: RejectReason): void { + this.trackMessage(msgIdStr) + + // A message got rejected, so we can stop tracking promises and let the score penalty apply. 
+    // With the exception of obviously invalid messages
     switch (reason) {
-      case ERR_INVALID_SIGNATURE:
-      case ERR_MISSING_SIGNATURE:
+      case RejectReason.Error:
        return
    }
@@ -89,4 +138,29 @@ export class IWantTracer {
  clear(): void {
    this.promises.clear()
  }
+
+  prune(): void {
+    const maxMs = Date.now() - this.requestMsByMsgExpire
+
+    for (const [k, v] of this.requestMsByMsg.entries()) {
+      if (v < maxMs) {
+        // messages that stay too long in the requestMsByMsg map, delete
+        this.requestMsByMsg.delete(k)
+      } else {
+        // recent messages, keep them
+        // the Map iterates in insertion order, so we can stop at the first entry that is recent enough
+        break
+      }
+    }
+  }
+
+  private trackMessage(msgIdStr: MsgIdStr): void {
+    if (this.metrics) {
+      const requestMs = this.requestMsByMsg.get(msgIdStr)
+      if (requestMs !== undefined) {
+        this.metrics.iwantPromiseDeliveryTime.observe((Date.now() - requestMs) / 1000)
+        this.requestMsByMsg.delete(msgIdStr)
+      }
+    }
+  }
 }
diff --git a/ts/types.ts b/ts/types.ts
new file mode 100644
index 00000000..599807f3
--- /dev/null
+++ b/ts/types.ts
@@ -0,0 +1,189 @@
+import PeerId from 'peer-id'
+import { keys } from 'libp2p-crypto'
+import { Multiaddr } from 'multiaddr'
+import { RPC } from './message/rpc'
+
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+type PromiseValue<T extends Promise<any>> = T extends Promise<infer V> ? V : never
+
+type PrivateKey = PromiseValue<ReturnType<typeof keys.unmarshalPrivateKey>>
+
+export type MsgIdStr = string
+export type PeerIdStr = string
+export type TopicStr = string
+
+export interface AddrInfo {
+  id: PeerId
+  addrs: Multiaddr[]
+}
+
+/**
+ * Compute a local non-spec'ed msg-id for faster de-duplication of seen messages.
+ * Used exclusively for a local seen_cache
+ */
+export type FastMsgIdFn = (msg: RPC.IMessage) => string
+
+/**
+ * Compute spec'ed msg-id. Used for IHAVE / IWANT messages
+ */
+export type MsgIdFn = (msg: GossipsubMessage) => Promise<Uint8Array> | Uint8Array
+
+export interface DataTransform {
+  /**
+   * Takes the data published by peers on a topic and transforms the data.
+   * Should be the reverse of outboundTransform(). Example:
+   * - `inboundTransform()`: decompress snappy payload
+   * - `outboundTransform()`: compress snappy payload
+   */
+  inboundTransform(topic: TopicStr, data: Uint8Array): Uint8Array
+
+  /**
+   * Takes the data to be published (a topic and associated data) and transforms the data. The
+   * transformed data will then be used to create a `RawGossipsubMessage` to be sent to peers.
+   */
+  outboundTransform(topic: TopicStr, data: Uint8Array): Uint8Array
+}
+
+/**
+ * Custom validator function per topic.
+ * Must return or resolve quickly (< 100ms) to prevent causing penalties for late messages.
+ * If you need validation that may take longer, use the `asyncValidation` option and report the
+ * validation result through `Gossipsub.reportValidationResult`
+ */
+export type TopicValidatorFn = (
+  topic: TopicStr,
+  msg: GossipsubMessage,
+  propagationSource: PeerId
+) => MessageAcceptance | Promise<MessageAcceptance>
+
+export enum SignaturePolicy {
+  /**
+   * On the producing side:
+   * - Build messages with the signature, key (from may be enough for certain inlineable public key types), from and seqno fields.
+   *
+   * On the consuming side:
+   * - Enforce the fields to be present, reject otherwise.
+   * - Propagate only if the fields are valid and the signature can be verified, reject otherwise.
+   */
+  StrictSign = 'StrictSign',
+  /**
+   * On the producing side:
+   * - Build messages without the signature, key, from and seqno fields.
+   * - The corresponding protobuf key-value pairs are absent from the marshalled message, not just empty. 
+ * + * On the consuming side: + * - Enforce the fields to be absent, reject otherwise. + * - Propagate only if the fields are absent, reject otherwise. + * - A message_id function will not be able to use the above fields, and should instead rely on the data field. A commonplace strategy is to calculate a hash. + */ + StrictNoSign = 'StrictNoSign' +} + +export enum PublishConfigType { + Signing, + Author, + Anonymous +} + +export type PublishConfig = + | { + type: PublishConfigType.Signing + author: PeerId + key: Uint8Array + privateKey: PrivateKey + } + | { type: PublishConfigType.Author; author: PeerId } + | { type: PublishConfigType.Anonymous } + +export enum MessageAcceptance { + /// The message is considered valid, and it should be delivered and forwarded to the network. + Accept = 'accept', + /// The message is neither delivered nor forwarded to the network, but the router does not + /// trigger the P₄ penalty. + Ignore = 'ignore', + /// The message is considered invalid, and it should be rejected and trigger the P₄ penalty. + Reject = 'reject' +} + +export type RejectReasonObj = + | { reason: RejectReason.Error; error: ValidateError } + | { reason: Exclude } + +export enum RejectReason { + /** + * The message failed the configured validation during decoding. + * SelfOrigin is considered a ValidationError + */ + Error = 'error', + /** + * Custom validator fn reported status IGNORE. + */ + Ignore = 'ignore', + /** + * Custom validator fn reported status REJECT. + */ + Reject = 'reject', + /** + * The peer that sent the message OR the source from field is blacklisted. + * Causes messages to be ignored, not penalized, neither do score record creation. + */ + Blacklisted = 'blacklisted' +} + +export enum ValidateError { + /// The message has an invalid signature, + InvalidSignature = 'invalid_signature', + /// The sequence number was the incorrect size + InvalidSeqno = 'invalid_seqno', + /// The PeerId was invalid + InvalidPeerId = 'invalid_peerid', + /// Signature existed when validation has been sent to + /// [`crate::behaviour::MessageAuthenticity::Anonymous`]. + SignaturePresent = 'signature_present', + /// Sequence number existed when validation has been sent to + /// [`crate::behaviour::MessageAuthenticity::Anonymous`]. + SeqnoPresent = 'seqno_present', + /// Message source existed when validation has been sent to + /// [`crate::behaviour::MessageAuthenticity::Anonymous`]. + FromPresent = 'from_present', + /// The data transformation failed. + TransformFailed = 'transform_failed' +} + +export enum MessageStatus { + duplicate = 'duplicate', + invalid = 'invalid', + valid = 'valid' +} + +/** + * Gossipsub message with TRANSFORMED data + */ +export type GossipsubMessage = { + /// Id of the peer that published this message. + from?: Uint8Array + + /// Content of the message. + data: Uint8Array + + /// A random sequence number. + // Keeping as Uint8Array for cheaper concatenating on msgIdFn + seqno?: Uint8Array + + /// The topic this message belongs to + topic: TopicStr +} + +/** + * Typesafe conversion of MessageAcceptance -> RejectReason. 
TS ensures all values covered
+ */
+export function rejectReasonFromAcceptance(
+  acceptance: Exclude<MessageAcceptance, MessageAcceptance.Accept>
+): RejectReason.Ignore | RejectReason.Reject {
+  switch (acceptance) {
+    case MessageAcceptance.Ignore:
+      return RejectReason.Ignore
+    case MessageAcceptance.Reject:
+      return RejectReason.Reject
+  }
+}
diff --git a/ts/utils/buildRawMessage.ts b/ts/utils/buildRawMessage.ts
new file mode 100644
index 00000000..4c80d53d
--- /dev/null
+++ b/ts/utils/buildRawMessage.ts
@@ -0,0 +1,138 @@
+import { randomBytes } from 'iso-random-stream'
+import { concat as uint8ArrayConcat } from 'uint8arrays/concat'
+import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string'
+import { keys } from 'libp2p-crypto'
+import PeerId, { createFromBytes } from 'peer-id'
+import { RPC } from '../message/rpc'
+import { PublishConfig, PublishConfigType, SignaturePolicy, TopicStr, ValidateError } from '../types'
+
+type PublicKey = ReturnType<typeof keys.unmarshalPublicKey>
+export const SignPrefix = uint8ArrayFromString('libp2p-pubsub:')
+
+export async function buildRawMessage(
+  publishConfig: PublishConfig,
+  topic: TopicStr,
+  transformedData: Uint8Array
+): Promise<RPC.IMessage> {
+  switch (publishConfig.type) {
+    case PublishConfigType.Signing: {
+      const rpcMsg: RPC.IMessage = {
+        from: publishConfig.author.toBytes(),
+        data: transformedData,
+        seqno: randomBytes(8),
+        topic,
+        signature: undefined, // Exclude signature field for signing
+        key: undefined // Exclude key field for signing
+      }
+
+      // Get the message in bytes, and prepend with the pubsub prefix
+      // the signature is over the bytes "libp2p-pubsub:<protobuf-message>"
+      const bytes = uint8ArrayConcat([SignPrefix, RPC.Message.encode(rpcMsg).finish()])
+
+      rpcMsg.signature = await publishConfig.privateKey.sign(bytes)
+      rpcMsg.key = publishConfig.key
+
+      return rpcMsg
+    }
+
+    case PublishConfigType.Author: {
+      return {
+        from: publishConfig.author.toBytes(),
+        data: transformedData,
+        seqno: randomBytes(8),
+        topic,
+        signature: undefined,
+        key: undefined
+      }
+    }
+
+    case PublishConfigType.Anonymous: {
+      return {
+        from: undefined,
+        data: transformedData,
+        seqno: undefined,
+        topic,
+        signature: undefined,
+        key: undefined
+      }
+    }
+  }
+}
+
+export type ValidationResult = { valid: true; fromPeerId: PeerId | null } | { valid: false; error: ValidateError }
+
+export async function validateToRawMessage(
+  signaturePolicy: SignaturePolicy,
+  msg: RPC.IMessage
+): Promise<ValidationResult> {
+  // If strict-sign, verify all
+  // If anonymous (no-sign), ensure no signature, seqno or key fields are present
+
+  switch (signaturePolicy) {
+    case SignaturePolicy.StrictNoSign:
+      if (msg.signature != null) return { valid: false, error: ValidateError.SignaturePresent }
+      if (msg.seqno != null) return { valid: false, error: ValidateError.SeqnoPresent }
+      if (msg.key != null) return { valid: false, error: ValidateError.FromPresent }
+
+      return { valid: true, fromPeerId: null }
+
+    case SignaturePolicy.StrictSign: {
+      // Verify seqno
+      if (msg.seqno == null) return { valid: false, error: ValidateError.InvalidSeqno }
+      if (msg.seqno.length !== 8) {
+        return { valid: false, error: ValidateError.InvalidSeqno }
+      }
+
+      if (msg.signature == null) return { valid: false, error: ValidateError.InvalidSignature }
+      if (msg.from == null) return { valid: false, error: ValidateError.InvalidPeerId }
+
+      let fromPeerId: PeerId
+      try {
+        // TODO: Fix PeerId types
+        fromPeerId = createFromBytes(msg.from)
+      } catch (e) {
+        return { valid: false, error: ValidateError.InvalidPeerId }
+      }
+
+      // - check from defined
+      // - transform source to PeerId
+      // - parse signature
+      // - get .key, 
else from source + // - check key == source if present + // - verify sig + + let publicKey: PublicKey + if (msg.key) { + publicKey = keys.unmarshalPublicKey(msg.key) + // TODO: Should `fromPeerId.pubKey` be optional? + if (fromPeerId.pubKey !== undefined && !publicKey.equals(fromPeerId.pubKey)) { + return { valid: false, error: ValidateError.InvalidPeerId } + } + } else { + if (fromPeerId.pubKey === undefined) { + return { valid: false, error: ValidateError.InvalidPeerId } + } + publicKey = fromPeerId.pubKey + } + + const rpcMsgPreSign: RPC.IMessage = { + from: msg.from, + data: msg.data, + seqno: msg.seqno, + topic: msg.topic, + signature: undefined, // Exclude signature field for signing + key: undefined // Exclude key field for signing + } + + // Get the message in bytes, and prepend with the pubsub prefix + // the signature is over the bytes "libp2p-pubsub:" + const bytes = uint8ArrayConcat([SignPrefix, RPC.Message.encode(rpcMsgPreSign).finish()]) + + if (!(await publicKey.verify(bytes, msg.signature))) { + return { valid: false, error: ValidateError.InvalidSignature } + } + + return { valid: true, fromPeerId } + } + } +} diff --git a/ts/utils/create-gossip-rpc.ts b/ts/utils/create-gossip-rpc.ts index 03866b60..1e9793bf 100644 --- a/ts/utils/create-gossip-rpc.ts +++ b/ts/utils/create-gossip-rpc.ts @@ -5,16 +5,15 @@ import { RPC, IRPC } from '../message/rpc' /** * Create a gossipsub RPC object */ -export function createGossipRpc(msgs: RPC.IMessage[] = [], control: Partial = {}): IRPC { +export function createGossipRpc(messages: RPC.IMessage[] = [], control: Partial = {}): IRPC { return { subscriptions: [], - msgs: msgs, + messages, control: { - ihave: [], - iwant: [], - graft: [], - prune: [], - ...control + ihave: control.ihave ?? [], + iwant: control.iwant ?? [], + graft: control.graft ?? [], + prune: control.prune ?? [] } } } diff --git a/ts/utils/index.ts b/ts/utils/index.ts index fed27ae8..9daeb9c9 100644 --- a/ts/utils/index.ts +++ b/ts/utils/index.ts @@ -2,3 +2,4 @@ export * from './create-gossip-rpc' export * from './shuffle' export * from './has-gossip-protocol' export * from './messageIdToString' +export { getPublishConfigFromPeerId } from './publishConfig' diff --git a/ts/utils/msgIdFn.ts b/ts/utils/msgIdFn.ts new file mode 100644 index 00000000..a20a90e2 --- /dev/null +++ b/ts/utils/msgIdFn.ts @@ -0,0 +1,27 @@ +import { sha256 } from 'multiformats/hashes/sha2' +import { GossipsubMessage } from '../types' + +export type PeerIdStr = string + +/** + * Generate a message id, based on the `key` and `seqno` + */ +export function msgIdFnStrictSign(msg: GossipsubMessage): Uint8Array { + // Should never happen + if (!msg.from) throw Error('missing from field') + if (!msg.seqno) throw Error('missing seqno field') + + // TODO: Should use .from here or key? + const msgId = new Uint8Array(msg.from.length + msg.seqno.length) + msgId.set(msg.from, 0) + msgId.set(msg.seqno, msg.from.length) + + return msgId +} + +/** + * Generate a message id, based on message `data` + */ +export async function msgIdFnStrictNoSign(msg: GossipsubMessage): Promise { + return sha256.encode(msg.data) +} diff --git a/ts/utils/publishConfig.ts b/ts/utils/publishConfig.ts new file mode 100644 index 00000000..889da8c1 --- /dev/null +++ b/ts/utils/publishConfig.ts @@ -0,0 +1,40 @@ +// import { keys } from 'libp2p-crypto' +import PeerId from 'peer-id' +import { PublishConfig, PublishConfigType, SignaturePolicy } from '../types' + +/** + * Prepare a PublishConfig object from a PeerId. 
+ */ +export function getPublishConfigFromPeerId(signaturePolicy: SignaturePolicy, peerId?: PeerId): PublishConfig { + switch (signaturePolicy) { + case SignaturePolicy.StrictSign: { + if (!peerId) { + throw Error('Must provide PeerId') + } + + if (peerId.privKey == null) { + throw Error('Cannot sign message, no private key present') + } + + if (peerId.pubKey == null) { + throw Error('Cannot sign message, no public key present') + } + + // Transform privateKey once at initialization time instead of once per message + // const privateKey = await keys.unmarshalPrivateKey(peerId.privateKey) + const privateKey = peerId.privKey + + return { + type: PublishConfigType.Signing, + author: peerId, + key: peerId.pubKey.bytes, + privateKey + } + } + + case SignaturePolicy.StrictNoSign: + return { + type: PublishConfigType.Anonymous + } + } +} diff --git a/ts/utils/time-cache.ts b/ts/utils/time-cache.ts index d6b80c51..58708153 100644 --- a/ts/utils/time-cache.ts +++ b/ts/utils/time-cache.ts @@ -13,28 +13,26 @@ type CacheValue = { * This gives 4x - 5x performance gain compared to npm TimeCache */ export class SimpleTimeCache { - private entries = new Map>() - private validityMs: number - private lastPruneTime = 0 + private readonly entries = new Map>() + private readonly validityMs: number - constructor(options: SimpleTimeCacheOpts) { - this.validityMs = options.validityMs + constructor(opts: SimpleTimeCacheOpts) { + this.validityMs = opts.validityMs // allow negative validityMs so that this does not cache anything, spec test compliance.spec.js // sends duplicate messages and expect peer to receive all. Application likely uses positive validityMs } + get size(): number { + return this.entries.size + } + put(key: string, value: T): void { this.entries.set(key, { value, validUntilMs: Date.now() + this.validityMs }) - this.prune() } prune(): void { const now = Date.now() - if (now - this.lastPruneTime < 200) { - return - } - this.lastPruneTime = now for (const [k, v] of this.entries.entries()) { if (v.validUntilMs < now) { @@ -56,7 +54,6 @@ export class SimpleTimeCache { } clear(): void { - this.entries = new Map() - this.lastPruneTime = 0 + this.entries.clear() } } diff --git a/tsconfig.build.json b/tsconfig.build.json index 7ef76a4f..51e4c13a 100644 --- a/tsconfig.build.json +++ b/tsconfig.build.json @@ -5,11 +5,19 @@ "module": "commonjs", "lib": ["es2020", "dom"], "target": "es2020", + + "strict": true, + "alwaysStrict": true, + "strictNullChecks": true, + "strictFunctionTypes": true, + "strictBindCallApply": true, + "strictPropertyInitialization": true, + "useUnknownInCatchVariables": true, "noImplicitAny": true, "noImplicitThis": true, - "strictFunctionTypes": true, - "strictNullChecks": true, + "noImplicitReturns": true, "skipLibCheck": true, + "esModuleInterop": true, "declaration": true, "types": ["node", "mocha"]