From 7d18e89ea29de97630f781249b2636c12b2effc4 Mon Sep 17 00:00:00 2001
From: Dennis Trautwein
Date: Mon, 14 Aug 2023 15:48:11 +0200
Subject: [PATCH 01/64] Start go-libp2p-kad-dht v2 package

---
 v2/config.go      | 105 ++++++++++++++++++++++++++++++++++++++++++++++
 v2/config_test.go |  52 +++++++++++++++++++++++
 v2/dht.go         |  70 +++++++++++++++++++++++++++++++
 v2/dht_test.go    |  78 ++++++++++++++++++++++++++++++++++
 v2/go.mod         |   3 ++
 v2/kad.go         |  55 ++++++++++++++++++++++++
 v2/routing.go     |  43 +++++++++++++++++++
 7 files changed, 406 insertions(+)
 create mode 100644 v2/config.go
 create mode 100644 v2/config_test.go
 create mode 100644 v2/dht.go
 create mode 100644 v2/dht_test.go
 create mode 100644 v2/go.mod
 create mode 100644 v2/kad.go
 create mode 100644 v2/routing.go

diff --git a/v2/config.go b/v2/config.go
new file mode 100644
index 00000000..b710d86c
--- /dev/null
+++ b/v2/config.go
@@ -0,0 +1,105 @@
+package dht
+
+import (
+	"fmt"
+
+	"github.com/plprobelab/go-kademlia/coord"
+	"github.com/plprobelab/go-kademlia/kad"
+	"github.com/plprobelab/go-kademlia/key"
+)
+
+type (
+	// ModeOpt describes the mode in which this DHT process should operate.
+	// Possible options are client, server, and any variant that switches
+	// between both automatically based on public reachability. The DHT receives
+	// reachability updates from libp2p via the EvtLocalReachabilityChanged
+	// event. A DHT that operates in client mode won't register a stream handler
+	// for incoming requests and therefore won't store, e.g., any provider or
+	// IPNS records. A DHT in server mode, on the other hand, does all of that.
+	//
+	// The `mode` type, on the other hand, captures the current state that the
+	// DHT is in. This can either be client or server.
+	ModeOpt string
+
+	// mode describes the mode in which the DHT currently operates. Because the
+	// ModeOpt type has options that automatically switch between client and
+	// server mode based on public connectivity, the DHT mode at any point in
+	// time can differ from the desired mode. Therefore, we define this second
+	// mode type that only has the two forms: client or server.
+	mode string
+)
+
+const (
+	// ModeOptClient configures the DHT to only operate in client mode
+	// regardless of potential public reachability.
+	ModeOptClient ModeOpt = "client"
+
+	// ModeOptServer configures the DHT to always operate in server mode
+	// regardless of potentially not being publicly reachable.
+	ModeOptServer ModeOpt = "server"
+
+	// ModeOptAutoClient configures the DHT to start operating in client mode
+	// and to switch to server mode if public reachability is detected.
+	ModeOptAutoClient ModeOpt = "auto-client"
+
+	// ModeOptAutoServer configures the DHT to start operating in server mode
+	// and to switch to client mode if it detects that it is not publicly
+	// reachable.
+	ModeOptAutoServer ModeOpt = "auto-server"
+
+	// modeClient means that the DHT is currently operating in client mode.
+	// For more information, check ModeOpt documentation.
+	modeClient mode = "client"
+
+	// modeServer means that the DHT is currently operating in server mode.
+	// For more information, check ModeOpt documentation.
+	modeServer mode = "server"
+)
+
+// Config contains all the configuration options for a DHT. Use DefaultConfig
+// to build up your own configuration struct. The DHT constructor New uses the
+// Validate method below to test for violations of configuration invariants.
+type Config struct {
+	// Mode defines if the DHT should operate as a server or client or switch
+	// between both automatically (see ModeOpt).
+	Mode ModeOpt
+
+	// Kademlia holds the configuration of the underlying Kademlia implementation.
+	Kademlia *coord.Config
+
+	// RoutingTable holds a reference to the specific routing table
+	// implementation that this DHT should use. If this field is nil, the
+	// triert.TrieRT routing table will be used.
+	RoutingTable kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]]
+}
+
+// DefaultConfig returns a configuration struct that can be used as-is to
+// instantiate a fully functional DHT client.
+func DefaultConfig() *Config {
+	return &Config{
+		Mode:         ModeOptAutoClient,
+		Kademlia:     coord.DefaultConfig(),
+		RoutingTable: nil,
+	}
+}
+
+// Validate validates the configuration struct it is called on. It returns
+// an error if any configuration issue was detected and nil if this is
+// a valid configuration.
+func (c *Config) Validate() error {
+	switch c.Mode {
+	case ModeOptClient, ModeOptServer, ModeOptAutoClient, ModeOptAutoServer:
+	default:
+		return fmt.Errorf("invalid mode option: %s", c.Mode)
+	}
+
+	if c.Kademlia == nil {
+		return fmt.Errorf("kademlia configuration must not be nil")
+	}
+
+	if err := c.Kademlia.Validate(); err != nil {
+		return fmt.Errorf("invalid kademlia configuration: %w", err)
+	}
+
+	return nil
+}
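A minimal sketch of how the pieces above are meant to fit together. The identifiers come from this patch; the server-mode choice and the error handling are purely illustrative:

	cfg := DefaultConfig()   // Mode defaults to ModeOptAutoClient
	cfg.Mode = ModeOptServer // e.g., force server mode
	if err := cfg.Validate(); err != nil {
		// reject the configuration before handing it to the DHT constructor
	}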
diff --git a/v2/config_test.go b/v2/config_test.go
new file mode 100644
index 00000000..e9268526
--- /dev/null
+++ b/v2/config_test.go
@@ -0,0 +1,52 @@
+package dht
+
+import (
+	"testing"
+)
+
+func TestConfig_Validate(t *testing.T) {
+	tests := []struct {
+		name    string
+		mutate  func(*Config) *Config
+		wantErr bool
+	}{
+		{
+			name:    "happy path",
+			wantErr: false,
+			mutate:  func(c *Config) *Config { return c },
+		},
+		{
+			name:    "invalid mode",
+			wantErr: true,
+			mutate: func(c *Config) *Config {
+				c.Mode = "invalid"
+				return c
+			},
+		},
+		{
+			name:    "nil Kademlia configuration",
+			wantErr: true,
+			mutate: func(c *Config) *Config {
+				c.Kademlia = nil
+				return c
+			},
+		},
+		{
+			name:    "invalid Kademlia configuration",
+			wantErr: true,
+			mutate: func(c *Config) *Config {
+				c.Kademlia.Clock = nil
+				return c
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			c := DefaultConfig()
+			c = tt.mutate(c)
+			if err := c.Validate(); (err != nil) != tt.wantErr {
+				t.Errorf("Validate() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
diff --git a/v2/dht.go b/v2/dht.go
new file mode 100644
index 00000000..79d50865
--- /dev/null
+++ b/v2/dht.go
@@ -0,0 +1,70 @@
+package dht
+
+import (
+	"fmt"
+
+	"github.com/libp2p/go-libp2p/core/host"
+	ma "github.com/multiformats/go-multiaddr"
+	"github.com/plprobelab/go-kademlia/coord"
+	"github.com/plprobelab/go-kademlia/kad"
+	"github.com/plprobelab/go-kademlia/key"
+	"github.com/plprobelab/go-kademlia/routing/triert"
+)
+
+// DHT is an implementation of Kademlia with S/Kademlia modifications.
+// It is used to implement the base Routing module.
+type DHT struct {
+	host host.Host // host holds a reference to the underlying libp2p host
+	cfg  *Config   // cfg holds a reference to the DHT configuration struct
+	mode mode      // mode indicates the current mode the DHT operates in. This can differ from the desired mode if set to auto-client or auto-server.
+
+	kad *coord.Coordinator[key.Key256, ma.Multiaddr] // the go-kademlia reference
+	rt  kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]]
+}
+
+// New constructs a new DHT for the given underlying host and with the given
+// configuration. Use DefaultConfig() to construct a configuration.
+func New(h host.Host, cfg *Config) (*DHT, error) {
+	var err error
+
+	// check if the configuration is valid
+	if err = cfg.Validate(); err != nil {
+		return nil, fmt.Errorf("validate DHT config: %w", err)
+	}
+
+	d := &DHT{
+		host: h,
+		cfg:  cfg,
+	}
+	nid := nodeID(d.host.ID())
+
+	// Use the configured routing table if it was provided
+	if cfg.RoutingTable != nil {
+		d.rt = cfg.RoutingTable
+	} else {
+		rtCfg := triert.DefaultConfig[key.Key256, kad.NodeID[key.Key256]]()
+		d.rt, err = triert.New[key.Key256, kad.NodeID[key.Key256]](nid, rtCfg)
+		if err != nil {
+			return nil, fmt.Errorf("new trie routing table: %w", err)
+		}
+	}
+
+	// instantiate a new Kademlia DHT coordinator.
+	d.kad, err = coord.NewCoordinator[key.Key256, ma.Multiaddr](nid, nil, d.rt, cfg.Kademlia)
+	if err != nil {
+		return nil, fmt.Errorf("new coordinator: %w", err)
+	}
+
+	// determine mode to start in
+	switch cfg.Mode {
+	case ModeOptClient, ModeOptAutoClient:
+		d.mode = modeClient
+	case ModeOptServer, ModeOptAutoServer:
+		d.mode = modeServer
+	default:
+		// should never happen because of the configuration validation above
+		return nil, fmt.Errorf("invalid dht mode %s", cfg.Mode)
+	}
+
+	return d, nil
+}
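A sketch of constructing a DHT on top of a libp2p host with this API. New and DefaultConfig are defined in the patch above; the host construction and the panicking error handling are illustrative only:

	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}

	d, err := New(h, DefaultConfig())
	if err != nil {
		panic(err)
	}
	_ = d // d is intended to satisfy routing.Routing (see v2/routing.go below)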
diff --git a/v2/dht_test.go b/v2/dht_test.go
new file mode 100644
index 00000000..dbff988b
--- /dev/null
+++ b/v2/dht_test.go
@@ -0,0 +1,78 @@
+package dht
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/libp2p/go-libp2p"
+)
+
+func TestNew(t *testing.T) {
+	h, err := libp2p.New(libp2p.NoListenAddrs)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	tests := []struct {
+		name        string
+		cfgBuilder  func(*Config) *Config
+		wantBuilder func(*DHT) *DHT
+		wantErr     bool
+	}{
+		{
+			name:        "happy path",
+			cfgBuilder:  func(c *Config) *Config { return c },
+			wantBuilder: func(dht *DHT) *DHT { return dht },
+			wantErr:     false,
+		},
+		{
+			name: "mode set to server",
+			cfgBuilder: func(c *Config) *Config {
+				c.Mode = ModeOptServer
+				return c
+			},
+			wantBuilder: func(dht *DHT) *DHT {
+				dht.mode = modeServer
+				return dht
+			},
+			wantErr: false,
+		},
+		{
+			name: "mode set to auto client",
+			cfgBuilder: func(c *Config) *Config {
+				c.Mode = ModeOptAutoClient
+				return c
+			},
+			wantBuilder: func(dht *DHT) *DHT {
+				dht.mode = modeClient
+				return dht
+			},
+			wantErr: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			c := DefaultConfig()
+			d, err := New(h, c)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			got, err := New(h, tt.cfgBuilder(c))
+			if (err != nil) != tt.wantErr {
+				t.Errorf("New() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+
+			want := tt.wantBuilder(d)
+
+			want.kad = nil
+			got.kad = nil
+
+			if !reflect.DeepEqual(got, want) {
+				t.Errorf("New() got = %v, want %v", got, want)
+			}
+		})
+	}
+}
diff --git a/v2/go.mod b/v2/go.mod
new file mode 100644
index 00000000..9feaa05e
--- /dev/null
+++ b/v2/go.mod
@@ -0,0 +1,3 @@
+module github.com/libp2p/go-libp2p-kad-dht/v2
+
+go 1.20
diff --git a/v2/kad.go b/v2/kad.go
new file mode 100644
index 00000000..7e25f659
--- /dev/null
+++ b/v2/kad.go
@@ -0,0 +1,55 @@
+package dht
+
+import (
+	"crypto/sha256"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+	ma "github.com/multiformats/go-multiaddr"
+	"github.com/plprobelab/go-kademlia/kad"
+	"github.com/plprobelab/go-kademlia/key"
+)
+
+// nodeID is a type wrapper around peer.ID that implements the kad.NodeID
+// interface. This means we can use nodeID for any operation that interfaces
+// with go-kademlia.
+type nodeID peer.ID
+
+// assertion that nodeID implements the kad.NodeID interface
+var _ kad.NodeID[key.Key256] = nodeID("")
+
+// Key returns the Kademlia key of nodeID. The Amino DHT operates on SHA256
+// hashes of, in this case, peer.IDs. This means this Key method takes
+// the peer.ID, hashes it and constructs a 256-bit key.
+func (p nodeID) Key() key.Key256 {
+	h := sha256.New()
+	h.Write([]byte(p))
+	return key.NewKey256(h.Sum(nil))
+}
+
+// String calls String on the underlying peer.ID and returns a string like
+// QmFoo or 12D3KooBar.
+func (p nodeID) String() string {
+	return peer.ID(p).String()
+}
+
+// nodeInfo is a type that wraps peer.AddrInfo and implements the kad.NodeInfo
+// interface. This means we can use nodeInfo for any operation that interfaces
+// with go-kademlia.
+type nodeInfo struct {
+	info peer.AddrInfo
+}
+
+// assertion that nodeInfo implements the kad.NodeInfo interface
+var _ kad.NodeInfo[key.Key256, ma.Multiaddr] = (*nodeInfo)(nil)
+
+// ID returns the kad.NodeID of this peer's information struct.
+func (ai nodeInfo) ID() kad.NodeID[key.Key256] {
+	return nodeID(ai.info.ID)
+}
+
+// Addresses returns all Multiaddresses of this peer.
+func (ai nodeInfo) Addresses() []ma.Multiaddr {
+	addrs := make([]ma.Multiaddr, len(ai.info.Addrs))
+	copy(addrs, ai.info.Addrs)
+	return addrs
+}
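A standalone sketch of the derivation that nodeID.Key performs: the Kademlia identifier of a peer is the SHA256 digest of the raw peer.ID bytes. The helper name is hypothetical and not part of the patch:

	// kademliaKeyBytes returns the 32-byte (256-bit) Kademlia key for p,
	// i.e. the SHA256 digest of the raw peer.ID bytes.
	func kademliaKeyBytes(p peer.ID) []byte {
		digest := sha256.Sum256([]byte(p))
		return digest[:]
	}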
diff --git a/v2/routing.go b/v2/routing.go
new file mode 100644
index 00000000..cfd6e189
--- /dev/null
+++ b/v2/routing.go
@@ -0,0 +1,43 @@
+package dht
+
+import (
+	"context"
+
+	"github.com/ipfs/go-cid"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/core/routing"
+)
+
+// Assert that IPFS assumptions about interfaces aren't broken. These aren't a
+// guarantee, but we can use them to aid refactoring.
+var (
+	_ routing.Routing = (*DHT)(nil)
+)
+
+func (d DHT) Provide(ctx context.Context, cid cid.Cid, b bool) error {
+	panic("implement me")
+}
+
+func (d DHT) FindProvidersAsync(ctx context.Context, cid cid.Cid, i int) <-chan peer.AddrInfo {
+	panic("implement me")
+}
+
+func (d DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) {
+	panic("implement me")
+}
+
+func (d DHT) PutValue(ctx context.Context, s string, bytes []byte, option ...routing.Option) error {
+	panic("implement me")
+}
+
+func (d DHT) GetValue(ctx context.Context, s string, option ...routing.Option) ([]byte, error) {
+	panic("implement me")
+}
+
+func (d DHT) SearchValue(ctx context.Context, s string, option ...routing.Option) (<-chan []byte, error) {
+	panic("implement me")
+}
+
+func (d DHT) Bootstrap(ctx context.Context) error {
+	panic("implement me")
+}

From 5bec109db0cc8b0e9ffc51b9b5060b62895a2b27 Mon Sep 17 00:00:00 2001
From: Dennis Trautwein
Date: Mon, 14 Aug 2023 15:54:28 +0200
Subject: [PATCH 02/64] improve DHT struct field documentation

---
 v2/dht.go | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/v2/dht.go b/v2/dht.go
index 79d50865..22d03a59 100644
--- a/v2/dht.go
+++ b/v2/dht.go
@@ -14,12 +14,23 @@ import (
 // DHT is an implementation of Kademlia with S/Kademlia modifications.
 // It is used to implement the base Routing module.
 type DHT struct {
-	host host.Host // host holds a reference to the underlying libp2p host
-	cfg  *Config   // cfg holds a reference to the DHT configuration struct
-	mode mode      // mode indicates the current mode the DHT operates in. This can differ from the desired mode if set to auto-client or auto-server.
+	// host holds a reference to the underlying libp2p host
+	host host.Host
 
-	kad *coord.Coordinator[key.Key256, ma.Multiaddr] // the go-kademlia reference
-	rt  kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]]
+	// cfg holds a reference to the DHT configuration struct
+	cfg *Config
+
+	// mode indicates the current mode the DHT operates in. This can differ from
+	// the desired mode if set to auto-client or auto-server. The desired mode
+	// can be configured via the Config struct.
+	mode mode
+
+	// kad is a reference to the go-kademlia coordinator
+	kad *coord.Coordinator[key.Key256, ma.Multiaddr]
+
+	// rt holds a reference to the routing table implementation. This can be
+	// configured via the Config struct.
+	rt kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]]
 }
 
 // New constructs a new DHT for the given underlying host and with the given
@@ -36,6 +47,7 @@ func New(h host.Host, cfg *Config) (*DHT, error) {
 		host: h,
 		cfg:  cfg,
 	}
+
 	nid := nodeID(d.host.ID())
 
 	// Use the configured routing table if it was provided
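For context on the mode field documented above: the auto modes are driven by libp2p's reachability events. A sketch of how such a subscription could look — this wiring is not part of these patches, and the handler body is illustrative:

	sub, err := h.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged))
	if err != nil {
		panic(err)
	}
	for e := range sub.Out() {
		evt := e.(event.EvtLocalReachabilityChanged)
		// a ModeOptAutoServer DHT would flip between modeServer and
		// modeClient here, based on evt.Reachability
		_ = evt
	}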
From 311f0d3f8e75a9070bbf97b0287a1d3585d97dfc Mon Sep 17 00:00:00 2001
From: Dennis Trautwein
Date: Mon, 14 Aug 2023 17:03:09 +0200
Subject: [PATCH 03/64] add: default bootstrap peers method

---
 v2/bootstrap.go      | 34 ++++++++++++++++++++++++++++++++++
 v2/bootstrap_test.go | 23 +++++++++++++++++++++++
 2 files changed, 57 insertions(+)
 create mode 100644 v2/bootstrap.go
 create mode 100644 v2/bootstrap_test.go

diff --git a/v2/bootstrap.go b/v2/bootstrap.go
new file mode 100644
index 00000000..f0ac71fe
--- /dev/null
+++ b/v2/bootstrap.go
@@ -0,0 +1,34 @@
+package dht
+
+import (
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+// defaultBootstrapPeers is a set of hard-coded public DHT bootstrap peers
+// operated by Protocol Labs. This slice is filled in the init() function.
+var defaultBootstrapPeers []peer.AddrInfo
+
+func init() {
+	for _, s := range []string{
+		"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
+		"/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa",
+		"/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb",
+		"/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt",
+		"/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io
+	} {
+		addrInfo, err := peer.AddrInfoFromString(s)
+		if err != nil {
+			panic(err)
+		}
+		defaultBootstrapPeers = append(defaultBootstrapPeers, *addrInfo)
+	}
+}
+
+// DefaultBootstrapPeers returns hard-coded public DHT bootstrap peers operated
+// by Protocol Labs. You can configure your own set of bootstrap peers by
+// overwriting the corresponding Config field.
+func DefaultBootstrapPeers() []peer.AddrInfo { + peers := make([]peer.AddrInfo, len(defaultBootstrapPeers)) + copy(peers, defaultBootstrapPeers) + return peers +} diff --git a/v2/bootstrap_test.go b/v2/bootstrap_test.go new file mode 100644 index 00000000..b38d16be --- /dev/null +++ b/v2/bootstrap_test.go @@ -0,0 +1,23 @@ +package dht + +import ( + "testing" + + "github.com/libp2p/go-libp2p/core/peer" +) + +func TestDefaultBootstrapPeers(t *testing.T) { + bps := DefaultBootstrapPeers() + if len(bps) != len(defaultBootstrapPeers) { + t.Errorf("len(DefaultBootstrapPeers()) = %d, want %v", len(bps), len(defaultBootstrapPeers)) + } + + bpmap := make(map[peer.ID]peer.AddrInfo) + for _, info := range bps { + bpmap[info.ID] = info + } + + if len(bpmap) != len(defaultBootstrapPeers) { + t.Errorf("unique DefaultBootstrapPeers() = %d, want %v", len(bpmap), len(defaultBootstrapPeers)) + } +} From dfe3a81a13da2c91f331328742af06b5f7912929 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Mon, 14 Aug 2023 17:03:14 +0200 Subject: [PATCH 04/64] go mod tidy --- v2/go.mod | 97 ++++++++++++ v2/go.sum | 462 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 559 insertions(+) create mode 100644 v2/go.sum diff --git a/v2/go.mod b/v2/go.mod index 9feaa05e..11a7d11f 100644 --- a/v2/go.mod +++ b/v2/go.mod @@ -1,3 +1,100 @@ module github.com/libp2p/go-libp2p-kad-dht/v2 go 1.20 + +require ( + github.com/ipfs/go-cid v0.4.1 + github.com/libp2p/go-libp2p v0.29.2 + github.com/multiformats/go-multiaddr v0.11.0 + github.com/plprobelab/go-kademlia v0.0.0-20230813192759-e4050185fbde +) + +require ( + github.com/benbjohnson/clock v1.3.5 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/containerd/cgroups v1.1.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/elastic/gosigar v0.14.2 // indirect + github.com/flynn/noise v1.0.0 // indirect + github.com/francoispqt/gojay v1.2.13 // indirect + github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/mock v1.6.0 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/gopacket v1.1.19 // indirect + github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/huin/goupnp v1.2.0 // indirect + github.com/ipfs/go-log/v2 v2.5.1 // indirect + github.com/jackpal/go-nat-pmp v1.0.2 // indirect + github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect + github.com/klauspost/compress v1.16.7 // indirect + github.com/klauspost/cpuid/v2 v2.2.5 // indirect + github.com/koron/go-ssdp v0.0.4 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/libp2p/go-cidranger v1.1.0 // indirect + github.com/libp2p/go-flow-metrics v0.1.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect + github.com/libp2p/go-msgio v0.3.0 // indirect + github.com/libp2p/go-nat v0.2.0 // indirect + github.com/libp2p/go-netroute v0.2.1 // indirect + github.com/libp2p/go-reuseport v0.3.0 // indirect + github.com/libp2p/go-yamux/v4 v4.0.1 // indirect + 
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect + github.com/mattn/go-isatty v0.0.19 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/miekg/dns v1.1.55 // indirect + github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect + github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect + github.com/minio/sha256-simd v1.0.1 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base32 v0.1.0 // indirect + github.com/multiformats/go-base36 v0.2.0 // indirect + github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect + github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multicodec v0.9.0 // indirect + github.com/multiformats/go-multihash v0.2.3 // indirect + github.com/multiformats/go-multistream v0.4.1 // indirect + github.com/multiformats/go-varint v0.0.7 // indirect + github.com/onsi/ginkgo/v2 v2.11.0 // indirect + github.com/opencontainers/runtime-spec v1.0.2 // indirect + github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/prometheus/client_golang v1.16.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.11.0 // indirect + github.com/quic-go/qpack v0.4.0 // indirect + github.com/quic-go/qtls-go1-19 v0.3.3 // indirect + github.com/quic-go/qtls-go1-20 v0.3.0 // indirect + github.com/quic-go/quic-go v0.36.4 // indirect + github.com/quic-go/webtransport-go v0.5.3 // indirect + github.com/raulk/go-watchdog v1.3.0 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + go.opentelemetry.io/otel v1.16.0 // indirect + go.opentelemetry.io/otel/metric v1.16.0 // indirect + go.opentelemetry.io/otel/trace v1.16.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/dig v1.17.0 // indirect + go.uber.org/fx v1.20.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.24.0 // indirect + golang.org/x/crypto v0.11.0 // indirect + golang.org/x/exp v0.0.0-20230725012225-302865e7556b // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.12.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.10.0 // indirect + golang.org/x/text v0.11.0 // indirect + golang.org/x/tools v0.11.0 // indirect + google.golang.org/protobuf v1.31.0 // indirect + lukechampine.com/blake3 v1.2.1 // indirect +) diff --git a/v2/go.sum b/v2/go.sum new file mode 100644 index 00000000..a9c8b17c --- /dev/null +++ b/v2/go.sum @@ -0,0 +1,462 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= 
+git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= 
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= +github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= +github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 h1:n6vlPhxsA+BW/XsS5+uqi7GyzaLa5MH7qlSLBZtRdiA= +github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= +github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= +github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= +github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= +github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= +github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= +github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= +github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= +github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= +github.com/libp2p/go-libp2p v0.29.2 h1:uPw/c8hOxoLP/KhFnzlc5Ejqf+OmAL1dwIsqE31WBtY= +github.com/libp2p/go-libp2p v0.29.2/go.mod h1:OU7nSq0aEZMsV2wY8nXn1+XNNt9q2UiR8LjW3Kmp2UE= +github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= +github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= +github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= +github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= +github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= +github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= +github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= +github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= +github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= +github.com/libp2p/go-reuseport v0.3.0 h1:iiZslO5byUYZEg9iCwJGf5h+sf1Agmqx2V2FDjPyvUw= +github.com/libp2p/go-reuseport v0.3.0/go.mod h1:laea40AimhtfEqysZ71UpYj4S+R9VpH8PgqLo7L+SwI= +github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= +github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod 
h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= +github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= +github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= +github.com/multiformats/go-multiaddr v0.11.0 h1:XqGyJ8ufbCE0HmTDwx2kPdsrQ36AGPZNZX6s6xfJH10= +github.com/multiformats/go-multiaddr v0.11.0/go.mod h1:gWUm0QLR4thQ6+ZF6SXUw8YjtwQSPapICM+NmCkxHSM= +github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= +github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= 
+github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= +github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= +github.com/multiformats/go-multistream v0.4.1 h1:rFy0Iiyn3YT0asivDUIR05leAdwZq3de4741sbiSdfo= +github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q= +github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= +github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= +github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= +github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= +github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/plprobelab/go-kademlia v0.0.0-20230813192759-e4050185fbde h1:UiucUJDpMRYfuaLYfO2/euSyBjH/5okj2PaUwCq7DtE= +github.com/plprobelab/go-kademlia v0.0.0-20230813192759-e4050185fbde/go.mod h1:OMu6Kyh5AetV3uLRVSZlp6WcwrZUn3nyRFaRuJxVWJQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= 
+github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk= +github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= +github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= +github.com/quic-go/qtls-go1-19 v0.3.3 h1:wznEHvJwd+2X3PqftRha0SUKmGsnb6dfArMhy9PeJVE= +github.com/quic-go/qtls-go1-19 v0.3.3/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= +github.com/quic-go/qtls-go1-20 v0.3.0 h1:NrCXmDl8BddZwO67vlvEpBTwT89bJfKYygxv4HQvuDk= +github.com/quic-go/qtls-go1-20 v0.3.0/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= +github.com/quic-go/quic-go v0.36.4 h1:CXn/ZLN5Vntlk53fjR+kUMC8Jt7flfQe+I5Ty5A+k0o= +github.com/quic-go/quic-go v0.36.4/go.mod h1:qxQumdeKw5GmWs1OsTZZnOxzSI+RJWuhf1O8FN35L2o= +github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU= +github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= +github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= +github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod 
h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod 
h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= +go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= +go.uber.org/fx v1.20.0 h1:ZMC/pnRvhsthOZh9MZjMq5U8Or3mA9zBSPaLnzs3ihQ= +go.uber.org/fx v1.20.0/go.mod h1:qCUj0btiR3/JnanEr1TYEePfSw6o/4qYJscgvzQ5Ub0= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230725012225-302865e7556b h1:tK7yjGqVRzYdXsBcfD2MLhFAhHfDgGLm2rY1ub7FA9k= +golang.org/x/exp v0.0.0-20230725012225-302865e7556b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8= +golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 
h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= +lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= From 57c4988a9bb1cc7faf0d4771276c105f55f25f7e Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Mon, 14 Aug 2023 18:29:04 +0200 Subject: [PATCH 05/64] use slog for logging --- v2/config.go | 8 ++++++++ v2/dht.go | 5 +++++ v2/go.mod | 5 +++-- v2/go.sum | 2 ++ 4 files changed, 18 insertions(+), 2 deletions(-) diff --git a/v2/config.go b/v2/config.go index b710d86c..cd3f0f35 100644 --- a/v2/config.go +++ b/v2/config.go @@ -3,9 +3,12 @@ package dht import ( "fmt" + logging "github.com/ipfs/go-log/v2" "github.com/plprobelab/go-kademlia/coord" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" + "go.uber.org/zap/exp/zapslog" + "golang.org/x/exp/slog" ) type ( @@ -71,6 +74,10 @@ type Config struct { // implementation that this DHT should use. If this field is nil, the // triert.TrieRT routing table will be used. RoutingTable kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]] + + // Logger can be used to configure a custom structured logger instance. + // By default go.uber.org/zap is used (wrapped in ipfs/go-log). 
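+	// Editor's sketch (not part of the original patch): any slog handler can
+	// be plugged in here, e.g. a JSON handler writing to stderr, assuming the
+	// golang.org/x/exp/slog API pinned in go.mod:
+	//
+	//	cfg := DefaultConfig()
+	//	cfg.Logger = slog.New(slog.NewJSONHandler(os.Stderr, nil))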
+ Logger *slog.Logger } // DefaultConfig returns a configuration struct that can be used as-is to @@ -80,6 +87,7 @@ func DefaultConfig() *Config { Mode: ModeOptAutoClient, Kademlia: coord.DefaultConfig(), RoutingTable: nil, + Logger: slog.New(zapslog.NewHandler(logging.Logger("dht").Desugar().Core())), } } diff --git a/v2/dht.go b/v2/dht.go index 22d03a59..6070b20d 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -31,6 +31,10 @@ type DHT struct { // rt holds a reference to the routing table implementation. This can be // configured via the Config struct. rt kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]] + + // log is a convenience accessor to the logging instance. It gets the value + // of the logger field from the configuration. + log *slog.Logger } // New constructs a new DHT for the given underlying host and with the given @@ -46,6 +50,7 @@ func New(h host.Host, cfg *Config) (*DHT, error) { d := &DHT{ host: h, cfg: cfg, + log: cfg.Logger, } nid := nodeID(d.host.ID()) diff --git a/v2/go.mod b/v2/go.mod index 11a7d11f..542d1748 100644 --- a/v2/go.mod +++ b/v2/go.mod @@ -4,9 +4,12 @@ go 1.20 require ( github.com/ipfs/go-cid v0.4.1 + github.com/ipfs/go-log/v2 v2.5.1 github.com/libp2p/go-libp2p v0.29.2 github.com/multiformats/go-multiaddr v0.11.0 github.com/plprobelab/go-kademlia v0.0.0-20230813192759-e4050185fbde + go.uber.org/zap/exp v0.1.0 + golang.org/x/exp v0.0.0-20230725012225-302865e7556b ) require ( @@ -32,7 +35,6 @@ require ( github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/huin/goupnp v1.2.0 // indirect - github.com/ipfs/go-log/v2 v2.5.1 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/klauspost/compress v1.16.7 // indirect @@ -88,7 +90,6 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect golang.org/x/crypto v0.11.0 // indirect - golang.org/x/exp v0.0.0-20230725012225-302865e7556b // indirect golang.org/x/mod v0.12.0 // indirect golang.org/x/net v0.12.0 // indirect golang.org/x/sync v0.3.0 // indirect diff --git a/v2/go.sum b/v2/go.sum index a9c8b17c..65ee9d28 100644 --- a/v2/go.sum +++ b/v2/go.sum @@ -315,6 +315,8 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap/exp v0.1.0 h1:Ol9zQNvAEAgFHSBiR5LlwS9Xq8u5QF+7HBwNHUB8rcI= +go.uber.org/zap/exp v0.1.0/go.mod h1:z/0T3As39ttolxZGOsvk1OEvQfwwfTZpmV9YTp+VAkc= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= From 984f28e0ece7026c4517dd7b971dbae564cce7c8 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Mon, 14 Aug 2023 18:29:58 +0200 Subject: [PATCH 06/64] handle reachability change events --- v2/dht.go | 41 +++++++++++++++++++++-- v2/subscriber_notifee.go | 71 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+), 3 deletions(-) create mode 100644 v2/subscriber_notifee.go diff --git a/v2/dht.go b/v2/dht.go index 6070b20d..659ee065 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -23,7 
+23,8 @@ type DHT struct { // mode indicates the current mode the DHT operates in. This can differ from // the desired mode if set to auto-client or auto-server. The desired mode // can be configured via the Config struct. - mode mode + mode mode + modeLk sync.RWMutex // kad is a reference to the go-kademlia coordinator kad *coord.Coordinator[key.Key256, ma.Multiaddr] @@ -75,13 +76,47 @@ func New(h host.Host, cfg *Config) (*DHT, error) { // determine mode to start in switch cfg.Mode { case ModeOptClient, ModeOptAutoClient: - d.mode = modeClient + d.setClientMode() case ModeOptServer, ModeOptAutoServer: - d.mode = modeServer + d.setServerMode() default: // should never happen because of the configuration validation above return nil, fmt.Errorf("invalid dht mode %s", cfg.Mode) } + if err := d.subscribeToNetworkEvents(); err != nil { + return nil, err + } + return d, nil } + +// setServerMode advertises (via libp2p identify updates) that we are able to respond to DHT queries and sets the appropriate stream handlers. +// Note: We may support responding to queries with protocols aside from our primary ones in order to support +// interoperability with older versions of the DHT protocol. +func (d *DHT) setServerMode() { + d.modeLk.Lock() + defer d.modeLk.Unlock() + + if d.mode == modeServer { + return + } + + d.mode = modeServer +} + +// moveToClientMode stops advertising (and rescinds advertisements via libp2p identify updates) that we are able to +// respond to DHT queries and removes the appropriate stream handlers. We also kill all inbound streams that were +// utilizing the handled protocols. +// Note: We may support responding to queries with protocols aside from our primary ones in order to support +// interoperability with older versions of the DHT protocol. +func (d *DHT) setClientMode() { + d.modeLk.Lock() + defer d.modeLk.Unlock() + + if d.mode == modeClient { + return + } + + d.mode = modeClient +} diff --git a/v2/subscriber_notifee.go b/v2/subscriber_notifee.go new file mode 100644 index 00000000..7c015f8f --- /dev/null +++ b/v2/subscriber_notifee.go @@ -0,0 +1,71 @@ +package dht + +import ( + "fmt" + + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/network" +) + +func (d *DHT) subscribeToNetworkEvents() error { + evts := []interface{}{ + } + + // register for event bus local routability changes in order to trigger switching between client and server modes + // only register for events if the DHT is operating in ModeAuto + if d.cfg.Mode == ModeOptAutoServer || d.cfg.Mode == ModeOptAutoClient { + evts = append(evts, new(event.EvtLocalReachabilityChanged)) + } + + sub, err := d.host.EventBus().Subscribe(evts) + if err != nil { + return fmt.Errorf("failed subscribing to eventbus: %w", err) + } + + go func() { + defer func() { + if err := sub.Close(); err != nil { + d.log.With("err", err).Warn("failed closing libp2p event subscription") + } + }() + + for evt := range sub.Out() { + switch evt := evt.(type) { + case event.EvtLocalReachabilityChanged: + d.onEvtLocalReachabilityChanged(evt) + default: + d.log.Warn("unknown libp2p event", "type", fmt.Sprintf("%T", evt)) + } + } + }() + + return nil +} + +// onEvtLocalReachabilityChanged handles reachability change events and sets +// the DHTs mode accordingly. We only subscribe to these events if the DHT +// operates in an automatic mode. This means we can directly change to +// client/server mode based on the reachability event and don't need to check +// if the configuration constrains us to a specific mode. 
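+// Editor's sketch (illustrative, not part of the original patch): a test
+// could drive this handler by emitting the event on the bus of the host h
+// that the DHT was constructed with:
+//
+//	em, _ := h.EventBus().Emitter(new(event.EvtLocalReachabilityChanged))
+//	_ = em.Emit(event.EvtLocalReachabilityChanged{Reachability: network.ReachabilityPublic})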
+func (d *DHT) onEvtLocalReachabilityChanged(evt event.EvtLocalReachabilityChanged) { + d.log.With("reachability", evt.Reachability.String()). + Debug("handling reachability changed event") + + switch evt.Reachability { + case network.ReachabilityPrivate: + d.setClientMode() + case network.ReachabilityUnknown: + switch d.cfg.Mode { + case ModeOptAutoClient: + d.setClientMode() + case ModeOptAutoServer: + d.setServerMode() + default: + d.log.With("mode", d.cfg.Mode).Warn("unexpected mode configuration") + } + case network.ReachabilityPublic: + d.setServerMode() + default: + d.log.With("reachability", evt.Reachability).Warn("unknown reachability type") + } +} From 35a977f5a257d529f78b49e29a29a877733c0ae4 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Tue, 15 Aug 2023 10:15:50 +0200 Subject: [PATCH 07/64] Add network event subscription --- v2/config.go | 15 ++++++- v2/config_test.go | 8 ++++ v2/dht.go | 73 ++++++++++++++++++++++++++-------- v2/go.mod | 2 +- v2/handler.go | 6 +++ v2/notifee.go | 86 ++++++++++++++++++++++++++++++++++++++++ v2/routing.go | 14 +++---- v2/subscriber_notifee.go | 71 --------------------------------- 8 files changed, 178 insertions(+), 97 deletions(-) create mode 100644 v2/handler.go create mode 100644 v2/notifee.go delete mode 100644 v2/subscriber_notifee.go diff --git a/v2/config.go b/v2/config.go index cd3f0f35..f406f4f3 100644 --- a/v2/config.go +++ b/v2/config.go @@ -4,6 +4,7 @@ import ( "fmt" logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p/core/protocol" "github.com/plprobelab/go-kademlia/coord" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" @@ -70,9 +71,14 @@ type Config struct { // Kademlia holds the configuration of the underlying Kademlia implementation. Kademlia *coord.Config + // ProtocolID represents the DHT protocol we can query with and respond to. + ProtocolID protocol.ID + // RoutingTable holds a reference to the specific routing table // implementation that this DHT should use. If this field is nil, the - // triert.TrieRT routing table will be used. + // triert.TrieRT routing table will be used. This field will be nil + // in the default configuration because a routing table requires information + // about the local node. RoutingTable kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]] // Logger can be used to configure a custom structured logger instance. @@ -86,7 +92,8 @@ func DefaultConfig() *Config { return &Config{ Mode: ModeOptAutoClient, Kademlia: coord.DefaultConfig(), - RoutingTable: nil, + ProtocolID: "/ipfs/kad/1.0.0", + RoutingTable: nil, // nil because a routing table requires information about the local node. triert.TrieRT will be used if this field is nil. 
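+		// Editor's note (hypothetical usage, not part of this patch): callers
+		// that already know their node's reachability can pin the mode instead
+		// of relying on the auto variants, e.g. for an always-public server:
+		//
+		//	cfg := DefaultConfig()
+		//	cfg.Mode = ModeOptServer
+		//	d, err := New(h, cfg)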
Logger: slog.New(zapslog.NewHandler(logging.Logger("dht").Desugar().Core())), } } @@ -109,5 +116,9 @@ func (c *Config) Validate() error { return fmt.Errorf("invalid kademlia configuration: %w", err) } + if c.ProtocolID == "" { + return fmt.Errorf("protocolID must not be empty") + } + return nil } diff --git a/v2/config_test.go b/v2/config_test.go index e9268526..4126c4b1 100644 --- a/v2/config_test.go +++ b/v2/config_test.go @@ -39,6 +39,14 @@ func TestConfig_Validate(t *testing.T) { return c }, }, + { + name: "empty protocol", + wantErr: true, + mutate: func(c *Config) *Config { + c.ProtocolID = "" + return c + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/v2/dht.go b/v2/dht.go index 659ee065..1bb56bf7 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -2,13 +2,17 @@ package dht import ( "fmt" + "sync" + "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/coord" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/routing/triert" + "golang.org/x/exp/slog" ) // DHT is an implementation of Kademlia with S/Kademlia modifications. @@ -36,6 +40,11 @@ type DHT struct { // log is a convenience accessor to the logging instance. It gets the value // of the logger field from the configuration. log *slog.Logger + + // sub holds a subscription to the libp2p event bus. The DHT subscribes to + // these events in networkEventsSubscription and consumes them + // asynchronously in consumeNetworkEvents. + sub event.Subscription } // New constructs a new DHT for the given underlying host and with the given @@ -84,39 +93,71 @@ func New(h host.Host, cfg *Config) (*DHT, error) { return nil, fmt.Errorf("invalid dht mode %s", cfg.Mode) } - if err := d.subscribeToNetworkEvents(); err != nil { - return nil, err + // create subscription to various network events + d.sub, err = d.networkEventsSubscription() + if err != nil { + return nil, fmt.Errorf("failed subscribing to event bus: %w", err) } + // consume these events asynchronously + go d.consumeNetworkEvents(d.sub) + return d, nil } -// setServerMode advertises (via libp2p identify updates) that we are able to respond to DHT queries and sets the appropriate stream handlers. -// Note: We may support responding to queries with protocols aside from our primary ones in order to support -// interoperability with older versions of the DHT protocol. +// Close cleans up all resources associated with this DHT. +func (d *DHT) Close() error { + if err := d.sub.Close(); err != nil { + d.log.With("err", err).Debug("failed closing event bus subscription") + } + + return nil +} + +// setServerMode advertises (via libp2p identify updates) that we are able to +// respond to DHT queries for the configured protocol and sets the appropriate +// stream handler. This method is safe to call even if the DHT is already in +// server mode. func (d *DHT) setServerMode() { d.modeLk.Lock() defer d.modeLk.Unlock() - if d.mode == modeServer { - return - } + d.log.Info("Activating DHT server mode") d.mode = modeServer + d.host.SetStreamHandler(d.cfg.ProtocolID, d.handleNewStream) } -// moveToClientMode stops advertising (and rescinds advertisements via libp2p identify updates) that we are able to -// respond to DHT queries and removes the appropriate stream handlers. We also kill all inbound streams that were -// utilizing the handled protocols. 
-// Note: We may support responding to queries with protocols aside from our primary ones in order to support -// interoperability with older versions of the DHT protocol. +// setClientMode stops advertising (and rescinds advertisements via libp2p +// identify updates) that we are able to respond to DHT queries for the +// configured protocol and removes the registered stream handlers. We also kill +// all inbound streams that were utilizing the handled protocols. This method +// is safe to call even if the DHT is already in client mode. func (d *DHT) setClientMode() { d.modeLk.Lock() defer d.modeLk.Unlock() - if d.mode == modeClient { - return - } + d.log.Info("Activating DHT client mode") d.mode = modeClient + d.host.RemoveStreamHandler(d.cfg.ProtocolID) + + // kill all active inbound streams using the DHT protocol. + for _, c := range d.host.Network().Conns() { + for _, s := range c.GetStreams() { + + if s.Protocol() != d.cfg.ProtocolID { + continue + } + + if s.Stat().Direction != network.DirInbound { + continue + } + + if err := s.Reset(); err != nil { + d.log.With("err", err).Debug("failed closing stream") + } + } + } } diff --git a/v2/go.mod b/v2/go.mod index 542d1748..e71ef11b 100644 --- a/v2/go.mod +++ b/v2/go.mod @@ -5,7 +5,7 @@ go 1.20 require ( github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-log/v2 v2.5.1 - github.com/libp2p/go-libp2p v0.29.2 + github.com/libp2p/go-libp2p v0.27.7 github.com/multiformats/go-multiaddr v0.11.0 github.com/plprobelab/go-kademlia v0.0.0-20230813192759-e4050185fbde go.uber.org/zap/exp v0.1.0 diff --git a/v2/handler.go b/v2/handler.go new file mode 100644 index 00000000..9dca0b2d --- /dev/null +++ b/v2/handler.go @@ -0,0 +1,6 @@ +package dht + +import "github.com/libp2p/go-libp2p/core/network" + +func (d *DHT) handleNewStream(s network.Stream) { +} diff --git a/v2/notifee.go b/v2/notifee.go new file mode 100644 index 00000000..8999b7c0 --- /dev/null +++ b/v2/notifee.go @@ -0,0 +1,86 @@ +package dht + +import ( + "fmt" + + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/network" +) + +// networkEventsSubscription registers a subscription on the libp2p event bus +// for several events. The DHT uses these events for various tasks like routing +// table or DHT mode updates. +func (d *DHT) networkEventsSubscription() (event.Subscription, error) { + evts := []interface{}{ + // register for event bus notifications of when peers successfully + // complete identification in order to update the routing table. + new(event.EvtPeerIdentificationCompleted), + + // register for event bus protocol ID changes in order to update the + // routing table. If a peer stops supporting the DHT protocol, we want + // to remove it from the routing table. + new(event.EvtPeerProtocolsUpdated), + + // register for event bus notifications for when our local + // address/addresses change, so we can advertise those to the network + new(event.EvtLocalAddressesUpdated), + + // we want to know when we are disconnecting from other peers. + new(event.EvtPeerConnectednessChanged), + } + + // register for event bus local reachability changes in order to trigger + // switching between client and server modes. We only register for these + // events if the DHT is operating in ModeOptAuto{Server,Client}.
+ if d.cfg.Mode == ModeOptAutoServer || d.cfg.Mode == ModeOptAutoClient { + evts = append(evts, new(event.EvtLocalReachabilityChanged)) + } + + return d.host.EventBus().Subscribe(evts) +} + +// consumeNetworkEvents takes an event bus subscription and consumes all events +// emitted on that subscription. It calls out to various event handlers. +func (d *DHT) consumeNetworkEvents(sub event.Subscription) { + for evt := range sub.Out() { + switch evt := evt.(type) { + case event.EvtLocalReachabilityChanged: + d.onEvtLocalReachabilityChanged(evt) + case event.EvtLocalAddressesUpdated: + case event.EvtPeerProtocolsUpdated: + case event.EvtPeerIdentificationCompleted: + case event.EvtPeerConnectednessChanged: + default: + d.log.Warn("unknown libp2p event", "type", fmt.Sprintf("%T", evt)) + } + } +} + +// onEvtLocalReachabilityChanged handles reachability change events and sets +// the DHT's mode accordingly. We only subscribe to these events if the DHT +// operates in an automatic mode. This means we can directly change to +// client/server mode based on the reachability event and don't need to check +// if the configuration constrains us to a specific mode.
+ d.log.With("mode", d.cfg.Mode).Warn("unexpected mode configuration") + } + default: + d.log.With("reachability", evt.Reachability).Warn("unknown reachability type") + } +} diff --git a/v2/routing.go b/v2/routing.go index cfd6e189..d277fa9c 100644 --- a/v2/routing.go +++ b/v2/routing.go @@ -14,30 +14,30 @@ var ( _ routing.Routing = (*DHT)(nil) ) -func (d DHT) Provide(ctx context.Context, cid cid.Cid, b bool) error { +func (d *DHT) Provide(ctx context.Context, cid cid.Cid, b bool) error { panic("implement me") } -func (d DHT) FindProvidersAsync(ctx context.Context, cid cid.Cid, i int) <-chan peer.AddrInfo { +func (d *DHT) FindProvidersAsync(ctx context.Context, cid cid.Cid, i int) <-chan peer.AddrInfo { panic("implement me") } -func (d DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { +func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { panic("implement me") } -func (d DHT) PutValue(ctx context.Context, s string, bytes []byte, option ...routing.Option) error { +func (d *DHT) PutValue(ctx context.Context, s string, bytes []byte, option ...routing.Option) error { panic("implement me") } -func (d DHT) GetValue(ctx context.Context, s string, option ...routing.Option) ([]byte, error) { +func (d *DHT) GetValue(ctx context.Context, s string, option ...routing.Option) ([]byte, error) { panic("implement me") } -func (d DHT) SearchValue(ctx context.Context, s string, option ...routing.Option) (<-chan []byte, error) { +func (d *DHT) SearchValue(ctx context.Context, s string, option ...routing.Option) (<-chan []byte, error) { panic("implement me") } -func (d DHT) Bootstrap(ctx context.Context) error { +func (d *DHT) Bootstrap(ctx context.Context) error { panic("implement me") } diff --git a/v2/subscriber_notifee.go b/v2/subscriber_notifee.go deleted file mode 100644 index 7c015f8f..00000000 --- a/v2/subscriber_notifee.go +++ /dev/null @@ -1,71 +0,0 @@ -package dht - -import ( - "fmt" - - "github.com/libp2p/go-libp2p/core/event" - "github.com/libp2p/go-libp2p/core/network" -) - -func (d *DHT) subscribeToNetworkEvents() error { - evts := []interface{}{ - } - - // register for event bus local routability changes in order to trigger switching between client and server modes - // only register for events if the DHT is operating in ModeAuto - if d.cfg.Mode == ModeOptAutoServer || d.cfg.Mode == ModeOptAutoClient { - evts = append(evts, new(event.EvtLocalReachabilityChanged)) - } - - sub, err := d.host.EventBus().Subscribe(evts) - if err != nil { - return fmt.Errorf("failed subscribing to eventbus: %w", err) - } - - go func() { - defer func() { - if err := sub.Close(); err != nil { - d.log.With("err", err).Warn("failed closing libp2p event subscription") - } - }() - - for evt := range sub.Out() { - switch evt := evt.(type) { - case event.EvtLocalReachabilityChanged: - d.onEvtLocalReachabilityChanged(evt) - default: - d.log.Warn("unknown libp2p event", "type", fmt.Sprintf("%T", evt)) - } - } - }() - - return nil -} - -// onEvtLocalReachabilityChanged handles reachability change events and sets -// the DHTs mode accordingly. We only subscribe to these events if the DHT -// operates in an automatic mode. This means we can directly change to -// client/server mode based on the reachability event and don't need to check -// if the configuration constrains us to a specific mode. -func (d *DHT) onEvtLocalReachabilityChanged(evt event.EvtLocalReachabilityChanged) { - d.log.With("reachability", evt.Reachability.String()). 
- Debug("handling reachability changed event") - - switch evt.Reachability { - case network.ReachabilityPrivate: - d.setClientMode() - case network.ReachabilityUnknown: - switch d.cfg.Mode { - case ModeOptAutoClient: - d.setClientMode() - case ModeOptAutoServer: - d.setServerMode() - default: - d.log.With("mode", d.cfg.Mode).Warn("unexpected mode configuration") - } - case network.ReachabilityPublic: - d.setServerMode() - default: - d.log.With("reachability", evt.Reachability).Warn("unknown reachability type") - } -} From e8e5be22cb82f4be653244f00c69d2a72e4f04f5 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Tue, 15 Aug 2023 17:53:20 +0200 Subject: [PATCH 08/64] Add stream handler implementation --- v2/config.go | 14 +- v2/dht.go | 25 +- v2/handler.go | 6 - v2/handlers.go | 91 ++++ v2/metrics/metrics.go | 110 +++++ v2/net.go | 44 ++ v2/pb/Makefile | 11 + v2/pb/bytestring.go | 42 ++ v2/pb/dht.aux.go | 28 ++ v2/pb/dht.pb.go | 976 ++++++++++++++++++++++++++++++++++++++++++ v2/pb/dht.proto | 72 ++++ v2/pb/message.go | 120 ++++++ v2/pb/message_test.go | 15 + v2/stream.go | 192 +++++++++ 14 files changed, 1733 insertions(+), 13 deletions(-) delete mode 100644 v2/handler.go create mode 100644 v2/handlers.go create mode 100644 v2/metrics/metrics.go create mode 100644 v2/net.go create mode 100644 v2/pb/Makefile create mode 100644 v2/pb/bytestring.go create mode 100644 v2/pb/dht.aux.go create mode 100644 v2/pb/dht.pb.go create mode 100644 v2/pb/dht.proto create mode 100644 v2/pb/message.go create mode 100644 v2/pb/message_test.go create mode 100644 v2/stream.go diff --git a/v2/config.go b/v2/config.go index f406f4f3..88ae0254 100644 --- a/v2/config.go +++ b/v2/config.go @@ -2,6 +2,7 @@ package dht import ( "fmt" + "time" logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p/core/protocol" @@ -84,17 +85,20 @@ type Config struct { // Logger can be used to configure a custom structured logger instance. // By default go.uber.org/zap is used (wrapped in ipfs/go-log). Logger *slog.Logger + + TimeoutStreamIdle time.Duration } // DefaultConfig returns a configuration struct that can be used as-is to // instantiate a fully functional DHT client. func DefaultConfig() *Config { return &Config{ - Mode: ModeOptAutoClient, - Kademlia: coord.DefaultConfig(), - ProtocolID: "/ipfs/kad/1.0.0", - RoutingTable: nil, // nil because a routing table requires information about the local node. triert.TrieRT will be used if this field is nil. - Logger: slog.New(zapslog.NewHandler(logging.Logger("dht").Desugar().Core())), + Mode: ModeOptAutoClient, + Kademlia: coord.DefaultConfig(), + ProtocolID: "/ipfs/kad/1.0.0", + RoutingTable: nil, // nil because a routing table requires information about the local node. triert.TrieRT will be used if this field is nil. 
Logger: slog.New(zapslog.NewHandler(logging.Logger("dht").Desugar().Core())), + TimeoutStreamIdle: time.Minute, } } diff --git a/v2/dht.go b/v2/dht.go index 1bb56bf7..6aa8d33a 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -4,6 +4,8 @@ import ( "fmt" "sync" + "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" @@ -21,6 +23,8 @@ type DHT struct { // host holds a reference to the underlying libp2p host host host.Host + pstore peerstore.Peerstore + // cfg holds a reference to the DHT configuration struct cfg *Config @@ -111,6 +115,20 @@ func (d *DHT) Close() error { d.log.With("err", err).Debug("failed closing event bus subscription") } + // kill all active streams using the DHT protocol. + for _, c := range d.host.Network().Conns() { + for _, s := range c.GetStreams() { + + if s.Protocol() != d.cfg.ProtocolID { + continue + } + + if err := s.Reset(); err != nil { + d.log.With("err", err).Debug("failed closing stream") + } + } + } + return nil } @@ -125,7 +143,7 @@ func (d *DHT) setServerMode() { d.log.Info("Activating DHT server mode") d.mode = modeServer - d.host.SetStreamHandler(d.cfg.ProtocolID, d.handleNewStream) + d.host.SetStreamHandler(d.cfg.ProtocolID, d.streamHandler) } @@ -151,7 +169,10 @@ func (d *DHT) setClientMode() { continue } - if s.Stat().Direction != network.DirInbound { + switch s.Stat().Direction { + case network.DirUnknown: + case network.DirInbound: + case network.DirOutbound: continue } diff --git a/v2/handler.go b/v2/handler.go deleted file mode 100644 index 9dca0b2d..00000000 --- a/v2/handler.go +++ /dev/null @@ -1,6 +0,0 @@ -package dht - -import "github.com/libp2p/go-libp2p/core/network" - -func (d *DHT) handleNewStream(s network.Stream) { -} diff --git a/v2/handlers.go b/v2/handlers.go new file mode 100644 index 00000000..8c0fa6c3 --- /dev/null +++ b/v2/handlers.go @@ -0,0 +1,91 @@ +package dht + +import ( + "context" + "fmt" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" + + pb "github.com/libp2p/go-libp2p-kad-dht/v2/pb" +) + +func (d *DHT) handleFindPeer(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { + target, err := peer.IDFromBytes(req.GetKey()) + if err != nil { + return nil, fmt.Errorf("peer ID from bytes: %w", err) + } + + resp := &pb.Message{Type: pb.Message_FIND_NODE} + if target == d.host.ID() { + resp.CloserPeers = []pb.Message_Peer{pb.FromAddrInfo(d.pstore.PeerInfo(d.host.ID()))} + } else { + resp.CloserPeers = d.closerNodes(ctx, remote, nodeID(target)) + } + + return resp, nil +} + +func (d *DHT) closerNodes(ctx context.Context, remote peer.ID, target kad.NodeID[key.Key256]) []pb.Message_Peer { + peers := d.rt.NearestNodes(target.Key(), 20) // TODO: bucket size + if len(peers) == 0 { + return nil + } + + // pre-allocate the result set slice. + filtered := make([]pb.Message_Peer, 0, len(peers)) + + // if this method should return closer nodes to a peerID (the target + parameter is a nodeID), then we want to add this target to the result set + iff 1) it's not already part of the NearestNodes peers 2) we actually + know the addresses for the target peer. Therefore, targetFound tracks + if the target is in the set of NearestNodes from the routing table. + If that's the case we add it to the final filtered peers slice.
This + // means we potentially return bucketSize + 1 peers. + // Context: https://github.com/libp2p/go-libp2p-kad-dht/pull/511 + targetFound := false + for _, p := range peers { + pid := peer.ID(p.(nodeID)) // TODO: type cast + + // check for own peer ID + if pid == d.host.ID() { + d.log.Warn("routing table NearestNodes returned our own ID") + continue + } + + // Don't send a peer back themselves + if pid == remote { + continue + } + + // extract peer information from peer store + addrInfo := d.pstore.PeerInfo(pid) + if len(addrInfo.Addrs) == 0 { + continue + } + + // Check if this peer is our target peer + if tid, ok := target.(nodeID); ok && peer.ID(tid) == pid { + targetFound = true + } + + filtered = append(filtered, pb.FromAddrInfo(addrInfo)) + } + + // check if the target peer was among the nearest nodes + if tid, ok := target.(nodeID); ok && !targetFound && peer.ID(tid) != remote { + // it wasn't, check if we know how to reach it and if we do, add it to + // the filtered list. + addrInfo := d.pstore.PeerInfo(peer.ID(tid)) + if len(addrInfo.Addrs) > 0 { + filtered = append(filtered, pb.FromAddrInfo(addrInfo)) + } + } + + return filtered +} + +func (d *DHT) handlePing(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { + panic("not implemented") +} diff --git a/v2/metrics/metrics.go b/v2/metrics/metrics.go new file mode 100644 index 00000000..510b9fa2 --- /dev/null +++ b/v2/metrics/metrics.go @@ -0,0 +1,110 @@ +package metrics + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +var ( + defaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + defaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) +) + +// Keys +var ( + KeyMessageType, _ = tag.NewKey("message_type") + KeyPeerID, _ = tag.NewKey("peer_id") + // KeyInstanceID identifies a dht instance by the pointer address. + // Useful for differentiating between different dhts that have the same peer id. 
+ KeyInstanceID, _ = tag.NewKey("instance_id") +) + +// Measures +var ( + ReceivedMessages = stats.Int64("libp2p.io/dht/kad/received_messages", "Total number of messages received per RPC", stats.UnitDimensionless) + ReceivedMessageErrors = stats.Int64("libp2p.io/dht/kad/received_message_errors", "Total number of errors for messages received per RPC", stats.UnitDimensionless) + ReceivedBytes = stats.Int64("libp2p.io/dht/kad/received_bytes", "Total received bytes per RPC", stats.UnitBytes) + InboundRequestLatency = stats.Float64("libp2p.io/dht/kad/inbound_request_latency", "Latency per RPC", stats.UnitMilliseconds) + OutboundRequestLatency = stats.Float64("libp2p.io/dht/kad/outbound_request_latency", "Latency per RPC", stats.UnitMilliseconds) + SentMessages = stats.Int64("libp2p.io/dht/kad/sent_messages", "Total number of messages sent per RPC", stats.UnitDimensionless) + SentMessageErrors = stats.Int64("libp2p.io/dht/kad/sent_message_errors", "Total number of errors for messages sent per RPC", stats.UnitDimensionless) + SentRequests = stats.Int64("libp2p.io/dht/kad/sent_requests", "Total number of requests sent per RPC", stats.UnitDimensionless) + SentRequestErrors = stats.Int64("libp2p.io/dht/kad/sent_request_errors", "Total number of errors for requests sent per RPC", stats.UnitDimensionless) + SentBytes = stats.Int64("libp2p.io/dht/kad/sent_bytes", "Total sent bytes per RPC", stats.UnitBytes) + NetworkSize = stats.Int64("libp2p.io/dht/kad/network_size", "Network size estimation", stats.UnitDimensionless) +) + +// Views +var ( + ReceivedMessagesView = &view.View{ + Measure: ReceivedMessages, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: view.Count(), + } + ReceivedMessageErrorsView = &view.View{ + Measure: ReceivedMessageErrors, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: view.Count(), + } + ReceivedBytesView = &view.View{ + Measure: ReceivedBytes, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: defaultBytesDistribution, + } + InboundRequestLatencyView = &view.View{ + Measure: InboundRequestLatency, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: defaultMillisecondsDistribution, + } + OutboundRequestLatencyView = &view.View{ + Measure: OutboundRequestLatency, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: defaultMillisecondsDistribution, + } + SentMessagesView = &view.View{ + Measure: SentMessages, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: view.Count(), + } + SentMessageErrorsView = &view.View{ + Measure: SentMessageErrors, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: view.Count(), + } + SentRequestsView = &view.View{ + Measure: SentRequests, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: view.Count(), + } + SentRequestErrorsView = &view.View{ + Measure: SentRequestErrors, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: view.Count(), + } + SentBytesView = &view.View{ + Measure: SentBytes, + TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, + Aggregation: defaultBytesDistribution, + } + NetworkSizeView = &view.View{ + Measure: NetworkSize, + TagKeys: []tag.Key{KeyPeerID, KeyInstanceID}, + Aggregation: view.Count(), + } +) + +// DefaultViews with all views in it. 
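+// Editor's sketch (assumed usage, not part of the original patch): an
+// application would typically register these views once at startup, before
+// any measurements are recorded:
+//
+//	if err := view.Register(DefaultViews...); err != nil {
+//		panic(err)
+//	}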
+var DefaultViews = []*view.View{ + ReceivedMessagesView, + ReceivedMessageErrorsView, + ReceivedBytesView, + InboundRequestLatencyView, + OutboundRequestLatencyView, + SentMessagesView, + SentMessageErrorsView, + SentRequestsView, + SentRequestErrorsView, + SentBytesView, + NetworkSizeView, +} diff --git a/v2/net.go b/v2/net.go new file mode 100644 index 00000000..0b0c4c72 --- /dev/null +++ b/v2/net.go @@ -0,0 +1,44 @@ +package dht + +import ( + "bufio" + "io" + "sync" + + pb "github.com/libp2p/go-libp2p-kad-dht/v2/pb" + "github.com/libp2p/go-msgio/protoio" +) + +// The Protobuf writer performs multiple small writes when writing a message. +// We need to buffer those writes, to make sure that we're not sending a new +// packet for every single write. +type bufferedDelimitedWriter struct { + *bufio.Writer + protoio.WriteCloser +} + +var writerPool = sync.Pool{ + New: func() interface{} { + w := bufio.NewWriter(nil) + return &bufferedDelimitedWriter{ + Writer: w, + WriteCloser: protoio.NewDelimitedWriter(w), + } + }, +} + +func writeMsg(w io.Writer, mes *pb.Message) error { + bw := writerPool.Get().(*bufferedDelimitedWriter) + bw.Reset(w) + err := bw.WriteMsg(mes) + if err == nil { + err = bw.Flush() + } + bw.Reset(nil) + writerPool.Put(bw) + return err +} + +func (w *bufferedDelimitedWriter) Flush() error { + return w.Writer.Flush() +} diff --git a/v2/pb/Makefile b/v2/pb/Makefile new file mode 100644 index 00000000..eb14b576 --- /dev/null +++ b/v2/pb/Makefile @@ -0,0 +1,11 @@ +PB = $(wildcard *.proto) +GO = $(PB:.proto=.pb.go) + +all: $(GO) + +%.pb.go: %.proto + protoc --proto_path=$(GOPATH)/src:. --gogofast_out=. $< + +clean: + rm -f *.pb.go + rm -f *.go diff --git a/v2/pb/bytestring.go b/v2/pb/bytestring.go new file mode 100644 index 00000000..f20f1979 --- /dev/null +++ b/v2/pb/bytestring.go @@ -0,0 +1,42 @@ +package dht_pb + +import ( + "encoding/json" +) + +type byteString string + +func (b byteString) Marshal() ([]byte, error) { + return []byte(b), nil +} + +func (b *byteString) MarshalTo(data []byte) (int, error) { + return copy(data, *b), nil +} + +func (b *byteString) Unmarshal(data []byte) error { + *b = byteString(data) + return nil +} + +func (b *byteString) Size() int { + return len(*b) +} + +func (b byteString) MarshalJSON() ([]byte, error) { + return json.Marshal([]byte(b)) +} + +func (b *byteString) UnmarshalJSON(data []byte) error { + var buf []byte + err := json.Unmarshal(data, &buf) + if err != nil { + return err + } + *b = byteString(buf) + return nil +} + +func (b byteString) Equal(other byteString) bool { + return b == other +} diff --git a/v2/pb/dht.aux.go b/v2/pb/dht.aux.go new file mode 100644 index 00000000..055d628c --- /dev/null +++ b/v2/pb/dht.aux.go @@ -0,0 +1,28 @@ +package dht_pb + +import ( + "golang.org/x/exp/slog" + + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" +) + +// Addresses returns the Multiaddresses associated with the Message_Peer entry +func (m *Message_Peer) Addresses() []ma.Multiaddr { + if m == nil { + return nil + } + + maddrs := make([]ma.Multiaddr, 0, len(m.Addrs)) + for _, addr := range m.Addrs { + maddr, err := ma.NewMultiaddrBytes(addr) + if err != nil { + slog.Debug("error decoding multiaddr for peer", "peer", peer.ID(m.Id), "error", err) + continue + } + + maddrs = append(maddrs, maddr) + } + + return maddrs +} diff --git a/v2/pb/dht.pb.go b/v2/pb/dht.pb.go new file mode 100644 index 00000000..dd317f5e --- /dev/null +++ b/v2/pb/dht.pb.go @@ -0,0 +1,976 @@ +// Code generated by protoc-gen-gogo. 
DO NOT EDIT. +// source: dht.proto + +package dht_pb + +import ( + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + pb "github.com/libp2p/go-libp2p-record/pb" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = proto.Marshal + _ = fmt.Errorf + _ = math.Inf +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Message_MessageType int32 + +const ( + Message_PUT_VALUE Message_MessageType = 0 + Message_GET_VALUE Message_MessageType = 1 + Message_ADD_PROVIDER Message_MessageType = 2 + Message_GET_PROVIDERS Message_MessageType = 3 + Message_FIND_NODE Message_MessageType = 4 + Message_PING Message_MessageType = 5 +) + +var Message_MessageType_name = map[int32]string{ + 0: "PUT_VALUE", + 1: "GET_VALUE", + 2: "ADD_PROVIDER", + 3: "GET_PROVIDERS", + 4: "FIND_NODE", + 5: "PING", +} + +var Message_MessageType_value = map[string]int32{ + "PUT_VALUE": 0, + "GET_VALUE": 1, + "ADD_PROVIDER": 2, + "GET_PROVIDERS": 3, + "FIND_NODE": 4, + "PING": 5, +} + +func (x Message_MessageType) String() string { + return proto.EnumName(Message_MessageType_name, int32(x)) +} + +func (Message_MessageType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_616a434b24c97ff4, []int{0, 0} +} + +type Message_ConnectionType int32 + +const ( + // sender does not have a connection to peer, and no extra information (default) + Message_NOT_CONNECTED Message_ConnectionType = 0 + // sender has a live connection to peer + Message_CONNECTED Message_ConnectionType = 1 + // sender recently connected to peer + Message_CAN_CONNECT Message_ConnectionType = 2 + // sender recently tried to connect to peer repeatedly but failed to connect + // ("try" here is loose, but this should signal "made strong effort, failed") + Message_CANNOT_CONNECT Message_ConnectionType = 3 +) + +var Message_ConnectionType_name = map[int32]string{ + 0: "NOT_CONNECTED", + 1: "CONNECTED", + 2: "CAN_CONNECT", + 3: "CANNOT_CONNECT", +} + +var Message_ConnectionType_value = map[string]int32{ + "NOT_CONNECTED": 0, + "CONNECTED": 1, + "CAN_CONNECT": 2, + "CANNOT_CONNECT": 3, +} + +func (x Message_ConnectionType) String() string { + return proto.EnumName(Message_ConnectionType_name, int32(x)) +} + +func (Message_ConnectionType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_616a434b24c97ff4, []int{0, 1} +} + +type Message struct { + // defines what type of message it is. + Type Message_MessageType `protobuf:"varint,1,opt,name=type,proto3,enum=dht.pb.Message_MessageType" json:"type,omitempty"` + // defines what coral cluster level this query/response belongs to. + // in case we want to implement coral's cluster rings in the future. + ClusterLevelRaw int32 `protobuf:"varint,10,opt,name=clusterLevelRaw,proto3" json:"clusterLevelRaw,omitempty"` + // Used to specify the key associated with this message. 
+ // PUT_VALUE, GET_VALUE, ADD_PROVIDER, GET_PROVIDERS + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + // Used to return a value + // PUT_VALUE, GET_VALUE + Record *pb.Record `protobuf:"bytes,3,opt,name=record,proto3" json:"record,omitempty"` + // Used to return peers closer to a key in a query + // GET_VALUE, GET_PROVIDERS, FIND_NODE + CloserPeers []Message_Peer `protobuf:"bytes,8,rep,name=closerPeers,proto3" json:"closerPeers"` + // Used to return Providers + // GET_VALUE, ADD_PROVIDER, GET_PROVIDERS + ProviderPeers []Message_Peer `protobuf:"bytes,9,rep,name=providerPeers,proto3" json:"providerPeers"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_616a434b24c97ff4, []int{0} +} + +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} + +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} + +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) +} + +func (m *Message) XXX_Size() int { + return m.Size() +} + +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +func (m *Message) GetType() Message_MessageType { + if m != nil { + return m.Type + } + return Message_PUT_VALUE +} + +func (m *Message) GetClusterLevelRaw() int32 { + if m != nil { + return m.ClusterLevelRaw + } + return 0 +} + +func (m *Message) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *Message) GetRecord() *pb.Record { + if m != nil { + return m.Record + } + return nil +} + +func (m *Message) GetCloserPeers() []Message_Peer { + if m != nil { + return m.CloserPeers + } + return nil +} + +func (m *Message) GetProviderPeers() []Message_Peer { + if m != nil { + return m.ProviderPeers + } + return nil +} + +type Message_Peer struct { + // ID of a given peer. 
+ Id byteString `protobuf:"bytes,1,opt,name=id,proto3,customtype=byteString" json:"id"` + // multiaddrs for a given peer + Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs,proto3" json:"addrs,omitempty"` + // used to signal the sender's connection capabilities to the peer + Connection Message_ConnectionType `protobuf:"varint,3,opt,name=connection,proto3,enum=dht.pb.Message_ConnectionType" json:"connection,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Message_Peer) Reset() { *m = Message_Peer{} } +func (m *Message_Peer) String() string { return proto.CompactTextString(m) } +func (*Message_Peer) ProtoMessage() {} +func (*Message_Peer) Descriptor() ([]byte, []int) { + return fileDescriptor_616a434b24c97ff4, []int{0, 0} +} + +func (m *Message_Peer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} + +func (m *Message_Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message_Peer.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} + +func (m *Message_Peer) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message_Peer.Merge(m, src) +} + +func (m *Message_Peer) XXX_Size() int { + return m.Size() +} + +func (m *Message_Peer) XXX_DiscardUnknown() { + xxx_messageInfo_Message_Peer.DiscardUnknown(m) +} + +var xxx_messageInfo_Message_Peer proto.InternalMessageInfo + +func (m *Message_Peer) GetAddrs() [][]byte { + if m != nil { + return m.Addrs + } + return nil +} + +func (m *Message_Peer) GetConnection() Message_ConnectionType { + if m != nil { + return m.Connection + } + return Message_NOT_CONNECTED +} + +func init() { + proto.RegisterEnum("dht.pb.Message_MessageType", Message_MessageType_name, Message_MessageType_value) + proto.RegisterEnum("dht.pb.Message_ConnectionType", Message_ConnectionType_name, Message_ConnectionType_value) + proto.RegisterType((*Message)(nil), "dht.pb.Message") + proto.RegisterType((*Message_Peer)(nil), "dht.pb.Message.Peer") +} + +func init() { proto.RegisterFile("dht.proto", fileDescriptor_616a434b24c97ff4) } + +var fileDescriptor_616a434b24c97ff4 = []byte{ + // 469 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xb1, 0x6f, 0x9b, 0x40, + 0x18, 0xc5, 0x73, 0x80, 0xdd, 0xf8, 0x03, 0x3b, 0xe4, 0x94, 0x01, 0xb9, 0x92, 0x83, 0x3c, 0xd1, + 0xc1, 0x20, 0xd1, 0xb5, 0xaa, 0x6a, 0x03, 0x8d, 0x2c, 0xa5, 0xd8, 0xba, 0x38, 0xe9, 0x68, 0x19, + 0xb8, 0x12, 0x54, 0xd7, 0x87, 0x00, 0xa7, 0xf2, 0xd6, 0x3f, 0x2f, 0x63, 0xe7, 0x0e, 0x51, 0xe5, + 0xa9, 0x7f, 0x46, 0xc5, 0x11, 0x5a, 0xec, 0x25, 0x13, 0xef, 0x7d, 0xf7, 0x7e, 0xe2, 0xdd, 0xa7, + 0x83, 0x4e, 0x74, 0x5f, 0x98, 0x69, 0xc6, 0x0a, 0x86, 0xdb, 0x5c, 0x06, 0x7d, 0x3b, 0x4e, 0x8a, + 0xfb, 0x6d, 0x60, 0x86, 0xec, 0x9b, 0xb5, 0x4e, 0x82, 0xd4, 0x4e, 0xad, 0x98, 0x8d, 0x2a, 0x35, + 0xca, 0x68, 0xc8, 0xb2, 0xc8, 0x4a, 0x03, 0xab, 0x52, 0x15, 0xdb, 0x1f, 0x35, 0x98, 0x98, 0xc5, + 0xcc, 0xe2, 0xe3, 0x60, 0xfb, 0x85, 0x3b, 0x6e, 0xb8, 0xaa, 0xe2, 0xc3, 0x3f, 0x12, 0xbc, 0xfa, + 0x44, 0xf3, 0x7c, 0x15, 0x53, 0x6c, 0x81, 0x54, 0xec, 0x52, 0xaa, 0x21, 0x1d, 0x19, 0x3d, 0xfb, + 0xb5, 0x59, 0xb5, 0x30, 0x9f, 0x8f, 0xeb, 0xef, 0x62, 0x97, 0x52, 0xc2, 0x83, 0xd8, 0x80, 0xb3, + 0x70, 0xbd, 0xcd, 0x0b, 0x9a, 0x5d, 0xd3, 0x07, 0xba, 0x26, 0xab, 0xef, 0x1a, 0xe8, 0xc8, 0x68, + 0x91, 0xe3, 0x31, 0x56, 0x41, 0xfc, 0x4a, 
0x77, 0x9a, 0xa0, 0x23, 0x43, 0x21, 0xa5, 0xc4, 0x6f, + 0xa0, 0x5d, 0xf5, 0xd6, 0x44, 0x1d, 0x19, 0xb2, 0x7d, 0x6e, 0xd6, 0xd7, 0x08, 0x4c, 0xc2, 0x15, + 0x79, 0x0e, 0xe0, 0x77, 0x20, 0x87, 0x6b, 0x96, 0xd3, 0x6c, 0x4e, 0x69, 0x96, 0x6b, 0xa7, 0xba, + 0x68, 0xc8, 0xf6, 0xc5, 0x71, 0xbd, 0xf2, 0x70, 0x22, 0x3d, 0x3e, 0x5d, 0x9e, 0x90, 0x66, 0x1c, + 0x7f, 0x80, 0x6e, 0x9a, 0xb1, 0x87, 0x24, 0xaa, 0xf9, 0xce, 0x8b, 0xfc, 0x21, 0xd0, 0xff, 0x81, + 0x40, 0x2a, 0x15, 0x1e, 0x82, 0x90, 0x44, 0x7c, 0x3d, 0xca, 0x04, 0x97, 0xc9, 0x5f, 0x4f, 0x97, + 0x10, 0xec, 0x0a, 0x7a, 0x53, 0x64, 0xc9, 0x26, 0x26, 0x42, 0x12, 0xe1, 0x0b, 0x68, 0xad, 0xa2, + 0x28, 0xcb, 0x35, 0x41, 0x17, 0x0d, 0x85, 0x54, 0x06, 0xbf, 0x07, 0x08, 0xd9, 0x66, 0x43, 0xc3, + 0x22, 0x61, 0x1b, 0x7e, 0xe3, 0x9e, 0x3d, 0x38, 0x6e, 0xe0, 0xfc, 0x4b, 0xf0, 0x1d, 0x37, 0x88, + 0x61, 0x02, 0x72, 0x63, 0xfd, 0xb8, 0x0b, 0x9d, 0xf9, 0xed, 0x62, 0x79, 0x37, 0xbe, 0xbe, 0xf5, + 0xd4, 0x93, 0xd2, 0x5e, 0x79, 0xb5, 0x45, 0x58, 0x05, 0x65, 0xec, 0xba, 0xcb, 0x39, 0x99, 0xdd, + 0x4d, 0x5d, 0x8f, 0xa8, 0x02, 0x3e, 0x87, 0x6e, 0x19, 0xa8, 0x27, 0x37, 0xaa, 0x58, 0x32, 0x1f, + 0xa7, 0xbe, 0xbb, 0xf4, 0x67, 0xae, 0xa7, 0x4a, 0xf8, 0x14, 0xa4, 0xf9, 0xd4, 0xbf, 0x52, 0x5b, + 0xc3, 0xcf, 0xd0, 0x3b, 0x2c, 0x52, 0xd2, 0xfe, 0x6c, 0xb1, 0x74, 0x66, 0xbe, 0xef, 0x39, 0x0b, + 0xcf, 0xad, 0xfe, 0xf8, 0xdf, 0x22, 0x7c, 0x06, 0xb2, 0x33, 0xf6, 0xeb, 0x84, 0x2a, 0x60, 0x0c, + 0x3d, 0x67, 0xec, 0x37, 0x28, 0x55, 0x9c, 0x28, 0x8f, 0xfb, 0x01, 0xfa, 0xb9, 0x1f, 0xa0, 0xdf, + 0xfb, 0x01, 0x0a, 0xda, 0xfc, 0xfd, 0xbd, 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x1a, 0xa1, + 0xbe, 0xf7, 0x02, 0x00, 0x00, +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ClusterLevelRaw != 0 { + i = encodeVarintDht(dAtA, i, uint64(m.ClusterLevelRaw)) + i-- + dAtA[i] = 0x50 + } + if len(m.ProviderPeers) > 0 { + for iNdEx := len(m.ProviderPeers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ProviderPeers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDht(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if len(m.CloserPeers) > 0 { + for iNdEx := len(m.CloserPeers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CloserPeers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDht(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.Record != nil { + { + size, err := m.Record.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDht(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintDht(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintDht(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Message_Peer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message_Peer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_Peer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Connection != 0 { + i = encodeVarintDht(dAtA, i, uint64(m.Connection)) + i-- + dAtA[i] = 0x18 + } + if len(m.Addrs) > 0 { + for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Addrs[iNdEx]) + copy(dAtA[i:], m.Addrs[iNdEx]) + i = encodeVarintDht(dAtA, i, uint64(len(m.Addrs[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + { + size := m.Id.Size() + i -= size + if _, err := m.Id.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintDht(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintDht(dAtA []byte, offset int, v uint64) int { + offset -= sovDht(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} + +func (m *Message) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovDht(uint64(m.Type)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovDht(uint64(l)) + } + if m.Record != nil { + l = m.Record.Size() + n += 1 + l + sovDht(uint64(l)) + } + if len(m.CloserPeers) > 0 { + for _, e := range m.CloserPeers { + l = e.Size() + n += 1 + l + sovDht(uint64(l)) + } + } + if len(m.ProviderPeers) > 0 { + for _, e := range m.ProviderPeers { + l = e.Size() + n += 1 + l + sovDht(uint64(l)) + } + } + if m.ClusterLevelRaw != 0 { + n += 1 + sovDht(uint64(m.ClusterLevelRaw)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Message_Peer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Id.Size() + n += 1 + l + sovDht(uint64(l)) + if len(m.Addrs) > 0 { + for _, b := range m.Addrs { + l = len(b) + n += 1 + l + sovDht(uint64(l)) + } + } + if m.Connection != 0 { + n += 1 + sovDht(uint64(m.Connection)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovDht(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} + +func sozDht(x uint64) (n int) { + return sovDht(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Message_MessageType(b&0x7F) << shift + if b < 0x80 { + break + } + } 
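+			// The loop above is the standard protobuf varint decode: every
+			// byte contributes its low seven bits (least-significant group
+			// first) and a cleared high bit ends the sequence. For example,
+			// the bytes 0x96 0x01 decode to 0x16 | (0x01 << 7) = 150.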
+ case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDht + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthDht + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDht + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDht + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Record == nil { + m.Record = &pb.Record{} + } + if err := m.Record.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CloserPeers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDht + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDht + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CloserPeers = append(m.CloserPeers, Message_Peer{}) + if err := m.CloserPeers[len(m.CloserPeers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderPeers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDht + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDht + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProviderPeers = append(m.ProviderPeers, Message_Peer{}) + if err := m.ProviderPeers[len(m.ProviderPeers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterLevelRaw", wireType) + } + m.ClusterLevelRaw = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClusterLevelRaw |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDht(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDht + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func (m *Message_Peer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Peer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Peer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDht + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthDht + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDht + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthDht + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addrs = append(m.Addrs, make([]byte, postIndex-iNdEx)) + copy(m.Addrs[len(m.Addrs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Connection", wireType) + } + m.Connection = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDht + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Connection |= Message_ConnectionType(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDht(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDht + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
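+			// the raw bytes were retained above, so unknown fields survive an
+			// unmarshal/marshal round trip; now jump past them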
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skipDht(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDht + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDht + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDht + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDht + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDht + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDht + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDht = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDht = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDht = fmt.Errorf("proto: unexpected end of group") +) diff --git a/v2/pb/dht.proto b/v2/pb/dht.proto new file mode 100644 index 00000000..18bfd741 --- /dev/null +++ b/v2/pb/dht.proto @@ -0,0 +1,72 @@ +// In order to re-generate the golang packages for `Message` you will need... +// 1. Protobuf binary (tested with protoc 3.0.0). - https://github.com/gogo/protobuf/releases +// 2. Gogo Protobuf (tested with gogo 0.3). - https://github.com/gogo/protobuf +// 3. To have cloned `libp2p/go-libp2p-{record,kad-dht}` under the same directory. +// Now from `libp2p/go-libp2p-kad-dht/pb` you can run... +// `protoc --gogo_out=. --proto_path=../../go-libp2p-record/pb/ --proto_path=./ dht.proto` + +syntax = "proto3"; +package dht.pb; + +import "github.com/libp2p/go-libp2p-record/pb/record.proto"; +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message Message { + enum MessageType { + PUT_VALUE = 0; + GET_VALUE = 1; + ADD_PROVIDER = 2; + GET_PROVIDERS = 3; + FIND_NODE = 4; + PING = 5; + } + + enum ConnectionType { + // sender does not have a connection to peer, and no extra information (default) + NOT_CONNECTED = 0; + + // sender has a live connection to peer + CONNECTED = 1; + + // sender recently connected to peer + CAN_CONNECT = 2; + + // sender recently tried to connect to peer repeatedly but failed to connect + // ("try" here is loose, but this should signal "made strong effort, failed") + CANNOT_CONNECT = 3; + } + + message Peer { + // ID of a given peer. + bytes id = 1 [(gogoproto.customtype) = "byteString", (gogoproto.nullable) = false]; + + // multiaddrs for a given peer + repeated bytes addrs = 2; + + // used to signal the sender's connection capabilities to the peer + ConnectionType connection = 3; + } + + // defines what type of message it is. + MessageType type = 1; + + // defines what coral cluster level this query/response belongs to. 
+ // in case we want to implement coral's cluster rings in the future. + int32 clusterLevelRaw = 10; + + // Used to specify the key associated with this message. + // PUT_VALUE, GET_VALUE, ADD_PROVIDER, GET_PROVIDERS + bytes key = 2; + + // Used to return a value + // PUT_VALUE, GET_VALUE + record.pb.Record record = 3; + + // Used to return peers closer to a key in a query + // GET_VALUE, GET_PROVIDERS, FIND_NODE + repeated Peer closerPeers = 8 [(gogoproto.nullable) = false]; + + // Used to return Providers + // GET_VALUE, ADD_PROVIDER, GET_PROVIDERS + repeated Peer providerPeers = 9 [(gogoproto.nullable) = false]; +} diff --git a/v2/pb/message.go b/v2/pb/message.go new file mode 100644 index 00000000..86d84cf0 --- /dev/null +++ b/v2/pb/message.go @@ -0,0 +1,120 @@ +package dht_pb + +import ( + "github.com/libp2p/go-libp2p/core/peer" +) + +// FromAddrInfo constructs a Message_Peer from the given peer.AddrInfo +func FromAddrInfo(p peer.AddrInfo) Message_Peer { + mp := Message_Peer{ + Id: byteString(p.ID), + Addrs: make([][]byte, len(p.Addrs)), + } + + for i, maddr := range p.Addrs { + mp.Addrs[i] = maddr.Bytes() // Bytes, not String. Compressed. + } + + return mp +} + +//type PeerRoutingInfo struct { +// peer.AddrInfo +// network.Connectedness +//} + +//func peerRoutingInfoToPBPeer(p PeerRoutingInfo) Message_Peer { +// var pbp Message_Peer +// +// pbp.Addrs = make([][]byte, len(p.Addrs)) +// for i, maddr := range p.Addrs { +// pbp.Addrs[i] = maddr.Bytes() // Bytes, not String. Compressed. +// } +// pbp.Id = byteString(p.ID) +// pbp.Connection = ConnectionType(p.Connectedness) +// return pbp +//} + +//// PBPeerToPeer turns a *Message_Peer into its peer.AddrInfo counterpart +//func PBPeerToPeerInfo(pbp Message_Peer) peer.AddrInfo { +// return peer.AddrInfo{ +// ID: peer.ID(pbp.Id), +// Addrs: pbp.Addresses(), +// } +//} +// +//// RawPeerInfosToPBPeers converts a slice of Peers into a slice of *Message_Peers, +//// ready to go out on the wire. +//func RawPeerInfosToPBPeers(peers []peer.AddrInfo) []Message_Peer { +// pbpeers := make([]Message_Peer, len(peers)) +// for i, p := range peers { +// pbpeers[i] = peerInfoToPBPeer(p) +// } +// return pbpeers +//} +// +//// PeersToPBPeers converts given []peer.Peer into a set of []*Message_Peer, +//// which can be written to a message and sent out. the key thing this function +//// does (in addition to PeersToPBPeers) is set the ConnectionType with +//// information from the given network.Network. +//func PeerInfosToPBPeers(n network.Network, peers []peer.AddrInfo) []Message_Peer { +// pbps := RawPeerInfosToPBPeers(peers) +// for i, pbp := range pbps { +// c := ConnectionType(n.Connectedness(peers[i].ID)) +// pbp.Connection = c +// } +// return pbps +//} +// +//func PeerRoutingInfosToPBPeers(peers []PeerRoutingInfo) []Message_Peer { +// pbpeers := make([]Message_Peer, len(peers)) +// for i, p := range peers { +// pbpeers[i] = peerRoutingInfoToPBPeer(p) +// } +// return pbpeers +//} +// +//// PBPeersToPeerInfos converts given []*Message_Peer into []peer.AddrInfo +//// Invalid addresses will be silently omitted. +//func PBPeersToPeerInfos(pbps []Message_Peer) []*peer.AddrInfo { +// peers := make([]*peer.AddrInfo, 0, len(pbps)) +// for _, pbp := range pbps { +// ai := PBPeerToPeerInfo(pbp) +// peers = append(peers, &ai) +// } +// return peers +//} + +//// fromConnectedness returns a Message_ConnectionType associated with the +//// network.Connectedness. 
+//func fromConnectedness(c network.Connectedness) Message_ConnectionType { +// switch c { +// case network.NotConnected: +// return Message_NOT_CONNECTED +// case network.Connected: +// return Message_CONNECTED +// case network.CanConnect: +// return Message_CAN_CONNECT +// case network.CannotConnect: +// return Message_CANNOT_CONNECT +// default: +// return Message_NOT_CONNECTED +// } +//} +// +//// Connectedness returns a network.Connectedness associated with the given +//// Message_ConnectionType. +//func Connectedness(c Message_ConnectionType) network.Connectedness { +// switch c { +// case Message_NOT_CONNECTED: +// return network.NotConnected +// case Message_CONNECTED: +// return network.Connected +// case Message_CAN_CONNECT: +// return network.CanConnect +// case Message_CANNOT_CONNECT: +// return network.CannotConnect +// default: +// return network.NotConnected +// } +//} diff --git a/v2/pb/message_test.go b/v2/pb/message_test.go new file mode 100644 index 00000000..71f4abdc --- /dev/null +++ b/v2/pb/message_test.go @@ -0,0 +1,15 @@ +package dht_pb + +import ( + "testing" +) + +func TestBadAddrsDontReturnNil(t *testing.T) { + mp := new(Message_Peer) + mp.Addrs = [][]byte{[]byte("NOT A VALID MULTIADDR")} + + addrs := mp.Addresses() + if len(addrs) > 0 { + t.Fatal("shouldnt have any multiaddrs") + } +} diff --git a/v2/stream.go b/v2/stream.go new file mode 100644 index 00000000..580b1f84 --- /dev/null +++ b/v2/stream.go @@ -0,0 +1,192 @@ +package dht + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "time" + + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-msgio" + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "golang.org/x/exp/slog" + + "github.com/libp2p/go-libp2p-kad-dht/v2/metrics" + pb "github.com/libp2p/go-libp2p-kad-dht/v2/pb" +) + +func (d *DHT) streamHandler(s network.Stream) { + ctx, _ := tag.New(context.Background(), + tag.Upsert(metrics.KeyPeerID, d.host.ID().String()), + tag.Upsert(metrics.KeyInstanceID, fmt.Sprintf("%p", d)), + ) + + if err := d.handleNewStream(ctx, s); err != nil { + // If we exited with an error, let the remote peer know. + _ = s.Reset() + } else { + // If we exited without an error, close gracefully. + _ = s.Close() + } +} + +func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error { + // init structured logger that always contains the remote peers PeerID + slogger := d.log.With(slog.String("from", s.Conn().RemotePeer().String())) + + // reset the stream after it was idle for too long + if err := s.SetDeadline(time.Now().Add(d.cfg.TimeoutStreamIdle)); err != nil { + return fmt.Errorf("set initial stream deadline: %w", err) + } + + reader := msgio.NewVarintReaderSize(s, network.MessageSizeMax) + for { + // 1. read message from stream + data, err := d.streamReadMsg(ctx, slogger, reader) + if err != nil { + return err + } else if data == nil { + return nil // nil error, nil data -> graceful end + } + + // we have received a message, start the timer to + // track inbound request latency + startTime := time.Now() + + // 2. 
unmarshal message into something usable
+		req, err := d.streamUnmarshalMsg(ctx, slogger, data)
+		if err != nil {
+			return err
+		}
+
+		// signal to buffer pool that it can reuse the bytes
+		reader.ReleaseMsg(data)
+
+		// reset stream deadline
+		if err = s.SetDeadline(time.Now().Add(d.cfg.TimeoutStreamIdle)); err != nil {
+			return fmt.Errorf("reset stream deadline: %w", err)
+		}
+
+		// extend metrics context and slogger with message information.
+		// ctx must be overwritten because in the next iteration metrics.KeyMessageType
+		// would already exist and tag.New would return an error.
+		ctx, _ := tag.New(ctx, tag.Upsert(metrics.KeyMessageType, req.GetType().String()))
+		slogger = slogger.With(
+			slog.String("type", req.GetType().String()),
+			slog.String("key", base64.StdEncoding.EncodeToString(req.GetKey())),
+		)
+
+		// track message metrics
+		stats.Record(ctx,
+			metrics.ReceivedMessages.M(1),
+			metrics.ReceivedBytes.M(int64(len(data))),
+		)
+
+		// 3. handle the message and gather response
+		resp, err := d.streamHandleMsg(ctx, slogger, s.Conn().RemotePeer(), req)
+		if err != nil {
+			slogger.LogAttrs(ctx, slog.LevelDebug, "error handling message", slog.Duration("time", time.Since(startTime)), slog.String("error", err.Error()))
+			stats.Record(ctx, metrics.ReceivedMessageErrors.M(1))
+			return err
+		}
+		slogger.LogAttrs(ctx, slog.LevelDebug, "handled message", slog.Duration("time", time.Since(startTime)))
+
+		// if the handler didn't return a response, continue reading the stream
+		if resp == nil {
+			continue
+		}
+
+		// 4. send remote peer our response
+		err = d.streamWriteMsg(ctx, slogger, s, resp)
+		if err != nil {
+			return err
+		}
+
+		// final logging, metrics tracking
+		latency := time.Since(startTime)
+		slogger.LogAttrs(ctx, slog.LevelDebug, "responded to message", slog.Duration("time", latency))
+		stats.Record(ctx, metrics.InboundRequestLatency.M(float64(latency.Milliseconds())))
+	}
+}
+
+// streamReadMsg reads a message from the given msgio.Reader and returns the
+// corresponding bytes. If an error occurs, it logs it and updates the metrics.
+// If the bytes and the error are both nil, the remote peer closed the stream.
+func (d *DHT) streamReadMsg(ctx context.Context, slogger *slog.Logger, r msgio.Reader) ([]byte, error) {
+	data, err := r.ReadMsg()
+	if err != nil {
+		// if the reader returns an end-of-file signal, exit gracefully
+		if errors.Is(err, io.EOF) {
+			return nil, nil
+		}
+
+		// log any other errors than stream resets
+		if err.Error() != "stream reset" {
+			slogger.LogAttrs(ctx, slog.LevelDebug, "error reading message", slog.String("error", err.Error()))
+		}
+
+		// record any potential partial message we have received
+		if len(data) > 0 {
+			_ = stats.RecordWithTags(ctx,
+				[]tag.Mutator{tag.Upsert(metrics.KeyMessageType, "UNKNOWN")},
+				metrics.ReceivedMessages.M(1),
+				metrics.ReceivedMessageErrors.M(1),
+				metrics.ReceivedBytes.M(int64(len(data))),
+			)
+		}
+
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// streamUnmarshalMsg takes the byte slice and tries to unmarshal it into a
+// protobuf message. If an error occurs, it will be logged and the metrics will
+// be updated.
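+//
+// The inverse direction uses the gogo-generated Marshal method on pb.Message.
+// A minimal round trip, as an illustrative sketch (length framing is added
+// separately when writing to the stream):
+//
+//	msg := &pb.Message{Type: pb.Message_PING}
+//	data, _ := msg.Marshal() // raw wire bytes
+//	var decoded pb.Message
+//	_ = decoded.Unmarshal(data)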
+func (d *DHT) streamUnmarshalMsg(ctx context.Context, slogger *slog.Logger, data []byte) (*pb.Message, error) {
+	var req pb.Message
+	if err := req.Unmarshal(data); err != nil {
+		slogger.LogAttrs(ctx, slog.LevelDebug, "error unmarshalling message", slog.String("error", err.Error()))
+
+		_ = stats.RecordWithTags(ctx,
+			[]tag.Mutator{tag.Upsert(metrics.KeyMessageType, "UNKNOWN")},
+			metrics.ReceivedMessages.M(1),
+			metrics.ReceivedMessageErrors.M(1),
+			metrics.ReceivedBytes.M(int64(len(data))),
+		)
+
+		return nil, err
+	}
+
+	return &req, nil
+}
+
+// streamHandleMsg handles the given protobuf message based on its type from the
+// given remote peer.
+func (d *DHT) streamHandleMsg(ctx context.Context, slogger *slog.Logger, remote peer.ID, req *pb.Message) (*pb.Message, error) {
+	slogger.LogAttrs(ctx, slog.LevelDebug, "handling message")
+
+	switch req.GetType() {
+	case pb.Message_FIND_NODE:
+		return d.handleFindPeer(ctx, remote, req)
+	case pb.Message_PING:
+		return d.handlePing(ctx, remote, req)
+	}
+
+	return nil, fmt.Errorf("can't handle received message: %s", req.GetType().String())
+}
+
+func (d *DHT) streamWriteMsg(ctx context.Context, slogger *slog.Logger, s network.Stream, msg *pb.Message) error {
+	if err := writeMsg(s, msg); err != nil {
+		slogger.LogAttrs(ctx, slog.LevelDebug, "error writing response", slog.String("error", err.Error()))
+		stats.Record(ctx, metrics.ReceivedMessageErrors.M(1))
+		return err
+	}
+
+	return nil
+}

From 027c8d2146a7f726327ed665fb7c46d4082df3e7 Mon Sep 17 00:00:00 2001
From: Dennis Trautwein
Date: Wed, 16 Aug 2023 14:19:51 +0200
Subject: [PATCH 09/64] Add FIND_NODE and PING handlers

---
 v2/config.go        |  7 +++++
 v2/dht.go           |  2 ++
 v2/handlers.go      | 74 ++++++++++++++++++++++++---------------------
 v2/pb/bytestring.go | 72 +++++++++++++++++++++----------------------
 v2/pb/dht.aux.go    | 11 +++++++
 v2/stream.go        | 44 +++++++++++++++++++++++++++
 6 files changed, 137 insertions(+), 73 deletions(-)

diff --git a/v2/config.go b/v2/config.go
index 88ae0254..c9637917 100644
--- a/v2/config.go
+++ b/v2/config.go
@@ -9,10 +9,17 @@ import (
 	"github.com/plprobelab/go-kademlia/coord"
 	"github.com/plprobelab/go-kademlia/kad"
 	"github.com/plprobelab/go-kademlia/key"
+	"go.opentelemetry.io/otel"
 	"go.uber.org/zap/exp/zapslog"
 	"golang.org/x/exp/slog"
 )
 
+// ServiceName is used to scope incoming streams for the resource manager.
+const ServiceName = "libp2p.KadDHT"
+
+// tracer is an open telemetry tracing instance
+var tracer = otel.Tracer("go-libp2p-kad-dht")
+
 type (
 	// ModeOpt describes in which mode this DHT process should operate in.
 	// Possible options are client, server, and any variant that switches
diff --git a/v2/dht.go b/v2/dht.go
index 6aa8d33a..3aaadf8a 100644
--- a/v2/dht.go
+++ b/v2/dht.go
@@ -173,6 +173,8 @@ func (d *DHT) setClientMode() {
 		case network.DirUnknown:
 		case network.DirInbound:
 		case network.DirOutbound:
+			// don't reset outbound connections because these are queries
+			// that we have initiated.
 			continue
 		}
 
diff --git a/v2/handlers.go b/v2/handlers.go
index 8c0fa6c3..3bcda4d5 100644
--- a/v2/handlers.go
+++ b/v2/handlers.go
@@ -5,46 +5,68 @@ import (
 	"fmt"
 
 	"github.com/libp2p/go-libp2p/core/peer"
-	"github.com/plprobelab/go-kademlia/kad"
 	"github.com/plprobelab/go-kademlia/key"
+	"golang.org/x/exp/slog"
 
 	pb "github.com/libp2p/go-libp2p-kad-dht/v2/pb"
 )
 
+// handleFindPeer handles FIND_NODE requests from remote peers.
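+// The request carries the raw binary peer ID in the key field. An illustrative
+// client-side sketch, where targetID stands for the peer.ID being looked up:
+//
+//	req := &pb.Message{
+//		Type: pb.Message_FIND_NODE,
+//		Key:  []byte(targetID),
+//	}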
func (d *DHT) handleFindPeer(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { + ctx, span := tracer.Start(ctx, "DHT.handleFindPeer") + defer span.End() + target, err := peer.IDFromBytes(req.GetKey()) if err != nil { return nil, fmt.Errorf("peer ID from bytes: %w", err) } + // initialize the response message resp := &pb.Message{Type: pb.Message_FIND_NODE} + + // if the remote is asking for us, short-circuit and return us only if target == d.host.ID() { resp.CloserPeers = []pb.Message_Peer{pb.FromAddrInfo(d.pstore.PeerInfo(d.host.ID()))} - } else { - resp.CloserPeers = d.closerNodes(ctx, remote, nodeID(target)) + return resp, nil + } + + // gather closer peers that we know + resp.CloserPeers = d.closerPeers(ctx, remote, nodeID(target).Key()) + + // if we happen to know the target peers addresses (e.g., although we are + // far away in the keyspace), we add the peer to the result set. This means + // we potentially return bucketSize + 1 peers. We don't add the peer if it's + // already contained in the CloserPeers. + targetInfo := d.pstore.PeerInfo(target) + if len(targetInfo.Addrs) > 0 && !resp.ContainsCloserPeer(target) { + resp.CloserPeers = append(resp.CloserPeers, pb.FromAddrInfo(targetInfo)) } return resp, nil } -func (d *DHT) closerNodes(ctx context.Context, remote peer.ID, target kad.NodeID[key.Key256]) []pb.Message_Peer { - peers := d.rt.NearestNodes(target.Key(), 20) // TODO: bucket size +// handlePing handles PING requests from remote peers. +func (d *DHT) handlePing(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { + ctx, span := tracer.Start(ctx, "DHT.handlePing") + defer span.End() + + d.log.LogAttrs(ctx, slog.LevelDebug, "Responding to ping", slog.String("remote", remote.String())) + return &pb.Message{Type: pb.Message_PING}, nil +} + +// closerPeers returns the closest peers to the given target key this host knows +// about. It doesn't return 1) itself 2) the peer that asked for closer peers. +func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256) []pb.Message_Peer { + ctx, span := tracer.Start(ctx, "DHT.closerPeers") + defer span.End() + + peers := d.rt.NearestNodes(target, 20) // TODO: bucket size if len(peers) == 0 { return nil } // pre-allocated the result set slice. filtered := make([]pb.Message_Peer, 0, len(peers)) - - // if this method should return closer nodes to a peerID (the target is - // parameter is a nodeID), then we want to add this target to the result set - // iff 1) it's not already part of the NearestNodes peers 2) we actually - // know the addresses for the target peer. Therefore, targetFound tracks - // if the target is in the set of NearestNodes from the routing table. - // If that's the case we add it to the final filtered peers slice. This - // means we potentially return bucketSize + 1 peers. - // Context: https://github.com/libp2p/go-libp2p-kad-dht/pull/511 - targetFound := false for _, p := range peers { pid := peer.ID(p.(nodeID)) // TODO: type cast @@ -59,33 +81,15 @@ func (d *DHT) closerNodes(ctx context.Context, remote peer.ID, target kad.NodeID continue } - // extract peer information from peer store + // extract peer information from peer store and only add it to the + // final list if we know addresses of that peer. 
addrInfo := d.pstore.PeerInfo(pid) if len(addrInfo.Addrs) == 0 { continue } - // Check if this peer is our target peer - if tid, ok := target.(nodeID); ok && peer.ID(tid) == pid { - targetFound = true - } - filtered = append(filtered, pb.FromAddrInfo(addrInfo)) } - // check if the target peer was among the nearest nodes - if tid, ok := target.(nodeID); ok && !targetFound && peer.ID(tid) != remote { - // it wasn't, check if we know how to reach it and if we do, add it to - // the filtered list. - addrInfo := d.pstore.PeerInfo(peer.ID(tid)) - if len(addrInfo.Addrs) > 0 { - filtered = append(filtered, pb.FromAddrInfo(addrInfo)) - } - } - return filtered } - -func (d *DHT) handlePing(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { - panic("not implemented") -} diff --git a/v2/pb/bytestring.go b/v2/pb/bytestring.go index f20f1979..4c33068b 100644 --- a/v2/pb/bytestring.go +++ b/v2/pb/bytestring.go @@ -1,42 +1,38 @@ package dht_pb -import ( - "encoding/json" -) - type byteString string -func (b byteString) Marshal() ([]byte, error) { - return []byte(b), nil -} - -func (b *byteString) MarshalTo(data []byte) (int, error) { - return copy(data, *b), nil -} - -func (b *byteString) Unmarshal(data []byte) error { - *b = byteString(data) - return nil -} - -func (b *byteString) Size() int { - return len(*b) -} - -func (b byteString) MarshalJSON() ([]byte, error) { - return json.Marshal([]byte(b)) -} - -func (b *byteString) UnmarshalJSON(data []byte) error { - var buf []byte - err := json.Unmarshal(data, &buf) - if err != nil { - return err - } - *b = byteString(buf) - return nil -} - -func (b byteString) Equal(other byteString) bool { - return b == other -} +//func (b byteString) Marshal() ([]byte, error) { +// return []byte(b), nil +//} +// +//func (b *byteString) MarshalTo(data []byte) (int, error) { +// return copy(data, *b), nil +//} +// +//func (b *byteString) Unmarshal(data []byte) error { +// *b = byteString(data) +// return nil +//} +// +//func (b *byteString) Size() int { +// return len(*b) +//} +// +//func (b byteString) MarshalJSON() ([]byte, error) { +// return json.Marshal([]byte(b)) +//} +// +//func (b *byteString) UnmarshalJSON(data []byte) error { +// var buf []byte +// err := json.Unmarshal(data, &buf) +// if err != nil { +// return err +// } +// *b = byteString(buf) +// return nil +//} +// +//func (b byteString) Equal(other byteString) bool { +// return b == other +//} diff --git a/v2/pb/dht.aux.go b/v2/pb/dht.aux.go index 055d628c..3e313fca 100644 --- a/v2/pb/dht.aux.go +++ b/v2/pb/dht.aux.go @@ -7,6 +7,17 @@ import ( ma "github.com/multiformats/go-multiaddr" ) +// ContainsCloserPeer returns true if the provided peer ID is among the +// list of closer peers contained in this message. +func (m *Message) ContainsCloserPeer(pid peer.ID) bool { + for _, cp := range m.CloserPeers { + if cp.Id.Equal(byteString(pid)) { + return true + } + } + return false +} + // Addresses returns the Multiaddresses associated with the Message_Peer entry func (m *Message_Peer) Addresses() []ma.Multiaddr { if m == nil { diff --git a/v2/stream.go b/v2/stream.go index 580b1f84..60b3f59f 100644 --- a/v2/stream.go +++ b/v2/stream.go @@ -19,12 +19,22 @@ import ( pb "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) +// streamHandler is the function that's registered with the libp2p host for +// the DHT protocol ID. It sets up metrics and the resource manager scope. It +// actually starts handling the stream and depending on the outcome resets or +// closes it. 
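+//
+// Registration happens with a single call on the libp2p host when the DHT
+// enters server mode (a sketch):
+//
+//	d.host.SetStreamHandler(d.cfg.ProtocolID, d.streamHandler)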
func (d *DHT) streamHandler(s network.Stream) {
 	ctx, _ := tag.New(context.Background(),
 		tag.Upsert(metrics.KeyPeerID, d.host.ID().String()),
 		tag.Upsert(metrics.KeyInstanceID, fmt.Sprintf("%p", d)),
 	)
 
+	if err := s.Scope().SetService(ServiceName); err != nil {
+		d.log.LogAttrs(ctx, slog.LevelWarn, "error attaching stream to DHT service", slog.String("error", err.Error()))
+		_ = s.Reset()
+		return
+	}
+
 	if err := d.handleNewStream(ctx, s); err != nil {
 		// If we exited with an error, let the remote peer know.
 		_ = s.Reset()
@@ -34,7 +44,28 @@ func (d *DHT) streamHandler(s network.Stream) {
 	}
 }
 
+// handleNewStream handles an incoming stream from a remote peer until the
+// stream is closed or an error occurs.
+//
+// This function goes through the following steps:
+// 1. Starts a new trace span for the stream handling operation.
+// 2. Sets an idle deadline on the stream.
+// 3. Reads messages from the stream in a loop.
+// 4. If a message is received, it starts a timer and unmarshals the message.
+// 5. If the message unmarshals successfully, it resets the stream deadline,
+// tags the context with the message type and key, updates some metrics and
+// then handles the message and gathers the response from the handler.
+// 6. If responding is needed, sends the response back to the peer.
+// 7. Logs the latency and updates the relevant metrics before the loop iterates.
+//
+// Returns:
+// It returns an error if any of the operations in the pipeline fail. Otherwise,
+// it returns nil, indicating either the end of the stream or that all messages
+// have been processed correctly.
 func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error {
+	ctx, span := tracer.Start(ctx, "DHT.handleNewStream")
+	defer span.End()
+
 	// init structured logger that always contains the remote peers PeerID
 	slogger := d.log.With(slog.String("from", s.Conn().RemotePeer().String()))
 
@@ -117,6 +148,9 @@ func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error {
 // corresponding bytes. If an error occurs, it logs it and updates the metrics.
 // If the bytes and the error are both nil, the remote peer closed the stream.
 func (d *DHT) streamReadMsg(ctx context.Context, slogger *slog.Logger, r msgio.Reader) ([]byte, error) {
+	ctx, span := tracer.Start(ctx, "DHT.streamReadMsg")
+	defer span.End()
+
 	data, err := r.ReadMsg()
 	if err != nil {
 		// if the reader returns an end-of-file signal, exit gracefully
@@ -149,6 +183,9 @@ func (d *DHT) streamReadMsg(ctx context.Context, slogger *slog.Logger, r msgio.R
 // protobuf message. If an error occurs, it will be logged and the metrics will
 // be updated.
 func (d *DHT) streamUnmarshalMsg(ctx context.Context, slogger *slog.Logger, data []byte) (*pb.Message, error) {
+	ctx, span := tracer.Start(ctx, "DHT.streamUnmarshalMsg")
+	defer span.End()
+
 	var req pb.Message
 	if err := req.Unmarshal(data); err != nil {
 		slogger.LogAttrs(ctx, slog.LevelDebug, "error unmarshalling message", slog.String("error", err.Error()))
@@ -169,6 +206,9 @@ func (d *DHT) streamUnmarshalMsg(ctx context.Context, slogger *slog.Logger, data
 // streamHandleMsg handles the given protobuf message based on its type from the
 // given remote peer.
func (d *DHT) streamHandleMsg(ctx context.Context, slogger *slog.Logger, remote peer.ID, req *pb.Message) (*pb.Message, error) { + ctx, span := tracer.Start(ctx, "DHT.streamHandleMsg") + defer span.End() + slogger.LogAttrs(ctx, slog.LevelDebug, "handling message") switch req.GetType() { @@ -181,7 +221,11 @@ func (d *DHT) streamHandleMsg(ctx context.Context, slogger *slog.Logger, remote return nil, fmt.Errorf("can't handle received message: %s", req.GetType().String()) } +// streamWriteMsg sends the given message over the stream. func (d *DHT) streamWriteMsg(ctx context.Context, slogger *slog.Logger, s network.Stream, msg *pb.Message) error { + ctx, span := tracer.Start(ctx, "DHT.streamWriteMsg") + defer span.End() + if err := writeMsg(s, msg); err != nil { slogger.LogAttrs(ctx, slog.LevelDebug, "error writing response", slog.String("error", err.Error())) stats.Record(ctx, metrics.ReceivedMessageErrors.M(1)) From 3171894bd8d5ed2b84e2dbd3ce6057a5eefa13db Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Fri, 18 Aug 2023 14:35:37 +0200 Subject: [PATCH 10/64] add handlers and tests --- v2/config.go | 92 +++++++++- v2/config_test.go | 41 +++++ v2/dht.go | 47 +++-- v2/dht_test.go | 15 +- v2/handlers.go | 211 +++++++++++++++++++++-- v2/handlers_test.go | 388 ++++++++++++++++++++++++++++++++++++++++++ v2/kad.go | 6 +- v2/net.go | 44 ----- v2/pb/bytestring.go | 82 +++++---- v2/pb/dht.aux.go | 3 +- v2/pb/message_test.go | 2 +- v2/stream.go | 63 ++++++- 12 files changed, 849 insertions(+), 145 deletions(-) create mode 100644 v2/handlers_test.go delete mode 100644 v2/net.go diff --git a/v2/config.go b/v2/config.go index c9637917..17b22707 100644 --- a/v2/config.go +++ b/v2/config.go @@ -4,18 +4,24 @@ import ( "fmt" "time" + "github.com/ipfs/boxo/ipns" + ds "github.com/ipfs/go-datastore" + leveldb "github.com/ipfs/go-ds-leveldb" logging "github.com/ipfs/go-log/v2" + record "github.com/libp2p/go-libp2p-record" + "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/protocol" "github.com/plprobelab/go-kademlia/coord" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-kademlia/routing/triert" "go.opentelemetry.io/otel" "go.uber.org/zap/exp/zapslog" "golang.org/x/exp/slog" ) // ServiceName is used to scope incoming streams for the resource manager. -const ServiceName = "libp2p.KadDHT" +const ServiceName = "libp2p.DHT" // tracer is an open telemetry tracing instance var tracer = otel.Tracer("go-libp2p-kad-dht") @@ -39,6 +45,17 @@ type ( // from the desired mode. Therefore, we define this second mode type that // only has the two forms: client or server. mode string + + // Datastore is an interface definition that gathers the datastore + // requirements. The DHT requires the datastore to support batching and + // transactions. Example datastores that implement both features are leveldb + // and badger. leveldb can also be used in memory - this is used as the + // default datastore. + Datastore interface { + ds.Datastore + ds.BatchingFeature + ds.TxnFeature + } ) const ( @@ -79,6 +96,9 @@ type Config struct { // Kademlia holds the configuration of the underlying Kademlia implementation. Kademlia *coord.Config + // BucketSize determines the number of closer peers to return + BucketSize int + // ProtocolID represents the DHT protocol we can query with and respond to. ProtocolID protocol.ID @@ -89,24 +109,73 @@ type Config struct { // about the local node. 
RoutingTable kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]] + // Datastore configures the DHT to use the specified datastore. The + // datastore must support batching and transactions. Defaults to a leveldb + // in-memory (temporary) map. + Datastore Datastore + + // Validator + Validator record.Validator + // Logger can be used to configure a custom structured logger instance. // By default go.uber.org/zap is used (wrapped in ipfs/go-log). Logger *slog.Logger + // MaxRecordAge is the default time that a record should last in the DHT. + // This value is also known as the provider record expiration. + MaxRecordAge time.Duration + + // TimeoutStreamIdle is the duration we're reading from a stream without + // receiving before closing/resetting it. The timeout gets reset every time + // we have successfully read a message from the stream. TimeoutStreamIdle time.Duration } // DefaultConfig returns a configuration struct that can be used as-is to -// instantiate a fully functional DHT client. +// instantiate a fully functional DHT client. All fields that are nil require +// some additional information to instantiate. The default values for these +// fields come from separate top-level methods prefixed with Default. func DefaultConfig() *Config { return &Config{ Mode: ModeOptAutoClient, Kademlia: coord.DefaultConfig(), + BucketSize: 20, ProtocolID: "/ipfs/kad/1.0.0", + Datastore: nil, // nil because the initialization of a datastore can fail. An in-memory leveldb datastore will be used if this field is nil. + Validator: nil, // nil because the default validator requires a peerstore.KeyBook. RoutingTable: nil, // nil because a routing table requires information about the local node. triert.TrieRT will be used if this field is nil. Logger: slog.New(zapslog.NewHandler(logging.Logger("dht").Desugar().Core())), - TimeoutStreamIdle: time.Minute, + MaxRecordAge: 48 * time.Hour, // empirically measured in: https://github.com/plprobelab/network-measurements/blob/master/results/rfm17-provider-record-liveness.md + TimeoutStreamIdle: time.Minute, // MAGIC: could be done dynamically + } +} + +// DefaultValidator returns a namespaced validator that can validate both public +// key (under the "pk" namespace) and IPNS records (under the "ipns" namespace). +// The validator can't be initialized in DefaultConfig because it requires +// access to a peerstore.KeyBook for the IPNS validator. +func DefaultValidator(kb peerstore.KeyBook) record.Validator { + return record.NamespacedValidator{ + "pk": record.PublicKeyValidator{}, + "ipns": ipns.Validator{KeyBook: kb}, + } +} + +// DefaultRoutingTable returns a triert.TrieRT routing table. This routing table +// cannot be initialized in DefaultConfig because it requires information about +// the local peer. +func DefaultRoutingTable(nodeID kad.NodeID[key.Key256]) (kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]], error) { + rtCfg := triert.DefaultConfig[key.Key256, kad.NodeID[key.Key256]]() + rt, err := triert.New[key.Key256, kad.NodeID[key.Key256]](nodeID, rtCfg) + if err != nil { + return nil, fmt.Errorf("new trie routing table: %w", err) } + return rt, nil +} + +// DefaultDatastore returns an in-memory leveldb datastore. +func DefaultDatastore() (Datastore, error) { + return leveldb.NewDatastore("", nil) } // Validate validates the configuration struct it is called on. It returns @@ -114,7 +183,10 @@ func DefaultConfig() *Config { // a valid configuration. 
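 //
 // A typical call site, as a sketch:
 //
 //	cfg := DefaultConfig()
 //	cfg.Mode = ModeOptServer
 //	if err := cfg.Validate(); err != nil {
 //		// reject the configuration
 //	}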
func (c *Config) Validate() error { switch c.Mode { - case ModeOptClient, ModeOptServer, ModeOptAutoClient, ModeOptAutoServer: + case ModeOptClient: + case ModeOptServer: + case ModeOptAutoClient: + case ModeOptAutoServer: default: return fmt.Errorf("invalid mode option: %s", c.Mode) } @@ -131,5 +203,17 @@ func (c *Config) Validate() error { return fmt.Errorf("protocolID must not be empty") } + if c.Logger == nil { + return fmt.Errorf("logger must not be nil") + } + + if c.MaxRecordAge <= 0 { + return fmt.Errorf("max record age must be a positive duration") + } + + if c.TimeoutStreamIdle <= 0 { + return fmt.Errorf("stream idle timeout must be a positive duration") + } + return nil } diff --git a/v2/config_test.go b/v2/config_test.go index 4126c4b1..9049a1a3 100644 --- a/v2/config_test.go +++ b/v2/config_test.go @@ -2,6 +2,7 @@ package dht import ( "testing" + "time" ) func TestConfig_Validate(t *testing.T) { @@ -47,6 +48,46 @@ func TestConfig_Validate(t *testing.T) { return c }, }, + { + name: "nil logger", + wantErr: true, + mutate: func(c *Config) *Config { + c.Logger = nil + return c + }, + }, + { + name: "0 max record age", + wantErr: true, + mutate: func(c *Config) *Config { + c.MaxRecordAge = time.Duration(0) + return c + }, + }, + { + name: "negative max record age", + wantErr: true, + mutate: func(c *Config) *Config { + c.MaxRecordAge = time.Duration(-1) + return c + }, + }, + { + name: "0 stream idle timeout", + wantErr: true, + mutate: func(c *Config) *Config { + c.TimeoutStreamIdle = time.Duration(0) + return c + }, + }, + { + name: "negative stream idle timeout", + wantErr: true, + mutate: func(c *Config) *Config { + c.TimeoutStreamIdle = time.Duration(-1) + return c + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/v2/dht.go b/v2/dht.go index 3aaadf8a..631b5d5e 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -4,8 +4,7 @@ import ( "fmt" "sync" - "github.com/libp2p/go-libp2p/core/peerstore" - + record "github.com/libp2p/go-libp2p-record" "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" @@ -13,7 +12,6 @@ import ( "github.com/plprobelab/go-kademlia/coord" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" - "github.com/plprobelab/go-kademlia/routing/triert" "golang.org/x/exp/slog" ) @@ -23,7 +21,8 @@ type DHT struct { // host holds a reference to the underlying libp2p host host host.Host - pstore peerstore.Peerstore + // ds is the datastore where provider, peer, and IPNS records are stored + ds Datastore // cfg holds a reference to the DHT configuration struct cfg *Config @@ -31,8 +30,8 @@ type DHT struct { // mode indicates the current mode the DHT operates in. This can differ from // the desired mode if set to auto-client or auto-server. The desired mode // can be configured via the Config struct. + modeMu sync.RWMutex mode mode - modeLk sync.RWMutex // kad is a reference to the go-kademlia coordinator kad *coord.Coordinator[key.Key256, ma.Multiaddr] @@ -41,6 +40,9 @@ type DHT struct { // configured via the Config struct. rt kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]] + // validator . + validator record.Validator + // log is a convenience accessor to the logging instance. It gets the value // of the logger field from the configuration. 
log *slog.Logger @@ -72,16 +74,26 @@ func New(h host.Host, cfg *Config) (*DHT, error) { // Use the configured routing table if it was provided if cfg.RoutingTable != nil { d.rt = cfg.RoutingTable + } else if d.rt, err = DefaultRoutingTable(nid); err != nil { + return nil, fmt.Errorf("new trie routing table: %w", err) + } + + // Use configured validator if it was provided + if cfg.Validator != nil { + d.validator = cfg.Validator } else { - rtCfg := triert.DefaultConfig[key.Key256, kad.NodeID[key.Key256]]() - d.rt, err = triert.New[key.Key256, kad.NodeID[key.Key256]](nid, rtCfg) - if err != nil { - return nil, fmt.Errorf("new trie routing table: %w", err) - } + d.validator = DefaultValidator(h.Peerstore()) + } + + // Use configured datastore or default leveldb in-memory one + if cfg.Datastore != nil { + d.ds = cfg.Datastore + } else if d.ds, err = DefaultDatastore(); err != nil { + return nil, fmt.Errorf("new default datastore: %w", err) } // instantiate a new Kademlia DHT coordinator. - d.kad, err = coord.NewCoordinator[key.Key256, ma.Multiaddr](nid, nil, d.rt, cfg.Kademlia) + d.kad, err = coord.NewCoordinator[key.Key256, ma.Multiaddr](nid, nil, nil, d.rt, cfg.Kademlia) if err != nil { return nil, fmt.Errorf("new coordinator: %w", err) } @@ -137,8 +149,8 @@ func (d *DHT) Close() error { // stream handler. This method is safe to call even if the DHT is already in // server mode. func (d *DHT) setServerMode() { - d.modeLk.Lock() - defer d.modeLk.Unlock() + d.modeMu.Lock() + defer d.modeMu.Unlock() d.log.Info("Activating DHT server mode") @@ -153,15 +165,18 @@ func (d *DHT) setServerMode() { // already in client mode, this method is a no-op. This method is safe to call // even if the DHT is already in client mode. func (d *DHT) setClientMode() { - d.modeLk.Lock() - defer d.modeLk.Unlock() + d.modeMu.Lock() + defer d.modeMu.Unlock() d.log.Info("Activating DHT client mode") d.mode = modeClient d.host.RemoveStreamHandler(d.cfg.ProtocolID) - // kill all active inbound streams using the DHT protocol. + // kill all active inbound streams using the DHT protocol. Note that if we + // request something from a remote peer behind a NAT that succeeds with a + // connection reversal, the connection would be inbound but the stream would + // still be outbound and therefore not reset here. 
for _, c := range d.host.Network().Conns() { for _, s := range c.GetStreams() { diff --git a/v2/dht_test.go b/v2/dht_test.go index dbff988b..832c8ed1 100644 --- a/v2/dht_test.go +++ b/v2/dht_test.go @@ -1,10 +1,10 @@ package dht import ( - "reflect" "testing" "github.com/libp2p/go-libp2p" + "github.com/stretchr/testify/assert" ) func TestNew(t *testing.T) { @@ -19,12 +19,6 @@ func TestNew(t *testing.T) { wantBuilder func(*DHT) *DHT wantErr bool }{ - { - name: "happy path", - cfgBuilder: func(c *Config) *Config { return c }, - wantBuilder: func(dht *DHT) *DHT { return dht }, - wantErr: false, - }, { name: "mode set to server", cfgBuilder: func(c *Config) *Config { @@ -67,12 +61,7 @@ func TestNew(t *testing.T) { want := tt.wantBuilder(d) - want.kad = nil - got.kad = nil - - if !reflect.DeepEqual(got, want) { - t.Errorf("New() got = %v, want %v", got, want) - } + assert.Equal(t, want.mode, got.mode) }) } } diff --git a/v2/handlers.go b/v2/handlers.go index 3bcda4d5..5e91c698 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -1,10 +1,16 @@ package dht import ( + "bytes" "context" + "errors" "fmt" + "time" + ds "github.com/ipfs/go-datastore" + recpb "github.com/libp2p/go-libp2p-record/pb" "github.com/libp2p/go-libp2p/core/peer" + "github.com/multiformats/go-base32" "github.com/plprobelab/go-kademlia/key" "golang.org/x/exp/slog" @@ -13,20 +19,24 @@ import ( // handleFindPeer handles FIND_NODE requests from remote peers. func (d *DHT) handleFindPeer(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { - ctx, span := tracer.Start(ctx, "DHT.handleFindPeer") - defer span.End() - - target, err := peer.IDFromBytes(req.GetKey()) - if err != nil { - return nil, fmt.Errorf("peer ID from bytes: %w", err) + if len(req.GetKey()) == 0 { + return nil, fmt.Errorf("handleFindPeer with empty key") } + target := peer.ID(req.GetKey()) + // initialize the response message - resp := &pb.Message{Type: pb.Message_FIND_NODE} + resp := &pb.Message{ + Type: pb.Message_FIND_NODE, + Key: req.GetKey(), + } + + // get reference to peer store + pstore := d.host.Peerstore() // if the remote is asking for us, short-circuit and return us only if target == d.host.ID() { - resp.CloserPeers = []pb.Message_Peer{pb.FromAddrInfo(d.pstore.PeerInfo(d.host.ID()))} + resp.CloserPeers = []pb.Message_Peer{pb.FromAddrInfo(pstore.PeerInfo(d.host.ID()))} return resp, nil } @@ -35,10 +45,10 @@ func (d *DHT) handleFindPeer(ctx context.Context, remote peer.ID, req *pb.Messag // if we happen to know the target peers addresses (e.g., although we are // far away in the keyspace), we add the peer to the result set. This means - // we potentially return bucketSize + 1 peers. We don't add the peer if it's - // already contained in the CloserPeers. - targetInfo := d.pstore.PeerInfo(target) - if len(targetInfo.Addrs) > 0 && !resp.ContainsCloserPeer(target) { + // we potentially return bucketSize + 1 peers. We won't add the peer to the + // response if it's already contained in CloserPeers. + targetInfo := pstore.PeerInfo(target) + if len(targetInfo.Addrs) > 0 && !resp.ContainsCloserPeer(target) && target != remote { resp.CloserPeers = append(resp.CloserPeers, pb.FromAddrInfo(targetInfo)) } @@ -47,20 +57,145 @@ func (d *DHT) handleFindPeer(ctx context.Context, remote peer.ID, req *pb.Messag // handlePing handles PING requests from remote peers. 
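+// A PING carries no payload; a compliant reply simply mirrors the request
+// type:
+//
+//	&pb.Message{Type: pb.Message_PING}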
 func (d *DHT) handlePing(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) {
-	ctx, span := tracer.Start(ctx, "DHT.handlePing")
-	defer span.End()
-
 	d.log.LogAttrs(ctx, slog.LevelDebug, "Responding to ping", slog.String("remote", remote.String()))
 	return &pb.Message{Type: pb.Message_PING}, nil
 }
 
+// handleGetValue handles GET_VALUE RPCs from remote peers.
+func (d *DHT) handleGetValue(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) {
+	k := req.GetKey()
+	if len(k) == 0 {
+		return nil, fmt.Errorf("handleGetValue but no key in request")
+	}
+
+	// prepare the response message
+	resp := &pb.Message{
+		Type:        pb.Message_GET_VALUE,
+		Key:         k,
+		CloserPeers: d.closerPeers(ctx, remote, key.NewSha256(k)),
+	}
+
+	// fetch record from the datastore for the requested key
+	dsKey := ds.NewKey(base32.RawStdEncoding.EncodeToString(k))
+	buf, err := d.ds.Get(ctx, dsKey)
+	if err != nil {
+		// if we don't have the record, that's fine, just return closer peers
+		if errors.Is(err, ds.ErrNotFound) {
+			return resp, nil
+		}
+
+		return nil, err
+	}
+
+	// we have found a record, parse it and do basic validation
+	rec := &recpb.Record{}
+	err = rec.Unmarshal(buf)
+	if err != nil {
+		return nil, fmt.Errorf("unmarshal stored record: %w", err)
+	}
+
+	// validate that we don't serve stale records.
+	receivedAt, err := time.Parse(time.RFC3339Nano, rec.GetTimeReceived())
+	if err != nil || time.Since(receivedAt) > d.cfg.MaxRecordAge {
+		// err stays nil if the record is merely too old, so guard the
+		// dereference below.
+		errStr := ""
+		if err != nil {
+			errStr = err.Error()
+		}
+		d.log.LogAttrs(ctx, slog.LevelWarn, "Invalid received timestamp on stored record", slog.String("error", errStr), slog.Duration("age", time.Since(receivedAt)))
+		if err = d.ds.Delete(ctx, dsKey); err != nil {
+			d.log.LogAttrs(ctx, slog.LevelWarn, "Failed deleting bad record from datastore", slog.String("error", err.Error()))
+		}
+		// don't attach the stale or invalid record - the closer peers are
+		// still useful to the requester.
+		return resp, nil
+	}
+
+	// We don't do any additional validation beyond checking the above
+	// timestamp. We put the burden of validating the record on the requester as
+	// checking a record may be computationally expensive.
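+
+	// A requester-side sketch (validator being a record.Validator as
+	// configured on the DHT): run the namespaced validator over the returned
+	// record before trusting it, e.g.
+	//
+	//	err := validator.Validate(string(rec.GetKey()), rec.GetValue())
+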
+ + // finally, attach the record to the response + resp.Record = rec + + return resp, nil +} + +func (d *DHT) handlePutValue(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { + if len(req.GetKey()) == 0 { + return nil, fmt.Errorf("no key was provided") + } + + rec := req.GetRecord() + if rec == nil { + return nil, fmt.Errorf("nil record") + } + + if !bytes.Equal(req.GetKey(), rec.GetKey()) { + return nil, fmt.Errorf("key doesn't match record key") + } + + // avoid storing arbitrary data + rec.TimeReceived = "" + + if err := d.validator.Validate(string(rec.GetKey()), rec.GetValue()); err != nil { + return nil, fmt.Errorf("put bad record: %w", err) + } + + txn, err := d.ds.NewTransaction(ctx, false) + if err != nil { + return nil, fmt.Errorf("new transaction: %w", err) + } + defer txn.Discard(ctx) // discard is a no-op if committed beforehand + + shouldReplace, err := d.shouldReplaceExistingRecord(ctx, txn, rec) + if err != nil { + return nil, fmt.Errorf("checking datastore for better record: %w", err) + } else if !shouldReplace { + return nil, fmt.Errorf("received worse record") + } + + rec.TimeReceived = time.Now().UTC().Format(time.RFC3339Nano) + data, err := rec.Marshal() + if err != nil { + return nil, fmt.Errorf("marshal incoming record: %w", err) + } + + if err = txn.Put(ctx, datastoreKey(rec.GetKey()), data); err != nil { + return nil, fmt.Errorf("storing record in datastore: %w", err) + } + + if err = txn.Commit(ctx); err != nil { + return nil, fmt.Errorf("committing new record to datastore: %w", err) + } + + return req, nil +} + +func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { + k := req.GetKey() + if len(k) > 80 { + return nil, fmt.Errorf("handleGetProviders key size too large") + } else if len(k) == 0 { + return nil, fmt.Errorf("handleGetProviders key is empty") + } + + resp := &pb.Message{ + Type: pb.Message_GET_PROVIDERS, + Key: k, + CloserPeers: d.closerPeers(ctx, remote, key.NewSha256(k)), + } + + return resp, nil +} + +func (d *DHT) handleAddProvider(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { + k := req.GetKey() + if len(k) == 0 { + return nil, fmt.Errorf("handleAddProvider but no key in request") + } + return nil, nil +} + // closerPeers returns the closest peers to the given target key this host knows // about. It doesn't return 1) itself 2) the peer that asked for closer peers. func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256) []pb.Message_Peer { ctx, span := tracer.Start(ctx, "DHT.closerPeers") defer span.End() - peers := d.rt.NearestNodes(target, 20) // TODO: bucket size + peers := d.rt.NearestNodes(target, d.cfg.BucketSize) if len(peers) == 0 { return nil } @@ -82,8 +217,8 @@ func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256 } // extract peer information from peer store and only add it to the - // final list if we know addresses of that peer. - addrInfo := d.pstore.PeerInfo(pid) + // final list if we know any addresses of that peer. + addrInfo := d.host.Peerstore().PeerInfo(pid) if len(addrInfo.Addrs) == 0 { continue } @@ -93,3 +228,43 @@ func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256 return filtered } + +// shouldReplaceExistingRecord returns true if the given record should replace any +// existing one in the local datastore. It queries the datastore, unmarshalls +// the record, validates it, and compares it to the incoming record. 
If the +// incoming one is "better" (e.g., just newer), this function returns true. +// If unmarshalling or validation fails, this function also returns true because +// the existing record should be replaced. +func (d *DHT) shouldReplaceExistingRecord(ctx context.Context, dstore ds.Read, newRec *recpb.Record) (bool, error) { + ctx, span := tracer.Start(ctx, "DHT.shouldReplaceExistingRecord") + defer span.End() + + existingBytes, err := dstore.Get(ctx, datastoreKey(newRec.GetKey())) + if errors.Is(err, ds.ErrNotFound) { + return true, nil + } else if err != nil { + return false, fmt.Errorf("getting record from datastore: %w", err) + } + + existingRec := &recpb.Record{} + if err := existingRec.Unmarshal(existingBytes); err != nil { + return true, nil + } + + if err := d.validator.Validate(string(existingRec.GetKey()), existingRec.GetValue()); err != nil { + return true, nil + } + records := [][]byte{newRec.GetValue(), existingRec.GetValue()} + i, err := d.validator.Select(string(newRec.GetKey()), records) + if err != nil { + return false, fmt.Errorf("record selection: %w", err) + } else if i != 0 { + return false, nil + } + + return true, nil +} + +func datastoreKey(k []byte) ds.Key { + return ds.NewKey(base32.RawStdEncoding.EncodeToString(k)) +} diff --git a/v2/handlers_test.go b/v2/handlers_test.go new file mode 100644 index 00000000..d0832b25 --- /dev/null +++ b/v2/handlers_test.go @@ -0,0 +1,388 @@ +package dht + +import ( + "context" + "fmt" + "math/rand" + "strconv" + "testing" + "time" + + "github.com/libp2p/go-libp2p" + pb "github.com/libp2p/go-libp2p-kad-dht/v2/pb" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var rng = rand.New(rand.NewSource(150)) + +func newTestDHT(t testing.TB) *DHT { + t.Helper() + + h, err := libp2p.New(libp2p.NoListenAddrs) + if err != nil { + t.Fatalf("new libp2p host: %s", err) + } + + d, err := New(h, DefaultConfig()) + if err != nil { + t.Fatalf("new dht: %s", err) + } + + t.Cleanup(func() { + if err = d.Close(); err != nil { + t.Logf("closing dht: %s", err) + } + + if err = h.Close(); err != nil { + t.Logf("closing host: %s", err) + } + }) + + return d +} + +func newPeerID(t testing.TB) peer.ID { + _, pub, err := crypto.GenerateEd25519Key(rng) + require.NoError(t, err) + id, err := peer.IDFromPublicKey(pub) + require.NoError(t, err) + return id +} + +func TestMessage_noKey(t *testing.T) { + d := newTestDHT(t) + for _, typ := range []pb.Message_MessageType{ + pb.Message_FIND_NODE, + pb.Message_PUT_VALUE, + pb.Message_GET_VALUE, + pb.Message_ADD_PROVIDER, + pb.Message_GET_PROVIDERS, + } { + t.Run(fmt.Sprintf("%s", typ), func(t *testing.T) { + msg := &pb.Message{Type: typ} // no key + _, err := d.handleMsg(context.Background(), peer.ID(""), msg) + if err == nil { + t.Error("expected processing message to fail") + } + }) + } +} + +func BenchmarkDHT_handleFindPeer(b *testing.B) { + d := newTestDHT(b) + + // build routing table + var peers []peer.ID + for i := 0; i < 250; i++ { + + // generate peer ID + pid := newPeerID(b) + + // add peer to routing table + d.rt.AddNode(nodeID(pid)) + + // keep track of peer + peers = append(peers, pid) + + // craft network address for peer + a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i)) + if err != nil { + b.Fatal(err) + } + + // add peer information to peer store + d.host.Peerstore().AddAddr(pid, a, time.Hour) + } + + // build 
requests + reqs := make([]*pb.Message, b.N) + for i := 0; i < b.N; i++ { + reqs[i] = &pb.Message{ + Key: []byte("random-key-" + strconv.Itoa(i)), + } + } + + ctx := context.Background() + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := d.handleFindPeer(ctx, peers[0], reqs[i]) + if err != nil { + b.Error(err) + } + } +} + +func TestDHT_handleFindPeer_happy_path(t *testing.T) { + d := newTestDHT(t) + + // build routing table + peers := make([]peer.ID, 250) + for i := 0; i < 250; i++ { + // generate peer ID + pid := newPeerID(t) + + // add peer to routing table but don't add first peer. The first peer + // will be the one who's making the request below. If we added it to + // the routing table it could be among the closest peers to the random + // key below. We filter out the requesting peer from the response of + // closer peers. This means we can't assert for exactly 20 closer peers + // below. + if i > 0 { + d.rt.AddNode(nodeID(pid)) + } + + // keep track of peer + peers[i] = pid + + // craft network address for peer + a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i)) + require.NoError(t, err) + + // add peer information to peer store + d.host.Peerstore().AddAddr(pid, a, time.Hour) + } + + req := &pb.Message{ + Type: pb.Message_FIND_NODE, + Key: []byte("random-key"), + } + + resp, err := d.handleFindPeer(context.Background(), peers[0], req) + require.NoError(t, err) + + assert.Equal(t, pb.Message_FIND_NODE, resp.Type) + assert.Nil(t, resp.Record) + assert.Equal(t, req.Key, resp.Key) + assert.Len(t, resp.CloserPeers, d.cfg.BucketSize) + assert.Len(t, resp.ProviderPeers, 0) + assert.Equal(t, len(resp.CloserPeers[0].Addrs), 1) +} + +func TestDHT_handleFindPeer_self_in_routing_table(t *testing.T) { + // a case that shouldn't happen + d := newTestDHT(t) + + d.rt.AddNode(nodeID(d.host.ID())) + + req := &pb.Message{ + Type: pb.Message_FIND_NODE, + Key: []byte("random-key"), + } + + resp, err := d.handleFindPeer(context.Background(), newPeerID(t), req) + require.NoError(t, err) + + assert.Equal(t, pb.Message_FIND_NODE, resp.Type) + assert.Nil(t, resp.Record) + assert.Equal(t, req.Key, resp.Key) + assert.Len(t, resp.CloserPeers, 0) + assert.Len(t, resp.ProviderPeers, 0) +} + +func TestDHT_handleFindPeer_empty_routing_table(t *testing.T) { + d := newTestDHT(t) + + req := &pb.Message{ + Type: pb.Message_FIND_NODE, + Key: []byte("random-key"), + } + + resp, err := d.handleFindPeer(context.Background(), newPeerID(t), req) + require.NoError(t, err) + + assert.Equal(t, pb.Message_FIND_NODE, resp.Type) + assert.Nil(t, resp.Record) + assert.Equal(t, req.Key, resp.Key) + assert.Len(t, resp.CloserPeers, 0) + assert.Len(t, resp.ProviderPeers, 0) +} + +func TestDHT_handleFindPeer_unknown_addresses_but_in_routing_table(t *testing.T) { + d := newTestDHT(t) + + // build routing table + peers := make([]peer.ID, d.cfg.BucketSize) + for i := 0; i < d.cfg.BucketSize; i++ { + // generate peer ID + pid := newPeerID(t) + + // add peer to routing table + d.rt.AddNode(nodeID(pid)) + + // keep track of peer + peers[i] = pid + + if i == 0 { + continue + } + + // craft network address for peer + a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i)) + require.NoError(t, err) + + // add peer information to peer store + d.host.Peerstore().AddAddr(pid, a, time.Hour) + } + + _, pubk, _ := crypto.GenerateEd25519Key(rng) + requester, err := peer.IDFromPublicKey(pubk) + require.NoError(t, err) + + req := &pb.Message{ + Type: pb.Message_FIND_NODE, + Key: 
[]byte("random-key"), + } + + resp, err := d.handleFindPeer(context.Background(), requester, req) + require.NoError(t, err) + + assert.Equal(t, pb.Message_FIND_NODE, resp.Type) + assert.Nil(t, resp.Record) + assert.Equal(t, req.Key, resp.Key) + assert.Len(t, resp.ProviderPeers, 0) + // should not return peer whose addresses we don't know + require.Len(t, resp.CloserPeers, d.cfg.BucketSize-1) + for _, cp := range resp.CloserPeers { + assert.NotEqual(t, peer.ID(cp.Id), peers[0]) + } +} + +func TestDHT_handleFindPeer_request_for_server(t *testing.T) { + d := newTestDHT(t) + + req := &pb.Message{ + Type: pb.Message_FIND_NODE, + Key: []byte(d.host.ID()), + } + + resp, err := d.handleFindPeer(context.Background(), newPeerID(t), req) + require.NoError(t, err) + + assert.Equal(t, pb.Message_FIND_NODE, resp.Type) + assert.Nil(t, resp.Record) + assert.Equal(t, req.Key, resp.Key) + assert.Len(t, resp.CloserPeers, 1) + assert.Len(t, resp.ProviderPeers, 0) + assert.Equal(t, d.host.ID(), peer.ID(resp.CloserPeers[0].Id)) +} + +func TestDHT_handleFindPeer_request_for_self(t *testing.T) { + d := newTestDHT(t) + + // build routing table + peers := make([]peer.ID, d.cfg.BucketSize) + for i := 0; i < d.cfg.BucketSize; i++ { + // generate peer ID + pid := newPeerID(t) + + // add peer to routing table + d.rt.AddNode(nodeID(pid)) + + // keep track of peer + peers[i] = pid + + // craft network address for peer + a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i)) + if err != nil { + t.Fatal(err) + } + + // add peer information to peer store + d.host.Peerstore().AddAddr(pid, a, time.Hour) + } + + req := &pb.Message{ + Type: pb.Message_FIND_NODE, + Key: []byte("random-key"), + } + + resp, err := d.handleFindPeer(context.Background(), peers[0], req) + require.NoError(t, err) + + assert.Equal(t, pb.Message_FIND_NODE, resp.Type) + assert.Nil(t, resp.Record) + assert.Equal(t, req.Key, resp.Key) + assert.Len(t, resp.CloserPeers, d.cfg.BucketSize-1) // don't return requester + assert.Len(t, resp.ProviderPeers, 0) +} + +func TestDHT_handleFindPeer_request_for_known_but_far_peer(t *testing.T) { + // tests if a peer that we know the addresses of but that isn't in + // the routing table is returned + d := newTestDHT(t) + + // build routing table + peers := make([]peer.ID, 250) + for i := 0; i < 250; i++ { + // generate peer ID + pid := newPeerID(t) + + // keep track of peer + peers[i] = pid + + // craft network address for peer + a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i)) + if err != nil { + t.Fatal(err) + } + + // add peer information to peer store + d.host.Peerstore().AddAddr(pid, a, time.Hour) + + // don't add first peer to routing table -> the one we're asking for + // don't add second peer -> the one that's requesting + if i > 1 { + d.rt.AddNode(nodeID(pid)) + } + } + + req := &pb.Message{ + Type: pb.Message_FIND_NODE, + Key: []byte(peers[0]), // not in routing table but in peer store + } + + resp, err := d.handleFindPeer(context.Background(), peers[1], req) + require.NoError(t, err) + + assert.Equal(t, pb.Message_FIND_NODE, resp.Type) + assert.Nil(t, resp.Record) + assert.Equal(t, req.Key, resp.Key) + assert.Len(t, resp.CloserPeers, d.cfg.BucketSize+1) // return peer because we know it's addresses + assert.Len(t, resp.ProviderPeers, 0) +} + +func TestDHT_handlePing(t *testing.T) { + d := newTestDHT(t) + + req := &pb.Message{Type: pb.Message_PING} + res, err := d.handlePing(context.Background(), newPeerID(t), req) + require.NoError(t, err) + assert.Equal(t, 
pb.Message_PING, res.Type) + assert.Len(t, res.CloserPeers, 0) + assert.Len(t, res.ProviderPeers, 0) + assert.Nil(t, res.Key) + assert.Nil(t, res.Record) +} + +func BenchmarkDHT_handlePing(b *testing.B) { + d := newTestDHT(b) + requester := newPeerID(b) + + ctx := context.Background() + req := &pb.Message{Type: pb.Message_PING} + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := d.handlePing(ctx, requester, req) + if err != nil { + b.Error(err) + } + } +} diff --git a/v2/kad.go b/v2/kad.go index 7e25f659..fd29ec48 100644 --- a/v2/kad.go +++ b/v2/kad.go @@ -1,8 +1,6 @@ package dht import ( - "crypto/sha256" - "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" @@ -21,9 +19,7 @@ var _ kad.NodeID[key.Key256] = nodeID("") // hashes of, in this case, peer.IDs. This means this Key method takes // the peer.ID, hashes it and constructs a 256-bit key. func (p nodeID) Key() key.Key256 { - h := sha256.New() - h.Write([]byte(p)) - return key.NewKey256(h.Sum(nil)) + return key.NewSha256([]byte(p)) } // String calls String on the underlying peer.ID and returns a string like diff --git a/v2/net.go b/v2/net.go deleted file mode 100644 index 0b0c4c72..00000000 --- a/v2/net.go +++ /dev/null @@ -1,44 +0,0 @@ -package dht - -import ( - "bufio" - "io" - "sync" - - pb "github.com/libp2p/go-libp2p-kad-dht/v2/pb" - "github.com/libp2p/go-msgio/protoio" -) - -// The Protobuf writer performs multiple small writes when writing a message. -// We need to buffer those writes, to make sure that we're not sending a new -// packet for every single write. -type bufferedDelimitedWriter struct { - *bufio.Writer - protoio.WriteCloser -} - -var writerPool = sync.Pool{ - New: func() interface{} { - w := bufio.NewWriter(nil) - return &bufferedDelimitedWriter{ - Writer: w, - WriteCloser: protoio.NewDelimitedWriter(w), - } - }, -} - -func writeMsg(w io.Writer, mes *pb.Message) error { - bw := writerPool.Get().(*bufferedDelimitedWriter) - bw.Reset(w) - err := bw.WriteMsg(mes) - if err == nil { - err = bw.Flush() - } - bw.Reset(nil) - writerPool.Put(bw) - return err -} - -func (w *bufferedDelimitedWriter) Flush() error { - return w.Writer.Flush() -} diff --git a/v2/pb/bytestring.go b/v2/pb/bytestring.go index 4c33068b..5099a991 100644 --- a/v2/pb/bytestring.go +++ b/v2/pb/bytestring.go @@ -1,38 +1,52 @@ package dht_pb +import ( + "encoding/json" + "fmt" +) + type byteString string -//func (b byteString) Marshal() ([]byte, error) { -// return []byte(b), nil -//} -// -//func (b *byteString) MarshalTo(data []byte) (int, error) { -// return copy(data, *b), nil -//} -// -//func (b *byteString) Unmarshal(data []byte) error { -// *b = byteString(data) -// return nil -//} -// -//func (b *byteString) Size() int { -// return len(*b) -//} -// -//func (b byteString) MarshalJSON() ([]byte, error) { -// return json.Marshal([]byte(b)) -//} -// -//func (b *byteString) UnmarshalJSON(data []byte) error { -// var buf []byte -// err := json.Unmarshal(data, &buf) -// if err != nil { -// return err -// } -// *b = byteString(buf) -// return nil -//} -// -//func (b byteString) Equal(other byteString) bool { -// return b == other -//} +func (b *byteString) MarshalTo(data []byte) (int, error) { + return copy(data, *b), nil +} + +func (b *byteString) Size() int { + return len(*b) +} + +func (b *byteString) Marshal() ([]byte, error) { + if b == nil { + return nil, fmt.Errorf("empty byte string") + } + return []byte(*b), nil +} + +func (b *byteString) Unmarshal(data 
[]byte) error {
+	*b = byteString(data)
+	return nil
+}
+
+func (b *byteString) Equal(other *byteString) bool {
+	if b != nil && other != nil {
+		return *b == *other
+	}
+	return b == nil && other == nil
+}
+
+func (b *byteString) MarshalJSON() ([]byte, error) {
+	if b == nil {
+		return nil, fmt.Errorf("empty byte string")
+	}
+	return json.Marshal([]byte(*b))
+}
+
+func (b *byteString) UnmarshalJSON(data []byte) error {
+	var buf []byte
+	err := json.Unmarshal(data, &buf)
+	if err != nil {
+		return err
+	}
+	*b = byteString(buf)
+	return nil
+}
diff --git a/v2/pb/dht.aux.go b/v2/pb/dht.aux.go
index 3e313fca..a403c4f8 100644
--- a/v2/pb/dht.aux.go
+++ b/v2/pb/dht.aux.go
@@ -10,8 +10,9 @@ import (
 // ContainsCloserPeer returns true if the provided peer ID is among the
 // list of closer peers contained in this message.
 func (m *Message) ContainsCloserPeer(pid peer.ID) bool {
+	b := byteString(pid)
 	for _, cp := range m.CloserPeers {
-		if cp.Id.Equal(byteString(pid)) {
+		if cp.Id.Equal(&b) {
 			return true
 		}
 	}
diff --git a/v2/pb/message_test.go b/v2/pb/message_test.go
index 71f4abdc..f1ea0406 100644
--- a/v2/pb/message_test.go
+++ b/v2/pb/message_test.go
@@ -10,6 +10,6 @@ func TestBadAddrsDontReturnNil(t *testing.T) {
 	addrs := mp.Addresses()
 
 	if len(addrs) > 0 {
-		t.Fatal("shouldnt have any multiaddrs")
+		t.Fatal("shouldn't have any multiaddrs")
 	}
 }
diff --git a/v2/stream.go b/v2/stream.go
index 60b3f59f..d30f6067 100644
--- a/v2/stream.go
+++ b/v2/stream.go
@@ -1,16 +1,19 @@
 package dht
 
 import (
+	"bufio"
 	"context"
 	"encoding/base64"
 	"errors"
 	"fmt"
 	"io"
+	"sync"
 	"time"
 
 	"github.com/libp2p/go-libp2p/core/network"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/libp2p/go-msgio"
+	"github.com/libp2p/go-msgio/protoio"
 	"go.opencensus.io/stats"
 	"go.opencensus.io/tag"
 	"golang.org/x/exp/slog"
@@ -118,7 +121,8 @@ func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error {
 	)
 
 	// 3. handle the message and gather response
-	resp, err := d.streamHandleMsg(ctx, slogger, s.Conn().RemotePeer(), req)
+	slogger.LogAttrs(ctx, slog.LevelDebug, "handling message")
+	resp, err := d.handleMsg(ctx, s.Conn().RemotePeer(), req)
 	if err != nil {
 		slogger.LogAttrs(ctx, slog.LevelDebug, "error handling message", slog.Duration("time", time.Since(startTime)), slog.String("error", err.Error()))
 		stats.Record(ctx, metrics.ReceivedMessageErrors.M(1))
@@ -203,25 +207,32 @@ func (d *DHT) streamUnmarshalMsg(ctx context.Context, slogger *slog.Logger, data
 	return &req, nil
 }
 
-// streamHandleMsg handles the give protobuf message based on its type from the
+// handleMsg handles the given protobuf message based on its type from the
 // given remote peer.
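+// It dispatches to exactly one typed handler per message type and returns
+// whatever that handler produces; unknown message types are rejected with an
+// error.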
-func (d *DHT) streamHandleMsg(ctx context.Context, slogger *slog.Logger, remote peer.ID, req *pb.Message) (*pb.Message, error) { - ctx, span := tracer.Start(ctx, "DHT.streamHandleMsg") +func (d *DHT) handleMsg(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { + ctx, span := tracer.Start(ctx, "DHT.handle_"+req.GetType().String()) defer span.End() - slogger.LogAttrs(ctx, slog.LevelDebug, "handling message") - switch req.GetType() { case pb.Message_FIND_NODE: return d.handleFindPeer(ctx, remote, req) case pb.Message_PING: return d.handlePing(ctx, remote, req) + case pb.Message_PUT_VALUE: + return d.handlePutValue(ctx, remote, req) + case pb.Message_GET_VALUE: + return d.handleGetValue(ctx, remote, req) + case pb.Message_ADD_PROVIDER: + return d.handleAddProvider(ctx, remote, req) + case pb.Message_GET_PROVIDERS: + return d.handleGetProviders(ctx, remote, req) + default: + return nil, fmt.Errorf("can't handle received message: %s", req.GetType().String()) } - - return nil, fmt.Errorf("can't handle received message: %s", req.GetType().String()) } -// streamWriteMsg sends the given message over the stream. +// streamWriteMsg sends the given message over the stream and handles traces +// and telemetry. func (d *DHT) streamWriteMsg(ctx context.Context, slogger *slog.Logger, s network.Stream, msg *pb.Message) error { ctx, span := tracer.Start(ctx, "DHT.streamWriteMsg") defer span.End() @@ -234,3 +245,37 @@ func (d *DHT) streamWriteMsg(ctx context.Context, slogger *slog.Logger, s networ return nil } + +// The Protobuf writer performs multiple small writes when writing a message. +// We need to buffer those writes, to make sure that we're not sending a new +// packet for every single write. +type bufferedDelimitedWriter struct { + *bufio.Writer + protoio.WriteCloser +} + +var writerPool = sync.Pool{ + New: func() interface{} { + w := bufio.NewWriter(nil) + return &bufferedDelimitedWriter{ + Writer: w, + WriteCloser: protoio.NewDelimitedWriter(w), + } + }, +} + +func writeMsg(w io.Writer, mes *pb.Message) error { + bw := writerPool.Get().(*bufferedDelimitedWriter) + bw.Reset(w) + err := bw.WriteMsg(mes) + if err == nil { + err = bw.Flush() + } + bw.Reset(nil) + writerPool.Put(bw) + return err +} + +func (w *bufferedDelimitedWriter) Flush() error { + return w.Writer.Flush() +} From 9f8f4a7100f20a5e538d6c4fde1f1e2974713f1d Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Fri, 18 Aug 2023 14:35:57 +0200 Subject: [PATCH 11/64] mark triage and gokad parts --- dht_bootstrap.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/dht_bootstrap.go b/dht_bootstrap.go index 58eb88b4..c62cf0ad 100644 --- a/dht_bootstrap.go +++ b/dht_bootstrap.go @@ -14,11 +14,14 @@ var DefaultBootstrapPeers []multiaddr.Multiaddr // Minimum number of peers in the routing table. If we drop below this and we // see a new peer, we trigger a bootstrap round. +// GOKAD: config belongs to go-kademlia var minRTRefreshThreshold = 10 const ( + // GOKAD: config belongs to go-kademlia periodicBootstrapInterval = 2 * time.Minute - maxNBoostrappers = 2 + // GOKAD: config belongs to go-kademlia + maxNBoostrappers = 2 ) func init() { @@ -67,6 +70,7 @@ func (dht *IpfsDHT) Bootstrap(ctx context.Context) error { // // The returned channel will block until the refresh finishes, then yield the // error and close. The channel is buffered and safe to ignore. +// TRIAGE: needed or alternative? 
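+//
+// Because the channel is buffered, a caller may either wait for the result or
+// drop the handle without leaking a goroutine, e.g.:
+//
+//	select {
+//	case err := <-dht.RefreshRoutingTable():
+//		_ = err // refresh finished (err may be nil)
+//	case <-ctx.Done():
+//	}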
func (dht *IpfsDHT) RefreshRoutingTable() <-chan error { return dht.rtRefreshManager.Refresh(false) } @@ -76,6 +80,7 @@ func (dht *IpfsDHT) RefreshRoutingTable() <-chan error { // // The returned channel will block until the refresh finishes, then yield the // error and close. The channel is buffered and safe to ignore. +// TRIAGE: needed or alternative? func (dht *IpfsDHT) ForceRefresh() <-chan error { return dht.rtRefreshManager.Refresh(true) } From 3cdfb34e1f168a2614f45879344ef17284d358a1 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Fri, 18 Aug 2023 16:58:56 +0200 Subject: [PATCH 12/64] test put value handler --- v2/config.go | 7 +- v2/dht.go | 8 ++ v2/handlers.go | 105 ++++++++--------- v2/handlers_test.go | 269 +++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 333 insertions(+), 56 deletions(-) diff --git a/v2/config.go b/v2/config.go index 17b22707..064346e6 100644 --- a/v2/config.go +++ b/v2/config.go @@ -110,8 +110,11 @@ type Config struct { RoutingTable kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]] // Datastore configures the DHT to use the specified datastore. The - // datastore must support batching and transactions. Defaults to a leveldb - // in-memory (temporary) map. + // datastore must support batching and transactions. If a datastore is + // configured by the user, they are responsible for the datastore's + // lifecycle - like closing it on shutdown. If the default datastore + // is used (this field stays nil), go-libp2p-kad-dht will handle that. + // Defaults to a leveldb in-memory (temporary) map. Datastore Datastore // Validator diff --git a/v2/dht.go b/v2/dht.go index 631b5d5e..c0f00d0c 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -127,6 +127,14 @@ func (d *DHT) Close() error { d.log.With("err", err).Debug("failed closing event bus subscription") } + // If the user hasn't configured a custom datastore, the responsibility is + // on us to properly clean up after ourselves. + if d.cfg.Datastore == nil { + if err := d.ds.Close(); err != nil { + d.log.With("err", err).Debug("failed closing default datastore") + } + } + // kill all active streams using the DHT protocol. for _, c := range d.host.Network().Conns() { for _, s := range c.GetStreams() { diff --git a/v2/handlers.go b/v2/handlers.go index 5e91c698..1ecafa96 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -61,58 +61,6 @@ func (d *DHT) handlePing(ctx context.Context, remote peer.ID, req *pb.Message) ( return &pb.Message{Type: pb.Message_PING}, nil } -// handleGetValue handles GET_VALUE RPCs from remote peers. -func (d *DHT) handleGetValue(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { - k := req.GetKey() - if len(k) == 0 { - return nil, fmt.Errorf("handleGetValue but no key in request") - } - - // prepare the response message - resp := &pb.Message{ - Type: pb.Message_GET_VALUE, - Key: k, - CloserPeers: d.closerPeers(ctx, remote, key.NewSha256(k)), - } - - // fetch record from the datastore for the requested key - dsKey := ds.NewKey(base32.RawStdEncoding.EncodeToString(k)) - buf, err := d.ds.Get(ctx, dsKey) - if err != nil { - // if we don't have the record, that's fine, just return closer peers - if errors.Is(err, ds.ErrNotFound) { - return resp, nil - } - - return nil, err - } - - // we have found a record, parse it and do basic validation - rec := &recpb.Record{} - err = rec.Unmarshal(buf) - if err != nil { - return nil, fmt.Errorf("unmarshal stored record: %w", err) - } - - // validate that we don't serve stale records. 
- receivedAt, err := time.Parse(time.RFC3339Nano, rec.GetTimeReceived()) - if err != nil || time.Since(receivedAt) > d.cfg.MaxRecordAge { - d.log.LogAttrs(ctx, slog.LevelWarn, "Invalid received timestamp on stored record", slog.String("error", err.Error()), slog.Duration("age", time.Since(receivedAt))) - if err = d.ds.Delete(ctx, dsKey); err != nil { - d.log.LogAttrs(ctx, slog.LevelWarn, "Failed deleting bad record from datastore", slog.String("error", err.Error())) - } - } - - // We don't do any additional validation beyond checking the above - // timestamp. We put the burden of validating the record on the requester as - // checking a record may be computationally expensive. - - // finally, attach the record to the response - resp.Record = rec - - return resp, nil -} - func (d *DHT) handlePutValue(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { if len(req.GetKey()) == 0 { return nil, fmt.Errorf("no key was provided") @@ -164,6 +112,58 @@ func (d *DHT) handlePutValue(ctx context.Context, remote peer.ID, req *pb.Messag return req, nil } +// handleGetValue handles GET_VALUE RPCs from remote peers. +func (d *DHT) handleGetValue(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { + k := req.GetKey() + if len(k) == 0 { + return nil, fmt.Errorf("handleGetValue but no key in request") + } + + // prepare the response message + resp := &pb.Message{ + Type: pb.Message_GET_VALUE, + Key: k, + CloserPeers: d.closerPeers(ctx, remote, key.NewSha256(k)), + } + + // fetch record from the datastore for the requested key + dsKey := ds.NewKey(base32.RawStdEncoding.EncodeToString(k)) + buf, err := d.ds.Get(ctx, dsKey) + if err != nil { + // if we don't have the record, that's fine, just return closer peers + if errors.Is(err, ds.ErrNotFound) { + return resp, nil + } + + return nil, err + } + + // we have found a record, parse it and do basic validation + rec := &recpb.Record{} + err = rec.Unmarshal(buf) + if err != nil { + return nil, fmt.Errorf("unmarshal stored record: %w", err) + } + + // validate that we don't serve stale records. + receivedAt, err := time.Parse(time.RFC3339Nano, rec.GetTimeReceived()) + if err != nil || time.Since(receivedAt) > d.cfg.MaxRecordAge { + d.log.LogAttrs(ctx, slog.LevelWarn, "Invalid received timestamp on stored record", slog.String("err", err.Error()), slog.Duration("age", time.Since(receivedAt))) + if err = d.ds.Delete(ctx, dsKey); err != nil { + d.log.LogAttrs(ctx, slog.LevelWarn, "Failed deleting bad record from datastore", slog.String("err", err.Error())) + } + } + + // We don't do any additional validation beyond checking the above + // timestamp. We put the burden of validating the record on the requester as + // checking a record may be computationally expensive. 
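+	//
+	// A requester receiving this record is expected to run its own
+	// validation, along the lines of (illustrative, with validator being the
+	// requester's own record.Validator):
+	//
+	//	rec := resp.GetRecord()
+	//	if err := validator.Validate(string(rec.GetKey()), rec.GetValue()); err != nil {
+	//		// drop the record
+	//	}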
+ + // finally, attach the record to the response + resp.Record = rec + + return resp, nil +} + func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { k := req.GetKey() if len(k) > 80 { @@ -254,6 +254,7 @@ func (d *DHT) shouldReplaceExistingRecord(ctx context.Context, dstore ds.Read, n if err := d.validator.Validate(string(existingRec.GetKey()), existingRec.GetValue()); err != nil { return true, nil } + records := [][]byte{newRec.GetValue(), existingRec.GetValue()} i, err := d.validator.Select(string(newRec.GetKey()), records) if err != nil { diff --git a/v2/handlers_test.go b/v2/handlers_test.go index d0832b25..d69a48b2 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -4,12 +4,19 @@ import ( "context" "fmt" "math/rand" + "reflect" "strconv" + "sync" "testing" "time" + "github.com/gogo/protobuf/proto" + "github.com/ipfs/boxo/ipns" + "github.com/ipfs/boxo/path" + ds "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p" pb "github.com/libp2p/go-libp2p-kad-dht/v2/pb" + recpb "github.com/libp2p/go-libp2p-record/pb" "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" @@ -17,6 +24,10 @@ import ( "github.com/stretchr/testify/require" ) +const ( + testPath = path.Path("/ipfs/bafkqac3jobxhgidsn5rww4yk") +) + var rng = rand.New(rand.NewSource(150)) func newTestDHT(t testing.TB) *DHT { @@ -46,11 +57,29 @@ func newTestDHT(t testing.TB) *DHT { } func newPeerID(t testing.TB) peer.ID { - _, pub, err := crypto.GenerateEd25519Key(rng) + id, _ := newIdentity(t) + return id +} + +func newIdentity(t testing.TB) (peer.ID, crypto.PrivKey) { + priv, pub, err := crypto.GenerateEd25519Key(rng) require.NoError(t, err) + id, err := peer.IDFromPublicKey(pub) require.NoError(t, err) - return id + + return id, priv +} + +func mustUnmarshalIpnsRecord(t *testing.T, data []byte) *ipns.Record { + r := &recpb.Record{} + err := r.Unmarshal(data) + require.NoError(t, err) + + rec, err := ipns.UnmarshalRecord(r.Value) + require.NoError(t, err) + + return rec } func TestMessage_noKey(t *testing.T) { @@ -386,3 +415,239 @@ func BenchmarkDHT_handlePing(b *testing.B) { } } } + +func newIPNSRequest(t testing.TB, priv crypto.PrivKey, seq uint64, eol time.Time, ttl time.Duration) *pb.Message { + rec, err := ipns.NewRecord(priv, testPath, seq, eol, ttl) + require.NoError(t, err) + + remote, err := peer.IDFromPublicKey(priv.GetPublic()) + require.NoError(t, err) + + data, err := ipns.MarshalRecord(rec) + require.NoError(t, err) + + key := ipns.NameFromPeer(remote).RoutingKey() + req := &pb.Message{ + Type: pb.Message_PUT_VALUE, + Key: key, + Record: &recpb.Record{ + Key: key, + Value: data, + }, + } + + return req +} + +func BenchmarkDHT_handlePutValue_unique_peers(b *testing.B) { + d := newTestDHT(b) + + // build requests + peers := make([]peer.ID, b.N) + reqs := make([]*pb.Message, b.N) + for i := 0; i < b.N; i++ { + remote, priv := newIdentity(b) + peers[i] = remote + reqs[i] = newIPNSRequest(b, priv, uint64(i), time.Now().Add(time.Hour), time.Hour) + } + + ctx := context.Background() + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := d.handlePutValue(ctx, peers[i], reqs[i]) + if err != nil { + b.Error(err) + } + } +} + +func BenchmarkDHT_handlePutValue_single_peer(b *testing.B) { + d := newTestDHT(b) + + // build requests + remote, priv := newIdentity(b) + reqs := make([]*pb.Message, b.N) + for i := 0; i < b.N; i++ { + reqs[i] = newIPNSRequest(b, priv, 
uint64(i), time.Now().Add(time.Hour), time.Hour) + } + + ctx := context.Background() + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := d.handlePutValue(ctx, remote, reqs[i]) + if err != nil { + b.Error(err) + } + } +} + +func TestDHT_handlePutValue_happy_path_ipns_record(t *testing.T) { + d := newTestDHT(t) + + remote, priv := newIdentity(t) + + // expired record + req := newIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) + + ctx := context.Background() + _, err := d.ds.Get(ctx, datastoreKey(req.Key)) + require.ErrorIs(t, err, ds.ErrNotFound) + + cloned := proto.Clone(req).(*pb.Message) + _, err = d.handlePutValue(ctx, remote, cloned) + require.NoError(t, err) + + dat, err := d.ds.Get(ctx, datastoreKey(req.Key)) + require.NoError(t, err) + + r := &recpb.Record{} + err = r.Unmarshal(dat) + require.NoError(t, err) + + assert.NotEqual(t, r.TimeReceived, req.Record.TimeReceived) + + r.TimeReceived = "" + req.Record.TimeReceived = "" + + assert.True(t, reflect.DeepEqual(r, req.Record)) +} + +func TestDHT_handlePutValue_nil_record(t *testing.T) { + d := newTestDHT(t) + + req := &pb.Message{ + Type: pb.Message_PUT_VALUE, + Key: []byte("random-key"), + Record: nil, // nil record + } + + resp, err := d.handlePutValue(context.Background(), newPeerID(t), req) + assert.Error(t, err) + assert.Nil(t, resp) + assert.ErrorContains(t, err, "nil record") +} + +func TestDHT_handlePutValue_record_key_mismatch(t *testing.T) { + d := newTestDHT(t) + + req := &pb.Message{ + Type: pb.Message_PUT_VALUE, + Key: []byte("key-1"), + Record: &recpb.Record{ + Key: []byte("key-2"), + }, + } + + resp, err := d.handlePutValue(context.Background(), newPeerID(t), req) + assert.Error(t, err) + assert.Nil(t, resp) + assert.ErrorContains(t, err, "key doesn't match record key") +} + +func TestDHT_handlePutValue_bad_ipns_record(t *testing.T) { + d := newTestDHT(t) + + remote, priv := newIdentity(t) + + // expired record + req := newIPNSRequest(t, priv, 10, time.Now().Add(-time.Hour), -time.Hour) + + resp, err := d.handlePutValue(context.Background(), remote, req) + assert.Error(t, err) + assert.Nil(t, resp) + assert.ErrorContains(t, err, "bad record") +} + +func TestDHT_handlePutValue_worse_ipns_record_after_first_put(t *testing.T) { + d := newTestDHT(t) + + remote, priv := newIdentity(t) + + goodReq := newIPNSRequest(t, priv, 10, time.Now().Add(time.Hour), time.Hour) + worseReq := newIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) + + for i, req := range []*pb.Message{goodReq, worseReq} { + + resp, err := d.handlePutValue(context.Background(), remote, req) + switch i { + case 0: + assert.NoError(t, err) + assert.NotNil(t, resp) + case 1: + assert.Error(t, err) + assert.Nil(t, resp) + assert.ErrorContains(t, err, "received worse record") + } + } +} + +func TestDHT_handlePutValue_probe_race_condition(t *testing.T) { + // we're storing two sequential records simultaneously many times in a row. + // After each insert, we check that indeed the record with the higher + // sequence number was stored. If the handler didn't use transactions, + // this test fails. 
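+	//
+	// The pattern under test mirrors handlePutValue's datastore usage
+	// (sketch):
+	//
+	//	txn, _ := d.ds.NewTransaction(ctx, false)
+	//	defer txn.Discard(ctx)
+	//	// read the stored record, pick the better one via the validator,
+	//	// then write and commit atomically
+	//	_ = txn.Put(ctx, dsKey, data)
+	//	_ = txn.Commit(ctx)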
+
+	d := newTestDHT(t)
+
+	remote, priv := newIdentity(t)
+
+	ipnsKey := ipns.NameFromPeer(remote).RoutingKey()
+
+	for i := 0; i < 100; i++ {
+
+		req1 := newIPNSRequest(t, priv, uint64(2*i), time.Now().Add(time.Hour), time.Hour)
+		req2 := newIPNSRequest(t, priv, uint64(2*i+1), time.Now().Add(time.Hour), time.Hour)
+
+		var wg sync.WaitGroup
+		wg.Add(1)
+		go func() {
+			_, _ = d.handlePutValue(context.Background(), remote, req1)
+			wg.Done()
+		}()
+
+		wg.Add(1)
+		go func() {
+			_, err := d.handlePutValue(context.Background(), remote, req2)
+			assert.NoError(t, err)
+			wg.Done()
+		}()
+		wg.Wait()
+
+		dat, err := d.ds.Get(context.Background(), datastoreKey(ipnsKey))
+		require.NoError(t, err)
+
+		storedRec := mustUnmarshalIpnsRecord(t, dat)
+
+		seq, err := storedRec.Sequence()
+		require.NoError(t, err)
+
+		// stored record must always be the newer one!
+		assert.EqualValues(t, 2*i+1, seq)
+	}
+}
+
+func TestDHT_handlePutValue_overwrites_corrupt_stored_ipns_record(t *testing.T) {
+	d := newTestDHT(t)
+
+	remote, priv := newIdentity(t)
+
+	req := newIPNSRequest(t, priv, 10, time.Now().Add(time.Hour), time.Hour)
+
+	// store corrupt record
+	err := d.ds.Put(context.Background(), datastoreKey(req.Record.GetKey()), []byte("corrupt-record"))
+	require.NoError(t, err)
+
+	// put the correct record through handler
+	_, err = d.handlePutValue(context.Background(), remote, req)
+	require.NoError(t, err)
+
+	// check if the corrupt record was overwritten
+	dat, err := d.ds.Get(context.Background(), datastoreKey(req.Record.GetKey()))
+	require.NoError(t, err)
+
+	mustUnmarshalIpnsRecord(t, dat)
+}

From c584b02cde2e95b0b15a4fd3fbdb0e2412aef1e3 Mon Sep 17 00:00:00 2001
From: Dennis Trautwein
Date: Fri, 18 Aug 2023 17:37:13 +0200
Subject: [PATCH 13/64] add get value tests

---
 v2/handlers.go      |  19 +++-
 v2/handlers_test.go | 235 +++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 239 insertions(+), 15 deletions(-)

diff --git a/v2/handlers.go b/v2/handlers.go
index 1ecafa96..2ce4385f 100644
--- a/v2/handlers.go
+++ b/v2/handlers.go
@@ -61,6 +61,7 @@ func (d *DHT) handlePing(ctx context.Context, remote peer.ID, req *pb.Message) (
 	return &pb.Message{Type: pb.Message_PING}, nil
 }
 
+// handlePutValue handles PUT_VALUE RPCs from remote peers.
 func (d *DHT) handlePutValue(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) {
 	if len(req.GetKey()) == 0 {
 		return nil, fmt.Errorf("no key was provided")
@@ -142,16 +143,28 @@ func (d *DHT) handleGetValue(ctx context.Context, remote peer.ID, req *pb.Messag
 	rec := &recpb.Record{}
 	err = rec.Unmarshal(buf)
 	if err != nil {
-		return nil, fmt.Errorf("unmarshal stored record: %w", err)
+		// we have a corrupt record in the datastore -> delete it and pretend
+		// that we don't know about it
+		if err := d.ds.Delete(ctx, dsKey); err != nil {
+			d.log.LogAttrs(ctx, slog.LevelWarn, "Failed deleting corrupt record from datastore", slog.String("err", err.Error()))
+		}
+
+		return resp, nil
 	}
 
 	// validate that we don't serve stale records.
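+	// The TimeReceived timestamp parsed below is stamped by handlePutValue in
+	// RFC3339Nano format when a record is stored; a timestamp that fails to
+	// parse is treated the same as an expired one.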
receivedAt, err := time.Parse(time.RFC3339Nano, rec.GetTimeReceived()) if err != nil || time.Since(receivedAt) > d.cfg.MaxRecordAge { - d.log.LogAttrs(ctx, slog.LevelWarn, "Invalid received timestamp on stored record", slog.String("err", err.Error()), slog.Duration("age", time.Since(receivedAt))) + errStr := "" + if err != nil { + errStr = err.Error() + } + + d.log.LogAttrs(ctx, slog.LevelWarn, "Invalid received timestamp on stored record", slog.String("err", errStr), slog.Duration("age", time.Since(receivedAt))) if err = d.ds.Delete(ctx, dsKey); err != nil { d.log.LogAttrs(ctx, slog.LevelWarn, "Failed deleting bad record from datastore", slog.String("err", err.Error())) } + return resp, nil } // We don't do any additional validation beyond checking the above @@ -164,6 +177,7 @@ func (d *DHT) handleGetValue(ctx context.Context, remote peer.ID, req *pb.Messag return resp, nil } +// handleGetProviders handles GET_PROVIDERS RPCs from remote peers. func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { k := req.GetKey() if len(k) > 80 { @@ -181,6 +195,7 @@ func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Me return resp, nil } +// handleAddProvider handles ADD_PROVIDER RPCs from remote peers. func (d *DHT) handleAddProvider(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { k := req.GetKey() if len(k) == 0 { diff --git a/v2/handlers_test.go b/v2/handlers_test.go index d69a48b2..cc015ca5 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -82,6 +82,24 @@ func mustUnmarshalIpnsRecord(t *testing.T, data []byte) *ipns.Record { return rec } +func fillRoutingTable(t testing.TB, d *DHT) { + // 250 is a common number of peers to have in the routing table + for i := 0; i < 250; i++ { + // generate peer ID + pid := newPeerID(t) + + // add peer to routing table + d.rt.AddNode(nodeID(pid)) + + // craft network address for peer + a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i)) + require.NoError(t, err) + + // add peer information to peer store + d.host.Peerstore().AddAddr(pid, a, time.Hour) + } +} + func TestMessage_noKey(t *testing.T) { d := newTestDHT(t) for _, typ := range []pb.Message_MessageType{ @@ -131,7 +149,8 @@ func BenchmarkDHT_handleFindPeer(b *testing.B) { reqs := make([]*pb.Message, b.N) for i := 0; i < b.N; i++ { reqs[i] = &pb.Message{ - Key: []byte("random-key-" + strconv.Itoa(i)), + Type: pb.Message_FIND_NODE, + Key: []byte("random-key-" + strconv.Itoa(i)), } } @@ -416,7 +435,7 @@ func BenchmarkDHT_handlePing(b *testing.B) { } } -func newIPNSRequest(t testing.TB, priv crypto.PrivKey, seq uint64, eol time.Time, ttl time.Duration) *pb.Message { +func newPutIPNSRequest(t testing.TB, priv crypto.PrivKey, seq uint64, eol time.Time, ttl time.Duration) *pb.Message { rec, err := ipns.NewRecord(priv, testPath, seq, eol, ttl) require.NoError(t, err) @@ -431,8 +450,9 @@ func newIPNSRequest(t testing.TB, priv crypto.PrivKey, seq uint64, eol time.Time Type: pb.Message_PUT_VALUE, Key: key, Record: &recpb.Record{ - Key: key, - Value: data, + Key: key, + Value: data, + TimeReceived: time.Now().Format(time.RFC3339Nano), }, } @@ -448,7 +468,7 @@ func BenchmarkDHT_handlePutValue_unique_peers(b *testing.B) { for i := 0; i < b.N; i++ { remote, priv := newIdentity(b) peers[i] = remote - reqs[i] = newIPNSRequest(b, priv, uint64(i), time.Now().Add(time.Hour), time.Hour) + reqs[i] = newPutIPNSRequest(b, priv, uint64(i), time.Now().Add(time.Hour), time.Hour) } ctx := 
context.Background() @@ -470,7 +490,7 @@ func BenchmarkDHT_handlePutValue_single_peer(b *testing.B) { remote, priv := newIdentity(b) reqs := make([]*pb.Message, b.N) for i := 0; i < b.N; i++ { - reqs[i] = newIPNSRequest(b, priv, uint64(i), time.Now().Add(time.Hour), time.Hour) + reqs[i] = newPutIPNSRequest(b, priv, uint64(i), time.Now().Add(time.Hour), time.Hour) } ctx := context.Background() @@ -491,7 +511,7 @@ func TestDHT_handlePutValue_happy_path_ipns_record(t *testing.T) { remote, priv := newIdentity(t) // expired record - req := newIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) + req := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) ctx := context.Background() _, err := d.ds.Get(ctx, datastoreKey(req.Key)) @@ -554,7 +574,7 @@ func TestDHT_handlePutValue_bad_ipns_record(t *testing.T) { remote, priv := newIdentity(t) // expired record - req := newIPNSRequest(t, priv, 10, time.Now().Add(-time.Hour), -time.Hour) + req := newPutIPNSRequest(t, priv, 10, time.Now().Add(-time.Hour), -time.Hour) resp, err := d.handlePutValue(context.Background(), remote, req) assert.Error(t, err) @@ -567,8 +587,8 @@ func TestDHT_handlePutValue_worse_ipns_record_after_first_put(t *testing.T) { remote, priv := newIdentity(t) - goodReq := newIPNSRequest(t, priv, 10, time.Now().Add(time.Hour), time.Hour) - worseReq := newIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) + goodReq := newPutIPNSRequest(t, priv, 10, time.Now().Add(time.Hour), time.Hour) + worseReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) for i, req := range []*pb.Message{goodReq, worseReq} { @@ -599,8 +619,8 @@ func TestDHT_handlePutValue_probe_race_condition(t *testing.T) { for i := 0; i < 100; i++ { - req1 := newIPNSRequest(t, priv, uint64(2*i), time.Now().Add(time.Hour), time.Hour) - req2 := newIPNSRequest(t, priv, uint64(2*i+1), time.Now().Add(time.Hour), time.Hour) + req1 := newPutIPNSRequest(t, priv, uint64(2*i), time.Now().Add(time.Hour), time.Hour) + req2 := newPutIPNSRequest(t, priv, uint64(2*i+1), time.Now().Add(time.Hour), time.Hour) var wg sync.WaitGroup wg.Add(1) @@ -635,7 +655,7 @@ func TestDHT_handlePutValue_overwrites_corrupt_stored_ipns_record(t *testing.T) remote, priv := newIdentity(t) - req := newIPNSRequest(t, priv, 10, time.Now().Add(time.Hour), time.Hour) + req := newPutIPNSRequest(t, priv, 10, time.Now().Add(time.Hour), time.Hour) // store corrupt record err := d.ds.Put(context.Background(), datastoreKey(req.Record.GetKey()), []byte("corrupt-record")) @@ -651,3 +671,192 @@ func TestDHT_handlePutValue_overwrites_corrupt_stored_ipns_record(t *testing.T) mustUnmarshalIpnsRecord(t, dat) } + +func BenchmarkDHT_handleGetValue(b *testing.B) { + d := newTestDHT(b) + + fillRoutingTable(b, d) + + // fill datastore and build requests + reqs := make([]*pb.Message, b.N) + peers := make([]peer.ID, b.N) + for i := 0; i < b.N; i++ { + peer, priv := newIdentity(b) + + putReq := newPutIPNSRequest(b, priv, 0, time.Now().Add(time.Hour), time.Hour) + + data, err := putReq.Record.Marshal() + require.NoError(b, err) + + err = d.ds.Put(context.Background(), datastoreKey(putReq.GetKey()), data) + require.NoError(b, err) + + peers[i] = peer + reqs[i] = &pb.Message{ + Type: pb.Message_GET_VALUE, + Key: putReq.GetKey(), + } + } + + ctx := context.Background() + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, err := d.handleGetValue(ctx, peers[b.N-i-1], reqs[i]) + if err != nil { + b.Error(err) + } + } +} + +func 
TestDHT_handleGetValue_happy_path_ipns_record(t *testing.T) { + d := newTestDHT(t) + + fillRoutingTable(t, d) + + remote, priv := newIdentity(t) + + putReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) + + data, err := putReq.Record.Marshal() + require.NoError(t, err) + + err = d.ds.Put(context.Background(), datastoreKey(putReq.GetKey()), data) + require.NoError(t, err) + + getReq := &pb.Message{ + Type: pb.Message_GET_VALUE, + Key: putReq.GetKey(), + } + + resp, err := d.handleGetValue(context.Background(), remote, getReq) + require.NoError(t, err) + + assert.Equal(t, pb.Message_GET_VALUE, resp.Type) + assert.Equal(t, putReq.Key, resp.Key) + require.NotNil(t, putReq.Record) + assert.Equal(t, putReq.Record.String(), resp.Record.String()) + assert.Len(t, resp.CloserPeers, 20) + assert.Len(t, resp.ProviderPeers, 0) +} + +func TestDHT_handleGetValue_record_not_found(t *testing.T) { + d := newTestDHT(t) + + fillRoutingTable(t, d) + + req := &pb.Message{ + Type: pb.Message_GET_VALUE, + Key: []byte("unknown-record-key"), + } + + resp, err := d.handleGetValue(context.Background(), newPeerID(t), req) + require.NoError(t, err) + + assert.Equal(t, pb.Message_GET_VALUE, resp.Type) + assert.Equal(t, req.Key, resp.Key) + assert.Nil(t, resp.Record) + assert.Len(t, resp.CloserPeers, 20) + assert.Len(t, resp.ProviderPeers, 0) +} + +func TestDHT_handleGetValue_corrupt_record_in_datastore(t *testing.T) { + d := newTestDHT(t) + + fillRoutingTable(t, d) + + key := []byte("record-key") + + err := d.ds.Put(context.Background(), datastoreKey(key), []byte("corrupt-data")) + require.NoError(t, err) + + req := &pb.Message{ + Type: pb.Message_GET_VALUE, + Key: key, + } + + resp, err := d.handleGetValue(context.Background(), newPeerID(t), req) + require.NoError(t, err) + + assert.Equal(t, pb.Message_GET_VALUE, resp.Type) + assert.Equal(t, req.Key, resp.Key) + assert.Nil(t, resp.Record) + assert.Len(t, resp.CloserPeers, 20) + assert.Len(t, resp.ProviderPeers, 0) + + // check that the record was deleted from the datastore + data, err := d.ds.Get(context.Background(), datastoreKey(key)) + assert.ErrorIs(t, err, ds.ErrNotFound) + assert.Len(t, data, 0) +} + +func TestDHT_handleGetValue_max_age_exceeded_record_in_datastore(t *testing.T) { + d := newTestDHT(t) + + fillRoutingTable(t, d) + + remote, priv := newIdentity(t) + + putReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) + + data, err := putReq.Record.Marshal() + require.NoError(t, err) + + err = d.ds.Put(context.Background(), datastoreKey(putReq.GetKey()), data) + require.NoError(t, err) + + req := &pb.Message{ + Type: pb.Message_GET_VALUE, + Key: putReq.GetKey(), + } + + d.cfg.MaxRecordAge = 0 + + resp, err := d.handleGetValue(context.Background(), remote, req) + require.NoError(t, err) + + assert.Equal(t, pb.Message_GET_VALUE, resp.Type) + assert.Equal(t, req.Key, resp.Key) + assert.Nil(t, resp.Record) + assert.Len(t, resp.CloserPeers, 20) + assert.Len(t, resp.ProviderPeers, 0) + + // check that the record was deleted from the datastore + data, err = d.ds.Get(context.Background(), datastoreKey(putReq.GetKey())) + assert.ErrorIs(t, err, ds.ErrNotFound) + assert.Len(t, data, 0) +} + +func TestDHT_handleGetValue_does_not_validate_stored_record(t *testing.T) { + d := newTestDHT(t) + + fillRoutingTable(t, d) + + remote, priv := newIdentity(t) + + // generate expired record (doesn't pass validation) + putReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(-time.Hour), -time.Hour) + + data, err := putReq.Record.Marshal() 
+ require.NoError(t, err) + + err = d.ds.Put(context.Background(), datastoreKey(putReq.GetKey()), data) + require.NoError(t, err) + + req := &pb.Message{ + Type: pb.Message_GET_VALUE, + Key: putReq.GetKey(), + } + + resp, err := d.handleGetValue(context.Background(), remote, req) + require.NoError(t, err) + + assert.Equal(t, pb.Message_GET_VALUE, resp.Type) + assert.Equal(t, req.Key, resp.Key) + require.NotNil(t, putReq.Record) + assert.Equal(t, putReq.Record.String(), resp.Record.String()) + assert.Len(t, resp.CloserPeers, 20) + assert.Len(t, resp.ProviderPeers, 0) +} From 4dfcd8b6df44c4142fb84849752efa880c57015f Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Fri, 18 Aug 2023 17:41:52 +0200 Subject: [PATCH 14/64] remove unused methods --- v2/handlers_test.go | 10 ++--- v2/pb/message.go | 101 -------------------------------------------- 2 files changed, 5 insertions(+), 106 deletions(-) diff --git a/v2/handlers_test.go b/v2/handlers_test.go index cc015ca5..20e711fe 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -28,7 +28,7 @@ const ( testPath = path.Path("/ipfs/bafkqac3jobxhgidsn5rww4yk") ) -var rng = rand.New(rand.NewSource(150)) +var rng = rand.New(rand.NewSource(1337)) func newTestDHT(t testing.TB) *DHT { t.Helper() @@ -278,8 +278,8 @@ func TestDHT_handleFindPeer_unknown_addresses_but_in_routing_table(t *testing.T) d.host.Peerstore().AddAddr(pid, a, time.Hour) } - _, pubk, _ := crypto.GenerateEd25519Key(rng) - requester, err := peer.IDFromPublicKey(pubk) + _, pub, _ := crypto.GenerateEd25519Key(rng) + requester, err := peer.IDFromPublicKey(pub) require.NoError(t, err) req := &pb.Message{ @@ -681,7 +681,7 @@ func BenchmarkDHT_handleGetValue(b *testing.B) { reqs := make([]*pb.Message, b.N) peers := make([]peer.ID, b.N) for i := 0; i < b.N; i++ { - peer, priv := newIdentity(b) + pid, priv := newIdentity(b) putReq := newPutIPNSRequest(b, priv, 0, time.Now().Add(time.Hour), time.Hour) @@ -691,7 +691,7 @@ func BenchmarkDHT_handleGetValue(b *testing.B) { err = d.ds.Put(context.Background(), datastoreKey(putReq.GetKey()), data) require.NoError(b, err) - peers[i] = peer + peers[i] = pid reqs[i] = &pb.Message{ Type: pb.Message_GET_VALUE, Key: putReq.GetKey(), diff --git a/v2/pb/message.go b/v2/pb/message.go index 86d84cf0..ab1db7f1 100644 --- a/v2/pb/message.go +++ b/v2/pb/message.go @@ -17,104 +17,3 @@ func FromAddrInfo(p peer.AddrInfo) Message_Peer { return mp } - -//type PeerRoutingInfo struct { -// peer.AddrInfo -// network.Connectedness -//} - -//func peerRoutingInfoToPBPeer(p PeerRoutingInfo) Message_Peer { -// var pbp Message_Peer -// -// pbp.Addrs = make([][]byte, len(p.Addrs)) -// for i, maddr := range p.Addrs { -// pbp.Addrs[i] = maddr.Bytes() // Bytes, not String. Compressed. -// } -// pbp.Id = byteString(p.ID) -// pbp.Connection = ConnectionType(p.Connectedness) -// return pbp -//} - -//// PBPeerToPeer turns a *Message_Peer into its peer.AddrInfo counterpart -//func PBPeerToPeerInfo(pbp Message_Peer) peer.AddrInfo { -// return peer.AddrInfo{ -// ID: peer.ID(pbp.Id), -// Addrs: pbp.Addresses(), -// } -//} -// -//// RawPeerInfosToPBPeers converts a slice of Peers into a slice of *Message_Peers, -//// ready to go out on the wire. 
-//func RawPeerInfosToPBPeers(peers []peer.AddrInfo) []Message_Peer { -// pbpeers := make([]Message_Peer, len(peers)) -// for i, p := range peers { -// pbpeers[i] = peerInfoToPBPeer(p) -// } -// return pbpeers -//} -// -//// PeersToPBPeers converts given []peer.Peer into a set of []*Message_Peer, -//// which can be written to a message and sent out. the key thing this function -//// does (in addition to PeersToPBPeers) is set the ConnectionType with -//// information from the given network.Network. -//func PeerInfosToPBPeers(n network.Network, peers []peer.AddrInfo) []Message_Peer { -// pbps := RawPeerInfosToPBPeers(peers) -// for i, pbp := range pbps { -// c := ConnectionType(n.Connectedness(peers[i].ID)) -// pbp.Connection = c -// } -// return pbps -//} -// -//func PeerRoutingInfosToPBPeers(peers []PeerRoutingInfo) []Message_Peer { -// pbpeers := make([]Message_Peer, len(peers)) -// for i, p := range peers { -// pbpeers[i] = peerRoutingInfoToPBPeer(p) -// } -// return pbpeers -//} -// -//// PBPeersToPeerInfos converts given []*Message_Peer into []peer.AddrInfo -//// Invalid addresses will be silently omitted. -//func PBPeersToPeerInfos(pbps []Message_Peer) []*peer.AddrInfo { -// peers := make([]*peer.AddrInfo, 0, len(pbps)) -// for _, pbp := range pbps { -// ai := PBPeerToPeerInfo(pbp) -// peers = append(peers, &ai) -// } -// return peers -//} - -//// fromConnectedness returns a Message_ConnectionType associated with the -//// network.Connectedness. -//func fromConnectedness(c network.Connectedness) Message_ConnectionType { -// switch c { -// case network.NotConnected: -// return Message_NOT_CONNECTED -// case network.Connected: -// return Message_CONNECTED -// case network.CanConnect: -// return Message_CAN_CONNECT -// case network.CannotConnect: -// return Message_CANNOT_CONNECT -// default: -// return Message_NOT_CONNECTED -// } -//} -// -//// Connectedness returns a network.Connectedness associated with the given -//// Message_ConnectionType. -//func Connectedness(c Message_ConnectionType) network.Connectedness { -// switch c { -// case Message_NOT_CONNECTED: -// return network.NotConnected -// case Message_CONNECTED: -// return network.Connected -// case Message_CAN_CONNECT: -// return network.CanConnect -// case Message_CANNOT_CONNECT: -// return network.CannotConnect -// default: -// return network.NotConnected -// } -//} From 0af762ca41374e36b7744a70a19945fc129e17da Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Mon, 21 Aug 2023 09:30:42 +0200 Subject: [PATCH 15/64] update handlers --- v2/handlers.go | 48 +++++++++++++++++++++++++++++-------------- v2/handlers_test.go | 2 +- v2/pb/dht.aux.go | 18 ++++++++++++++++ v2/pb/message_test.go | 10 ++++----- 4 files changed, 57 insertions(+), 21 deletions(-) diff --git a/v2/handlers.go b/v2/handlers.go index 2ce4385f..2327e6d2 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -110,7 +110,7 @@ func (d *DHT) handlePutValue(ctx context.Context, remote peer.ID, req *pb.Messag return nil, fmt.Errorf("committing new record to datastore: %w", err) } - return req, nil + return nil, nil } // handleGetValue handles GET_VALUE RPCs from remote peers. @@ -177,6 +177,30 @@ func (d *DHT) handleGetValue(ctx context.Context, remote peer.ID, req *pb.Messag return resp, nil } +// handleAddProvider handles ADD_PROVIDER RPCs from remote peers. 
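+// Only provider entries that refer to the sender itself and that carry at
+// least one address are considered; all other entries in the message are
+// ignored.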
+func (d *DHT) handleAddProvider(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { + k := req.GetKey() + if len(k) > 80 { + return nil, fmt.Errorf("handleAddProvider key size too large") + } else if len(k) == 0 { + return nil, fmt.Errorf("handleAddProvider key is empty") + } + + for _, addrInfo := range req.ProviderAddrInfos() { + if addrInfo.ID == remote { + continue + } + + if len(addrInfo.Addrs) == 0 { + continue + } + + // TODO: store + } + + return nil, nil +} + // handleGetProviders handles GET_PROVIDERS RPCs from remote peers. func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { k := req.GetKey() @@ -186,24 +210,18 @@ func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Me return nil, fmt.Errorf("handleGetProviders key is empty") } + // TODO: fetch providers + resp := &pb.Message{ - Type: pb.Message_GET_PROVIDERS, - Key: k, - CloserPeers: d.closerPeers(ctx, remote, key.NewSha256(k)), + Type: pb.Message_GET_PROVIDERS, + Key: k, + CloserPeers: d.closerPeers(ctx, remote, key.NewSha256(k)), + ProviderPeers: nil, // TODO: Fill } return resp, nil } -// handleAddProvider handles ADD_PROVIDER RPCs from remote peers. -func (d *DHT) handleAddProvider(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { - k := req.GetKey() - if len(k) == 0 { - return nil, fmt.Errorf("handleAddProvider but no key in request") - } - return nil, nil -} - // closerPeers returns the closest peers to the given target key this host knows // about. It doesn't return 1) itself 2) the peer that asked for closer peers. func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256) []pb.Message_Peer { @@ -248,8 +266,8 @@ func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256 // existing one in the local datastore. It queries the datastore, unmarshalls // the record, validates it, and compares it to the incoming record. If the // incoming one is "better" (e.g., just newer), this function returns true. -// If unmarshalling or validation fails, this function also returns true because -// the existing record should be replaced. +// If unmarshalling or validation fails, this function (alongside an error) also +// returns true because the existing record should be replaced. func (d *DHT) shouldReplaceExistingRecord(ctx context.Context, dstore ds.Read, newRec *recpb.Record) (bool, error) { ctx, span := tracer.Start(ctx, "DHT.shouldReplaceExistingRecord") defer span.End() diff --git a/v2/handlers_test.go b/v2/handlers_test.go index 20e711fe..f1f3e1a7 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -596,7 +596,7 @@ func TestDHT_handlePutValue_worse_ipns_record_after_first_put(t *testing.T) { switch i { case 0: assert.NoError(t, err) - assert.NotNil(t, resp) + assert.Nil(t, resp) case 1: assert.Error(t, err) assert.Nil(t, resp) diff --git a/v2/pb/dht.aux.go b/v2/pb/dht.aux.go index a403c4f8..c8a887d8 100644 --- a/v2/pb/dht.aux.go +++ b/v2/pb/dht.aux.go @@ -19,6 +19,24 @@ func (m *Message) ContainsCloserPeer(pid peer.ID) bool { return false } +// ProviderAddrInfos returns the peer.AddrInfo's of the provider peers in this +// message. 
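+// A minimal usage sketch (msg is assumed to be a received *Message):
+//
+//	for _, ai := range msg.ProviderAddrInfos() {
+//		fmt.Println(ai.ID, ai.Addrs)
+//	}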
+func (m *Message) ProviderAddrInfos() []peer.AddrInfo { + if m == nil { + return nil + } + + addrInfos := make([]peer.AddrInfo, 0, len(m.ProviderPeers)) + for _, p := range m.ProviderPeers { + addrInfos = append(addrInfos, peer.AddrInfo{ + ID: peer.ID(p.Id), + Addrs: p.Addresses(), + }) + } + + return addrInfos +} + // Addresses returns the Multiaddresses associated with the Message_Peer entry func (m *Message_Peer) Addresses() []ma.Multiaddr { if m == nil { diff --git a/v2/pb/message_test.go b/v2/pb/message_test.go index f1ea0406..f092e8fd 100644 --- a/v2/pb/message_test.go +++ b/v2/pb/message_test.go @@ -4,12 +4,12 @@ import ( "testing" ) -func TestBadAddrsDontReturnNil(t *testing.T) { - mp := new(Message_Peer) - mp.Addrs = [][]byte{[]byte("NOT A VALID MULTIADDR")} +func TestMessage_Peer_invalid_maddr(t *testing.T) { + msg := Message_Peer{ + Addrs: [][]byte{[]byte("invalid-maddr")}, + } - addrs := mp.Addresses() - if len(addrs) > 0 { + if len(msg.Addresses()) > 0 { t.Fatal("shouldn't have any multiaddrs") } } From 741843bead0f9019eac03031fa51303d3546771d Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Wed, 23 Aug 2023 10:33:17 +0200 Subject: [PATCH 16/64] introduce backend concept --- v2/backend.go | 498 ++++++++++++++++++++++++++++++++++++++++++++ v2/config.go | 32 +-- v2/dht.go | 54 +++-- v2/handlers.go | 193 +++++++---------- v2/handlers_test.go | 84 +++++--- v2/pb/dht.aux.go | 2 +- v2/stream.go | 8 +- 7 files changed, 671 insertions(+), 200 deletions(-) create mode 100644 v2/backend.go diff --git a/v2/backend.go b/v2/backend.go new file mode 100644 index 00000000..7c0105ae --- /dev/null +++ b/v2/backend.go @@ -0,0 +1,498 @@ +package dht + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "path" + "strings" + "sync" + "time" + + lru "github.com/hashicorp/golang-lru/v2" + "github.com/ipfs/boxo/ipns" + ds "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/autobatch" + dsq "github.com/ipfs/go-datastore/query" + record "github.com/libp2p/go-libp2p-record" + recpb "github.com/libp2p/go-libp2p-record/pb" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/multiformats/go-base32" + "golang.org/x/exp/slog" +) + +// Default namespaces +const ( + namespaceIPNS = "ipns" + namespacePublicKey = "pk" + namespaceProviders = "providers" +) + +type Backend interface { + Store(ctx context.Context, key string, value any) (any, error) + Fetch(ctx context.Context, key string) (any, error) +} + +func NewValueBackend(namespace string, datastore ds.TxnDatastore, validator record.Validator) *RecordBackend { + return &RecordBackend{ + namespace: namespace, + datastore: datastore, + validator: validator, + } +} + +func NewIPNSBackend(ds ds.TxnDatastore, kb peerstore.KeyBook, cfg *ValueBackendConfig) *RecordBackend { + return &RecordBackend{ + cfg: cfg, + log: cfg.Logger, + namespace: namespaceIPNS, + datastore: ds, + validator: ipns.Validator{KeyBook: kb}, + } +} + +func NewPublicKeyBackend(ds ds.TxnDatastore, cfg *ValueBackendConfig) *RecordBackend { + if cfg == nil { + cfg = DefaultValueBackendConfig() + } + + return &RecordBackend{ + cfg: cfg, + log: cfg.Logger, + namespace: namespacePublicKey, + datastore: ds, + validator: record.PublicKeyValidator{}, + } +} + +type RecordBackend struct { + cfg *ValueBackendConfig + log *slog.Logger + namespace string + datastore ds.TxnDatastore + validator record.Validator +} + +var _ Backend = (*RecordBackend)(nil) + +type ValueBackendConfig struct { + MaxRecordAge time.Duration + Logger 
*slog.Logger +} + +func DefaultValueBackendConfig() *ValueBackendConfig { + return &ValueBackendConfig{ + Logger: slog.Default(), + MaxRecordAge: 48 * time.Hour, // empirically measured in: https://github.com/plprobelab/network-measurements/blob/master/results/rfm17-provider-record-liveness.md + } +} + +func (v *RecordBackend) Store(ctx context.Context, key string, value any) (any, error) { + rec, ok := value.(*recpb.Record) + if !ok { + return nil, fmt.Errorf("expected *recpb.Record value type, got: %T", value) + } + + if key != string(rec.GetKey()) { + return nil, fmt.Errorf("key doesn't match record key") + } + + ns, suffix, err := record.SplitKey(key) // get namespace (prefix of the key) + if err != nil { + return nil, fmt.Errorf("invalid key %s: %w", key, err) + } + + if v.namespace != ns { + return nil, fmt.Errorf("expected namespace %s, got %s", v.namespace, ns) + } + + dsKey := newDatastoreKey(v.namespace, suffix) + + if err := v.validator.Validate(string(rec.GetKey()), rec.GetValue()); err != nil { + return nil, fmt.Errorf("put bad record: %w", err) + } + + txn, err := v.datastore.NewTransaction(ctx, false) + if err != nil { + return nil, fmt.Errorf("new transaction: %w", err) + } + defer txn.Discard(ctx) // discard is a no-op if txn was committed beforehand + + shouldReplace, err := v.shouldReplaceExistingRecord(ctx, txn, dsKey, rec) + if err != nil { + return nil, fmt.Errorf("checking datastore for better record: %w", err) + } else if !shouldReplace { + return nil, fmt.Errorf("received worse record") + } + + // avoid storing arbitrary data, so overwrite that field + rec.TimeReceived = time.Now().UTC().Format(time.RFC3339Nano) + + data, err := rec.Marshal() + if err != nil { + return nil, fmt.Errorf("marshal incoming record: %w", err) + } + + if err = txn.Put(ctx, dsKey, data); err != nil { + return nil, fmt.Errorf("storing record in datastore: %w", err) + } + + if err = txn.Commit(ctx); err != nil { + return nil, fmt.Errorf("committing new record to datastore: %w", err) + } + + return rec, nil +} + +func (v *RecordBackend) Fetch(ctx context.Context, key string) (any, error) { + ns, suffix, err := record.SplitKey(key) // get namespace (prefix of the key) + if err != nil { + return nil, fmt.Errorf("invalid key %s: %w", key, err) + } + + if v.namespace != ns { + return nil, fmt.Errorf("expected namespace %s, got %s", v.namespace, ns) + } + + dsKey := newDatastoreKey(v.namespace, suffix) + + // fetch record from the datastore for the requested key + buf, err := v.datastore.Get(ctx, dsKey) + if err != nil { + return nil, err + } + + // we have found a record, parse it and do basic validation + rec := &recpb.Record{} + err = rec.Unmarshal(buf) + if err != nil { + // we have a corrupt record in the datastore -> delete it and pretend + // that we don't know about it + if err := v.datastore.Delete(ctx, dsKey); err != nil { + v.log.LogAttrs(ctx, slog.LevelWarn, "Failed deleting corrupt record from datastore", slog.String("err", err.Error())) + } + + return nil, nil + } + + // validate that we don't serve stale records. 
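+	// A record counts as stale if its TimeReceived timestamp is missing,
+	// malformed, or older than cfg.MaxRecordAge (48h by default). Stale
+	// records are deleted below and treated as not found.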
+ receivedAt, err := time.Parse(time.RFC3339Nano, rec.GetTimeReceived()) + if err != nil || time.Since(receivedAt) > v.cfg.MaxRecordAge { + errStr := "" + if err != nil { + errStr = err.Error() + } + + v.log.LogAttrs(ctx, slog.LevelWarn, "Invalid received timestamp on stored record", slog.String("err", errStr), slog.Duration("age", time.Since(receivedAt))) + if err = v.datastore.Delete(ctx, dsKey); err != nil { + v.log.LogAttrs(ctx, slog.LevelWarn, "Failed deleting bad record from datastore", slog.String("err", err.Error())) + } + return nil, nil + } + + // We don't do any additional validation beyond checking the above + // timestamp. We put the burden of validating the record on the requester as + // checking a record may be computationally expensive. + + return rec, nil +} + +// shouldReplaceExistingRecord returns true if the given record should replace any +// existing one in the local datastore. It queries the datastore, unmarshalls +// the record, validates it, and compares it to the incoming record. If the +// incoming one is "better" (e.g., just newer), this function returns true. +// If unmarshalling or validation fails, this function (alongside an error) also +// returns true because the existing record should be replaced. +func (v *RecordBackend) shouldReplaceExistingRecord(ctx context.Context, txn ds.Read, dsKey ds.Key, newRec *recpb.Record) (bool, error) { + ctx, span := tracer.Start(ctx, "DHT.shouldReplaceExistingRecord") + defer span.End() + + existingBytes, err := txn.Get(ctx, dsKey) + if errors.Is(err, ds.ErrNotFound) { + return true, nil + } else if err != nil { + return false, fmt.Errorf("getting record from datastore: %w", err) + } + + existingRec := &recpb.Record{} + if err := existingRec.Unmarshal(existingBytes); err != nil { + return true, nil + } + + if err := v.validator.Validate(string(existingRec.GetKey()), existingRec.GetValue()); err != nil { + return true, nil + } + + records := [][]byte{newRec.GetValue(), existingRec.GetValue()} + i, err := v.validator.Select(string(newRec.GetKey()), records) + if err != nil { + return false, fmt.Errorf("record selection: %w", err) + } else if i != 0 { + return false, nil + } + + return true, nil +} + +type ProviderBackend struct { + namespace string + cfg *ProviderBackendConfig + log *slog.Logger + cache *lru.Cache[string, providerSet] + peerstore peerstore.Peerstore + datastore *autobatch.Datastore + gcSkip sync.Map +} + +type ProviderBackendConfig struct { + ProvideValidity time.Duration + AddressTTL time.Duration + BatchSize int + CacheSize int + Logger *slog.Logger +} + +func DefaultProviderBackendConfig() *ProviderBackendConfig { + return &ProviderBackendConfig{ + ProvideValidity: time.Hour * 48, + AddressTTL: 24 * time.Hour, + BatchSize: 256, // MAGIC + CacheSize: 256, // MAGIC + Logger: slog.Default(), + } +} + +func NewProviderBackend(pstore peerstore.Peerstore, dstore ds.Batching, cfg *ProviderBackendConfig) (*ProviderBackend, error) { + if cfg == nil { + cfg = DefaultProviderBackendConfig() + } + + cache, err := lru.New[string, providerSet](cfg.CacheSize) + if err != nil { + return nil, err + } + + p := &ProviderBackend{ + cfg: cfg, + log: cfg.Logger, + cache: cache, + namespace: namespaceProviders, + peerstore: pstore, + datastore: autobatch.NewAutoBatching(dstore, cfg.BatchSize), + } + + return p, nil +} + +var _ Backend = (*ProviderBackend)(nil) + +func (p *ProviderBackend) Store(ctx context.Context, key string, value any) (any, error) { + addrInfo, ok := value.(peer.AddrInfo) + if !ok { + return nil, 
fmt.Errorf("expected peer.AddrInfo value type, got: %T", value) + } + + ns, suffix, err := record.SplitKey(key) // get namespace (prefix of the key) + if err != nil { + return nil, fmt.Errorf("invalid key %s: %w", key, err) + } + + if p.namespace != ns { + return nil, fmt.Errorf("expected namespace %s, got %s", p.namespace, ns) + } + + rec := expiryRecord{ + expiry: time.Now(), + } + + if provs, ok := p.cache.Get(key); ok { + provs.setVal(addrInfo, rec.expiry) + } + + p.peerstore.AddAddrs(addrInfo.ID, addrInfo.Addrs, p.cfg.AddressTTL) + + dsKey := newDatastoreKey(ns, suffix, string(addrInfo.ID)) + + _, found := p.gcSkip.LoadOrStore(dsKey.String(), struct{}{}) + + if err := p.datastore.Put(ctx, dsKey, rec.MarshalBinary()); err != nil { + p.cache.Remove(key) + + // if we have just added the key to the gc skip list, delete it again + // if we have added it in a previous Store invocation, keep it around + if !found { + p.gcSkip.Delete(dsKey.String()) + } + + return nil, fmt.Errorf("datastore put: %w", err) + } + + return addrInfo, nil +} + +func (p *ProviderBackend) Fetch(ctx context.Context, key string) (any, error) { + ns, suffix, err := record.SplitKey(key) // get namespace (prefix of the key) + if err != nil { + return nil, fmt.Errorf("invalid key %s: %w", key, err) + } + + if p.namespace != ns { + return nil, fmt.Errorf("expected namespace %s, got %s", p.namespace, ns) + } + + if cached, ok := p.cache.Get(key); ok { + return cached, nil + } + + qKey := newDatastoreKey(ns, suffix) + q, err := p.datastore.Query(ctx, dsq.Query{Prefix: qKey.String()}) + if err != nil { + return nil, err + } + + defer func() { + if err = q.Close(); err != nil { + p.log.LogAttrs(ctx, slog.LevelWarn, "failed closing fetch query", slog.String("err", err.Error())) + } + }() + + now := time.Now() + out := newProviderSet() + + for e := range q.Next() { + if e.Error != nil { + p.log.LogAttrs(ctx, slog.LevelWarn, "Fetch datastore entry contains error", slog.String("key", e.Key), slog.String("err", e.Error.Error())) + continue + } + + rec := expiryRecord{} + if err = rec.UnmarshalBinary(e.Value); err != nil { + p.log.LogAttrs(ctx, slog.LevelWarn, "Fetch provider record unmarshalling failed", slog.String("key", e.Key), slog.String("err", err.Error())) + p.delete(ctx, ds.RawKey(e.Key)) + continue + } else if now.Sub(rec.expiry) > p.cfg.ProvideValidity { + // record is expired + p.delete(ctx, ds.RawKey(e.Key)) + continue + } + + idx := strings.LastIndex(e.Key, "/") + binPeerID, err := base32.RawStdEncoding.DecodeString(e.Key[idx+1:]) + if err != nil { + p.log.LogAttrs(ctx, slog.LevelWarn, "base32 key decoding error", slog.String("key", e.Key), slog.String("err", err.Error())) + p.delete(ctx, ds.RawKey(e.Key)) + continue + } + + addrInfo := p.peerstore.PeerInfo(peer.ID(binPeerID)) + + out.setVal(addrInfo, rec.expiry) + } + + if len(out.providers) > 0 { + p.cache.Add(key, *out) + } + + return out, nil +} + +func (p *ProviderBackend) CollectGarbage(ctx context.Context) { + // Faster to purge than garbage collecting + p.cache.Purge() + + p.gcSkip = sync.Map{} + + // Now, kick off a GC of the datastore. 
+ q, err := p.datastore.Query(ctx, dsq.Query{Prefix: p.namespace}) + if err != nil { + p.log.LogAttrs(ctx, slog.LevelWarn, "provider record garbage collection query failed", slog.String("err", err.Error())) + return + } + + defer func() { + if err = q.Close(); err != nil { + p.log.LogAttrs(ctx, slog.LevelWarn, "failed closing garbage collection query", slog.String("err", err.Error())) + } + }() + + for e := range q.Next() { + if e.Error != nil { + p.log.LogAttrs(ctx, slog.LevelWarn, "Garbage collection datastore entry contains error", slog.String("key", e.Key), slog.String("err", e.Error.Error())) + continue + } + + if _, found := p.gcSkip.Load(e.Key); found { + continue + } + + rec := expiryRecord{} + if err = rec.UnmarshalBinary(e.Value); err != nil { + p.log.LogAttrs(ctx, slog.LevelWarn, "Garbage collection provider record unmarshalling failed", slog.String("key", e.Key), slog.String("err", err.Error())) + p.delete(ctx, ds.RawKey(e.Key)) + } else if time.Now().Sub(rec.expiry) <= p.cfg.ProvideValidity { + continue + } + + // record expired -> garbage collect + p.delete(ctx, ds.RawKey(e.Key)) + } +} + +func (p *ProviderBackend) delete(ctx context.Context, dsKey ds.Key) { + if err := p.datastore.Delete(ctx, dsKey); err != nil { + p.log.LogAttrs(ctx, slog.LevelWarn, "failed to remove provider record from disk", slog.String("key", dsKey.String()), slog.String("err", err.Error())) + } +} + +type expiryRecord struct { + expiry time.Time +} + +func (e *expiryRecord) MarshalBinary() (data []byte) { + buf := make([]byte, 16) + n := binary.PutVarint(buf, e.expiry.UnixNano()) + return buf[:n] +} + +func (e *expiryRecord) UnmarshalBinary(data []byte) error { + nsec, n := binary.Varint(data) + if n == 0 { + return fmt.Errorf("failed to parse time") + } + + e.expiry = time.Unix(0, nsec) + + return nil +} + +type providerSet struct { + providers []peer.AddrInfo + set map[peer.ID]time.Time +} + +func newProviderSet() *providerSet { + return &providerSet{ + providers: []peer.AddrInfo{}, + set: make(map[peer.ID]time.Time), + } +} + +func (ps *providerSet) setVal(addrInfo peer.AddrInfo, t time.Time) { + _, found := ps.set[addrInfo.ID] + if !found { + ps.providers = append(ps.providers, addrInfo) + } + + ps.set[addrInfo.ID] = t +} + +func newDatastoreKey(namespace string, binStrs ...string) ds.Key { + elems := make([]string, len(binStrs)+1) + elems[0] = namespace + for i, bin := range binStrs { + elems[i+1] = base32.RawStdEncoding.EncodeToString([]byte(bin)) + } + return ds.NewKey("/" + path.Join(elems...)) +} diff --git a/v2/config.go b/v2/config.go index 064346e6..2db21960 100644 --- a/v2/config.go +++ b/v2/config.go @@ -4,12 +4,9 @@ import ( "fmt" "time" - "github.com/ipfs/boxo/ipns" ds "github.com/ipfs/go-datastore" leveldb "github.com/ipfs/go-ds-leveldb" logging "github.com/ipfs/go-log/v2" - record "github.com/libp2p/go-libp2p-record" - "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/protocol" "github.com/plprobelab/go-kademlia/coord" "github.com/plprobelab/go-kademlia/kad" @@ -109,16 +106,8 @@ type Config struct { // about the local node. RoutingTable kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]] - // Datastore configures the DHT to use the specified datastore. The - // datastore must support batching and transactions. If a datastore is - // configured by the user, they are responsible for the datastore's - // lifecycle - like closing it on shutdown. If the default datastore - // is used (this field stays nil), go-libp2p-kad-dht will handle that. 
- // Defaults to a leveldb in-memory (temporary) map. - Datastore Datastore - - // Validator - Validator record.Validator + // Backends ... + Backends map[string]Backend // Logger can be used to configure a custom structured logger instance. // By default go.uber.org/zap is used (wrapped in ipfs/go-log). @@ -144,8 +133,6 @@ func DefaultConfig() *Config { Kademlia: coord.DefaultConfig(), BucketSize: 20, ProtocolID: "/ipfs/kad/1.0.0", - Datastore: nil, // nil because the initialization of a datastore can fail. An in-memory leveldb datastore will be used if this field is nil. - Validator: nil, // nil because the default validator requires a peerstore.KeyBook. RoutingTable: nil, // nil because a routing table requires information about the local node. triert.TrieRT will be used if this field is nil. Logger: slog.New(zapslog.NewHandler(logging.Logger("dht").Desugar().Core())), MaxRecordAge: 48 * time.Hour, // empirically measured in: https://github.com/plprobelab/network-measurements/blob/master/results/rfm17-provider-record-liveness.md @@ -153,17 +140,6 @@ func DefaultConfig() *Config { } } -// DefaultValidator returns a namespaced validator that can validate both public -// key (under the "pk" namespace) and IPNS records (under the "ipns" namespace). -// The validator can't be initialized in DefaultConfig because it requires -// access to a peerstore.KeyBook for the IPNS validator. -func DefaultValidator(kb peerstore.KeyBook) record.Validator { - return record.NamespacedValidator{ - "pk": record.PublicKeyValidator{}, - "ipns": ipns.Validator{KeyBook: kb}, - } -} - // DefaultRoutingTable returns a triert.TrieRT routing table. This routing table // cannot be initialized in DefaultConfig because it requires information about // the local peer. @@ -181,6 +157,10 @@ func DefaultDatastore() (Datastore, error) { return leveldb.NewDatastore("", nil) } +func (c *Config) RegisterBackend(namespace string, backend Backend) { + c.Backends[namespace] = backend +} + // Validate validates the configuration struct it is called on. It returns // an error if any configuration issue was detected and nil if this is // a valid configuration. diff --git a/v2/dht.go b/v2/dht.go index c0f00d0c..a48d0938 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -4,7 +4,6 @@ import ( "fmt" "sync" - record "github.com/libp2p/go-libp2p-record" "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" @@ -21,9 +20,6 @@ type DHT struct { // host holds a reference to the underlying libp2p host host host.Host - // ds is the datastore where provider, peer, and IPNS records are stored - ds Datastore - // cfg holds a reference to the DHT configuration struct cfg *Config @@ -40,8 +36,8 @@ type DHT struct { // configured via the Config struct. rt kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]] - // validator . - validator record.Validator + // backends + backends map[string]Backend // log is a convenience accessor to the logging instance. It gets the value // of the logger field from the configuration. 
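For illustration, a custom backend could be wired into this configuration roughly as follows (the "ext" namespace and the extBackend type are hypothetical; note that, at this point in the series, DefaultConfig does not appear to initialize the Backends map, and supplying any backend replaces the built-in ipns/pk/providers defaults in New):

	cfg := DefaultConfig()
	cfg.Backends = map[string]Backend{}       // assumed to be initialized by the caller
	cfg.RegisterBackend("ext", &extBackend{}) // extBackend implements Store and Fetch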
@@ -78,18 +74,30 @@ func New(h host.Host, cfg *Config) (*DHT, error) { return nil, fmt.Errorf("new trie routing table: %w", err) } - // Use configured validator if it was provided - if cfg.Validator != nil { - d.validator = cfg.Validator + if len(cfg.Backends) != 0 { + d.backends = cfg.Backends } else { - d.validator = DefaultValidator(h.Peerstore()) - } + dstore, err := DefaultDatastore() + if err != nil { + return nil, fmt.Errorf("new default datastore: %w", err) + } + + pbeCfg := DefaultProviderBackendConfig() + pbeCfg.Logger = cfg.Logger + + pbe, err := NewProviderBackend(h.Peerstore(), dstore, pbeCfg) + if err != nil { + return nil, fmt.Errorf("new provider backend: %w", err) + } + + vbeCfg := DefaultValueBackendConfig() + vbeCfg.Logger = cfg.Logger - // Use configured datastore or default leveldb in-memory one - if cfg.Datastore != nil { - d.ds = cfg.Datastore - } else if d.ds, err = DefaultDatastore(); err != nil { - return nil, fmt.Errorf("new default datastore: %w", err) + d.backends = map[string]Backend{ + "ipns": NewIPNSBackend(dstore, h.Peerstore(), vbeCfg), + "pk": NewPublicKeyBackend(dstore, vbeCfg), + "providers": pbe, + } } // instantiate a new Kademlia DHT coordinator. @@ -127,13 +135,13 @@ func (d *DHT) Close() error { d.log.With("err", err).Debug("failed closing event bus subscription") } - // If the user hasn't configured a custom datastore, the responsibility is - // on us to properly clean up after ourselves. - if d.cfg.Datastore == nil { - if err := d.ds.Close(); err != nil { - d.log.With("err", err).Debug("failed closing default datastore") - } - } + //// If the user hasn't configured a custom datastore, the responsibility is + //// on us to properly clean up after ourselves. + //if d.cfg.Datastore == nil { + // if err := d.ds.Close(); err != nil { + // d.log.With("err", err).Debug("failed closing default datastore") + // } + //} // kill all active streams using the DHT protocol. for _, c := range d.host.Network().Conns() { diff --git a/v2/handlers.go b/v2/handlers.go index 2327e6d2..ca9773d4 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -1,16 +1,14 @@ package dht import ( - "bytes" "context" "errors" "fmt" - "time" ds "github.com/ipfs/go-datastore" + record "github.com/libp2p/go-libp2p-record" recpb "github.com/libp2p/go-libp2p-record/pb" "github.com/libp2p/go-libp2p/core/peer" - "github.com/multiformats/go-base32" "github.com/plprobelab/go-kademlia/key" "golang.org/x/exp/slog" @@ -63,7 +61,8 @@ func (d *DHT) handlePing(ctx context.Context, remote peer.ID, req *pb.Message) ( // handleGetValue handles PUT_VALUE RPCs from remote peers. 
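 // The key of a PUT_VALUE request is expected to have the shape
 // /$namespace/$binary_id, e.g. "/ipns/<binary-id>"; the namespace prefix
 // selects the backend that validates and stores the record.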
func (d *DHT) handlePutValue(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { - if len(req.GetKey()) == 0 { + k := string(req.GetKey()) + if len(k) == 0 { return nil, fmt.Errorf("no key was provided") } @@ -72,50 +71,25 @@ func (d *DHT) handlePutValue(ctx context.Context, remote peer.ID, req *pb.Messag return nil, fmt.Errorf("nil record") } - if !bytes.Equal(req.GetKey(), rec.GetKey()) { - return nil, fmt.Errorf("key doesn't match record key") - } - - // avoid storing arbitrary data - rec.TimeReceived = "" - - if err := d.validator.Validate(string(rec.GetKey()), rec.GetValue()); err != nil { - return nil, fmt.Errorf("put bad record: %w", err) - } - - txn, err := d.ds.NewTransaction(ctx, false) + // key is /$namespace/$binary_id + ns, _, err := record.SplitKey(k) // get namespace (prefix of the key) if err != nil { - return nil, fmt.Errorf("new transaction: %w", err) + return nil, fmt.Errorf("invalid key %s: %w", k, err) } - defer txn.Discard(ctx) // discard is a no-op if committed beforehand - shouldReplace, err := d.shouldReplaceExistingRecord(ctx, txn, rec) - if err != nil { - return nil, fmt.Errorf("checking datastore for better record: %w", err) - } else if !shouldReplace { - return nil, fmt.Errorf("received worse record") - } - - rec.TimeReceived = time.Now().UTC().Format(time.RFC3339Nano) - data, err := rec.Marshal() - if err != nil { - return nil, fmt.Errorf("marshal incoming record: %w", err) + backend, found := d.backends[ns] + if !found { + return nil, fmt.Errorf("unsupported key namespace: %s", ns) } - if err = txn.Put(ctx, datastoreKey(rec.GetKey()), data); err != nil { - return nil, fmt.Errorf("storing record in datastore: %w", err) - } - - if err = txn.Commit(ctx); err != nil { - return nil, fmt.Errorf("committing new record to datastore: %w", err) - } + _, err = backend.Store(ctx, k, rec) - return nil, nil + return nil, err } // handleGetValue handles GET_VALUE RPCs from remote peers. 
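 // For illustration, a well-formed request and the resulting response look
 // roughly like this (the key value is hypothetical):
 //
 //	req := &pb.Message{Type: pb.Message_GET_VALUE, Key: []byte("/ipns/<binary-id>")}
 //
 // The response echoes the key, carries the stored record (if any) in
 // resp.Record, and lists the closer peers this node knows about.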
func (d *DHT) handleGetValue(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { - k := req.GetKey() + k := string(req.GetKey()) if len(k) == 0 { return nil, fmt.Errorf("handleGetValue but no key in request") } @@ -123,79 +97,79 @@ func (d *DHT) handleGetValue(ctx context.Context, remote peer.ID, req *pb.Messag // prepare the response message resp := &pb.Message{ Type: pb.Message_GET_VALUE, - Key: k, - CloserPeers: d.closerPeers(ctx, remote, key.NewSha256(k)), + Key: req.GetKey(), + CloserPeers: d.closerPeers(ctx, remote, key.NewSha256(req.GetKey())), } - // fetch record from the datastore for the requested key - dsKey := ds.NewKey(base32.RawStdEncoding.EncodeToString(k)) - buf, err := d.ds.Get(ctx, dsKey) + ns, _, err := record.SplitKey(k) // get namespace (prefix of the key) if err != nil { - // if we don't have the record, that's fine, just return closer peers - if errors.Is(err, ds.ErrNotFound) { - return resp, nil - } + return nil, fmt.Errorf("invalid key %s: %w", k, err) + } - return nil, err + backend, found := d.backends[ns] + if !found { + return nil, fmt.Errorf("unsupported record type: %s", ns) } - // we have found a record, parse it and do basic validation - rec := &recpb.Record{} - err = rec.Unmarshal(buf) + fetched, err := backend.Fetch(ctx, k) if err != nil { - // we have a corrupt record in the datastore -> delete it and pretend - // that we don't know about it - if err := d.ds.Delete(ctx, dsKey); err != nil { - d.log.LogAttrs(ctx, slog.LevelWarn, "Failed deleting corrupt record from datastore", slog.String("err", err.Error())) + if errors.Is(err, ds.ErrNotFound) { + return resp, nil } + return nil, fmt.Errorf("fetch record for key %s: %w", k, err) + } else if fetched == nil { + return resp, nil + } + rec, ok := fetched.(*recpb.Record) + if ok { + resp.Record = rec return resp, nil } - // validate that we don't serve stale records. - receivedAt, err := time.Parse(time.RFC3339Nano, rec.GetTimeReceived()) - if err != nil || time.Since(receivedAt) > d.cfg.MaxRecordAge { - errStr := "" - if err != nil { - errStr = err.Error() + pset, ok := fetched.(*providerSet) + if ok { + resp.ProviderPeers = make([]pb.Message_Peer, len(pset.providers)) + for i, p := range pset.providers { + resp.ProviderPeers[i] = pb.FromAddrInfo(p) } - d.log.LogAttrs(ctx, slog.LevelWarn, "Invalid received timestamp on stored record", slog.String("err", errStr), slog.Duration("age", time.Since(receivedAt))) - if err = d.ds.Delete(ctx, dsKey); err != nil { - d.log.LogAttrs(ctx, slog.LevelWarn, "Failed deleting bad record from datastore", slog.String("err", err.Error())) - } return resp, nil } - // We don't do any additional validation beyond checking the above - // timestamp. We put the burden of validating the record on the requester as - // checking a record may be computationally expensive. - - // finally, attach the record to the response - resp.Record = rec - - return resp, nil + return nil, fmt.Errorf("expected *recpb.Record or *providerSet value type, got: %T", pset) } // handleAddProvider handles ADD_PROVIDER RPCs from remote peers. 
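 // Note that only provider entries whose peer ID matches the requesting
 // remote peer are accepted; a peer cannot announce provider records on
 // behalf of other peers.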
func (d *DHT) handleAddProvider(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { - k := req.GetKey() + k := string(req.GetKey()) if len(k) > 80 { - return nil, fmt.Errorf("handleAddProvider key size too large") + return nil, fmt.Errorf("key size too large") } else if len(k) == 0 { - return nil, fmt.Errorf("handleAddProvider key is empty") + return nil, fmt.Errorf("key is empty") + } + + backend, ok := d.cfg.Backends[namespaceProviders] + if !ok { + return nil, fmt.Errorf("unsupported record type: %s", namespaceProviders) } for _, addrInfo := range req.ProviderAddrInfos() { - if addrInfo.ID == remote { + addrInfo := addrInfo // TODO: remove after go.mod was updated to go 1.21 + + if addrInfo.ID != remote { + d.log.Debug("remote attempted to store provider record for other peer", "remote", remote, "other", addrInfo.ID) continue } if len(addrInfo.Addrs) == 0 { + d.log.Debug("no valid addresses for provider", "remote", addrInfo.ID) continue } - // TODO: store + if _, err := backend.Store(ctx, k, addrInfo); err != nil { + return nil, fmt.Errorf("storing provider record: %w", err) + } } return nil, nil @@ -210,13 +184,31 @@ func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Me return nil, fmt.Errorf("handleGetProviders key is empty") } - // TODO: fetch providers + backend, ok := d.cfg.Backends[namespaceProviders] + if !ok { + return nil, fmt.Errorf("unsupported record type: %s", namespaceProviders) + } + + fetched, err := backend.Fetch(ctx, fmt.Sprintf("/%s/%s", namespaceProviders, req.GetKey())) + if err != nil { + return nil, fmt.Errorf("fetch providers from datastore: %w", err) + } + + pset, ok := fetched.(*providerSet) + if !ok { + return nil, fmt.Errorf("expected *providerSet value type, got: %T", pset) + } + + pbProviders := make([]pb.Message_Peer, len(pset.providers)) + for i, p := range pset.providers { + pbProviders[i] = pb.FromAddrInfo(p) + } resp := &pb.Message{ Type: pb.Message_GET_PROVIDERS, Key: k, CloserPeers: d.closerPeers(ctx, remote, key.NewSha256(k)), - ProviderPeers: nil, // TODO: Fill + ProviderPeers: pbProviders, } return resp, nil @@ -261,44 +253,3 @@ func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256 return filtered } - -// shouldReplaceExistingRecord returns true if the given record should replace any -// existing one in the local datastore. It queries the datastore, unmarshalls -// the record, validates it, and compares it to the incoming record. If the -// incoming one is "better" (e.g., just newer), this function returns true. -// If unmarshalling or validation fails, this function (alongside an error) also -// returns true because the existing record should be replaced. 
-func (d *DHT) shouldReplaceExistingRecord(ctx context.Context, dstore ds.Read, newRec *recpb.Record) (bool, error) { - ctx, span := tracer.Start(ctx, "DHT.shouldReplaceExistingRecord") - defer span.End() - - existingBytes, err := dstore.Get(ctx, datastoreKey(newRec.GetKey())) - if errors.Is(err, ds.ErrNotFound) { - return true, nil - } else if err != nil { - return false, fmt.Errorf("getting record from datastore: %w", err) - } - - existingRec := &recpb.Record{} - if err := existingRec.Unmarshal(existingBytes); err != nil { - return true, nil - } - - if err := d.validator.Validate(string(existingRec.GetKey()), existingRec.GetValue()); err != nil { - return true, nil - } - - records := [][]byte{newRec.GetValue(), existingRec.GetValue()} - i, err := d.validator.Select(string(newRec.GetKey()), records) - if err != nil { - return false, fmt.Errorf("record selection: %w", err) - } else if i != 0 { - return false, nil - } - - return true, nil -} - -func datastoreKey(k []byte) ds.Key { - return ds.NewKey(base32.RawStdEncoding.EncodeToString(k)) -} diff --git a/v2/handlers_test.go b/v2/handlers_test.go index f1f3e1a7..5b56e826 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -6,6 +6,7 @@ import ( "math/rand" "reflect" "strconv" + "strings" "sync" "testing" "time" @@ -514,19 +515,18 @@ func TestDHT_handlePutValue_happy_path_ipns_record(t *testing.T) { req := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) ctx := context.Background() - _, err := d.ds.Get(ctx, datastoreKey(req.Key)) + _, err := d.backends[namespaceIPNS].Fetch(ctx, string(req.Key)) require.ErrorIs(t, err, ds.ErrNotFound) cloned := proto.Clone(req).(*pb.Message) _, err = d.handlePutValue(ctx, remote, cloned) require.NoError(t, err) - dat, err := d.ds.Get(ctx, datastoreKey(req.Key)) + dat, err := d.backends[namespaceIPNS].Fetch(ctx, string(req.Key)) require.NoError(t, err) - r := &recpb.Record{} - err = r.Unmarshal(dat) - require.NoError(t, err) + r, ok := dat.(*recpb.Record) + require.True(t, ok) assert.NotEqual(t, r.TimeReceived, req.Record.TimeReceived) @@ -541,7 +541,7 @@ func TestDHT_handlePutValue_nil_record(t *testing.T) { req := &pb.Message{ Type: pb.Message_PUT_VALUE, - Key: []byte("random-key"), + Key: []byte("/ipns/random-key"), Record: nil, // nil record } @@ -556,9 +556,9 @@ func TestDHT_handlePutValue_record_key_mismatch(t *testing.T) { req := &pb.Message{ Type: pb.Message_PUT_VALUE, - Key: []byte("key-1"), + Key: []byte("/ipns/key-1"), Record: &recpb.Record{ - Key: []byte("key-2"), + Key: []byte("/ipns/key-2"), }, } @@ -607,7 +607,7 @@ func TestDHT_handlePutValue_worse_ipns_record_after_first_put(t *testing.T) { func TestDHT_handlePutValue_probe_race_condition(t *testing.T) { // we're storing two sequential records simultaneously many times in a row. - // After each insert, we check that indeed the record with the higher + // After each insert, we check that indeed, the record with the higher // sequence number was stored. If the handler didn't use transactions, // this test fails. 
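The transactional guarantee this test exercises lives in RecordBackend.Store: the comparison with any existing record and the write of the new one happen inside a single datastore transaction. Condensed from the backend code above (error handling elided; not the verbatim implementation):

	txn, _ := v.datastore.NewTransaction(ctx, false)
	defer txn.Discard(ctx) // no-op once the txn was committed
	if replace, _ := v.shouldReplaceExistingRecord(ctx, txn, dsKey, rec); replace {
		_ = txn.Put(ctx, dsKey, data)
		_ = txn.Commit(ctx) // compare and write happen atomically
	}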
@@ -637,10 +637,14 @@ func TestDHT_handlePutValue_probe_race_condition(t *testing.T) { }() wg.Wait() - dat, err := d.ds.Get(context.Background(), datastoreKey(ipnsKey)) + val, err := d.backends[namespaceIPNS].Fetch(context.Background(), string(ipnsKey)) require.NoError(t, err) - storedRec := mustUnmarshalIpnsRecord(t, dat) + r, ok := val.(*recpb.Record) + require.True(t, ok) + + storedRec, err := ipns.UnmarshalRecord(r.Value) + require.NoError(t, err) seq, err := storedRec.Sequence() require.NoError(t, err) @@ -657,8 +661,10 @@ func TestDHT_handlePutValue_overwrites_corrupt_stored_ipns_record(t *testing.T) req := newPutIPNSRequest(t, priv, 10, time.Now().Add(time.Hour), time.Hour) - // store corrupt record - err := d.ds.Put(context.Background(), datastoreKey(req.Record.GetKey()), []byte("corrupt-record")) + rbe, ok := d.backends[namespaceIPNS].(*RecordBackend) + require.True(t, ok) + + err := rbe.datastore.Put(context.Background(), ds.NewKey(string(req.GetKey())), []byte("corrupt-record")) require.NoError(t, err) // put the correct record through handler @@ -666,10 +672,14 @@ func TestDHT_handlePutValue_overwrites_corrupt_stored_ipns_record(t *testing.T) require.NoError(t, err) // check if the corrupt record was overwritten - dat, err := d.ds.Get(context.Background(), datastoreKey(req.Record.GetKey())) + val, err := d.backends[namespaceIPNS].Fetch(context.Background(), string(req.GetKey())) require.NoError(t, err) - mustUnmarshalIpnsRecord(t, dat) + r, ok := val.(*recpb.Record) + require.True(t, ok) + + _, err = ipns.UnmarshalRecord(r.Value) + require.NoError(t, err) } func BenchmarkDHT_handleGetValue(b *testing.B) { @@ -677,6 +687,9 @@ func BenchmarkDHT_handleGetValue(b *testing.B) { fillRoutingTable(b, d) + rbe, ok := d.backends[namespaceIPNS].(*RecordBackend) + require.True(b, ok) + // fill datastore and build requests reqs := make([]*pb.Message, b.N) peers := make([]peer.ID, b.N) @@ -688,7 +701,9 @@ func BenchmarkDHT_handleGetValue(b *testing.B) { data, err := putReq.Record.Marshal() require.NoError(b, err) - err = d.ds.Put(context.Background(), datastoreKey(putReq.GetKey()), data) + dsKey := newDatastoreKey(namespaceIPNS, strings.TrimPrefix(string(putReq.GetKey()), fmt.Sprintf("/%s/", namespaceIPNS))) + + err = rbe.datastore.Put(context.Background(), dsKey, data) require.NoError(b, err) peers[i] = pid @@ -716,6 +731,9 @@ func TestDHT_handleGetValue_happy_path_ipns_record(t *testing.T) { fillRoutingTable(t, d) + rbe, ok := d.backends[namespaceIPNS].(*RecordBackend) + require.True(t, ok) + remote, priv := newIdentity(t) putReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) @@ -723,7 +741,8 @@ func TestDHT_handleGetValue_happy_path_ipns_record(t *testing.T) { data, err := putReq.Record.Marshal() require.NoError(t, err) - err = d.ds.Put(context.Background(), datastoreKey(putReq.GetKey()), data) + dsKey := newDatastoreKey(namespaceIPNS, strings.TrimPrefix(string(putReq.GetKey()), fmt.Sprintf("/%s/", namespaceIPNS))) + err = rbe.datastore.Put(context.Background(), dsKey, data) require.NoError(t, err) getReq := &pb.Message{ @@ -749,7 +768,7 @@ func TestDHT_handleGetValue_record_not_found(t *testing.T) { req := &pb.Message{ Type: pb.Message_GET_VALUE, - Key: []byte("unknown-record-key"), + Key: []byte("/ipns/unknown-record-key"), } resp, err := d.handleGetValue(context.Background(), newPeerID(t), req) @@ -767,9 +786,14 @@ func TestDHT_handleGetValue_corrupt_record_in_datastore(t *testing.T) { fillRoutingTable(t, d) - key := []byte("record-key") + rbe, ok := 
d.backends[namespaceIPNS].(*RecordBackend) + require.True(t, ok) + + key := []byte("/ipns/record-key") + + dsKey := newDatastoreKey(namespaceIPNS, "record-key") - err := d.ds.Put(context.Background(), datastoreKey(key), []byte("corrupt-data")) + err := rbe.datastore.Put(context.Background(), dsKey, []byte("corrupt-data")) require.NoError(t, err) req := &pb.Message{ @@ -787,7 +811,7 @@ func TestDHT_handleGetValue_corrupt_record_in_datastore(t *testing.T) { assert.Len(t, resp.ProviderPeers, 0) // check that the record was deleted from the datastore - data, err := d.ds.Get(context.Background(), datastoreKey(key)) + data, err := rbe.datastore.Get(context.Background(), dsKey) assert.ErrorIs(t, err, ds.ErrNotFound) assert.Len(t, data, 0) } @@ -797,14 +821,19 @@ func TestDHT_handleGetValue_max_age_exceeded_record_in_datastore(t *testing.T) { fillRoutingTable(t, d) + rbe, ok := d.backends[namespaceIPNS].(*RecordBackend) + require.True(t, ok) + remote, priv := newIdentity(t) putReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) + dsKey := newDatastoreKey(namespaceIPNS, strings.TrimPrefix(string(putReq.GetKey()), fmt.Sprintf("/%s/", namespaceIPNS))) + data, err := putReq.Record.Marshal() require.NoError(t, err) - err = d.ds.Put(context.Background(), datastoreKey(putReq.GetKey()), data) + err = rbe.datastore.Put(context.Background(), dsKey, data) require.NoError(t, err) req := &pb.Message{ @@ -812,7 +841,7 @@ func TestDHT_handleGetValue_max_age_exceeded_record_in_datastore(t *testing.T) { Key: putReq.GetKey(), } - d.cfg.MaxRecordAge = 0 + rbe.cfg.MaxRecordAge = 0 resp, err := d.handleGetValue(context.Background(), remote, req) require.NoError(t, err) @@ -824,7 +853,7 @@ func TestDHT_handleGetValue_max_age_exceeded_record_in_datastore(t *testing.T) { assert.Len(t, resp.ProviderPeers, 0) // check that the record was deleted from the datastore - data, err = d.ds.Get(context.Background(), datastoreKey(putReq.GetKey())) + data, err = rbe.datastore.Get(context.Background(), dsKey) assert.ErrorIs(t, err, ds.ErrNotFound) assert.Len(t, data, 0) } @@ -834,6 +863,9 @@ func TestDHT_handleGetValue_does_not_validate_stored_record(t *testing.T) { fillRoutingTable(t, d) + rbe, ok := d.backends[namespaceIPNS].(*RecordBackend) + require.True(t, ok) + remote, priv := newIdentity(t) // generate expired record (doesn't pass validation) @@ -842,7 +874,9 @@ func TestDHT_handleGetValue_does_not_validate_stored_record(t *testing.T) { data, err := putReq.Record.Marshal() require.NoError(t, err) - err = d.ds.Put(context.Background(), datastoreKey(putReq.GetKey()), data) + dsKey := newDatastoreKey(namespaceIPNS, strings.TrimPrefix(string(putReq.GetKey()), fmt.Sprintf("/%s/", namespaceIPNS))) + + err = rbe.datastore.Put(context.Background(), dsKey, data) require.NoError(t, err) req := &pb.Message{ diff --git a/v2/pb/dht.aux.go b/v2/pb/dht.aux.go index c8a887d8..3f29cd45 100644 --- a/v2/pb/dht.aux.go +++ b/v2/pb/dht.aux.go @@ -47,7 +47,7 @@ func (m *Message_Peer) Addresses() []ma.Multiaddr { for _, addr := range m.Addrs { maddr, err := ma.NewMultiaddrBytes(addr) if err != nil { - slog.Debug("error decoding multiaddr for peer", "peer", peer.ID(m.Id), "error", err) + slog.Debug("error decoding multiaddr for peer", "peer", peer.ID(m.Id), "err", err) continue } diff --git a/v2/stream.go b/v2/stream.go index d30f6067..ef100e58 100644 --- a/v2/stream.go +++ b/v2/stream.go @@ -33,7 +33,7 @@ func (d *DHT) streamHandler(s network.Stream) { ) if err := s.Scope().SetService(ServiceName); err != nil { - 
d.log.LogAttrs(ctx, slog.LevelWarn, "error attaching stream to DHT service", slog.String("error", err.Error())) + d.log.LogAttrs(ctx, slog.LevelWarn, "error attaching stream to DHT service", slog.String("err", err.Error())) _ = s.Reset() return } @@ -164,7 +164,7 @@ func (d *DHT) streamReadMsg(ctx context.Context, slogger *slog.Logger, r msgio.R // log any other errors than stream resets if err.Error() != "stream reset" { - slogger.LogAttrs(ctx, slog.LevelDebug, "error reading message", slog.String("error", err.Error())) + slogger.LogAttrs(ctx, slog.LevelDebug, "error reading message", slog.String("err", err.Error())) } // record any potential partial message we have received @@ -192,7 +192,7 @@ func (d *DHT) streamUnmarshalMsg(ctx context.Context, slogger *slog.Logger, data var req pb.Message if err := req.Unmarshal(data); err != nil { - slogger.LogAttrs(ctx, slog.LevelDebug, "error unmarshalling message", slog.String("error", err.Error())) + slogger.LogAttrs(ctx, slog.LevelDebug, "error unmarshalling message", slog.String("err", err.Error())) _ = stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(metrics.KeyMessageType, "UNKNOWN")}, @@ -238,7 +238,7 @@ func (d *DHT) streamWriteMsg(ctx context.Context, slogger *slog.Logger, s networ defer span.End() if err := writeMsg(s, msg); err != nil { - slogger.LogAttrs(ctx, slog.LevelDebug, "error writing response", slog.String("error", err.Error())) + slogger.LogAttrs(ctx, slog.LevelDebug, "error writing response", slog.String("err", err.Error())) stats.Record(ctx, metrics.ReceivedMessageErrors.M(1)) return err } From 5e09e952bc091220e4d71727a044c78a2add67bb Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Wed, 23 Aug 2023 13:13:11 +0200 Subject: [PATCH 17/64] improve backend documentation --- v2/backend.go | 475 ++++------------------------------------- v2/backend_provider.go | 294 +++++++++++++++++++++++++ v2/backend_record.go | 182 ++++++++++++++++ v2/config.go | 61 +++--- v2/dht.go | 14 +- v2/handlers.go | 2 + 6 files changed, 556 insertions(+), 472 deletions(-) create mode 100644 v2/backend_provider.go create mode 100644 v2/backend_record.go diff --git a/v2/backend.go b/v2/backend.go index 7c0105ae..a9a72da1 100644 --- a/v2/backend.go +++ b/v2/backend.go @@ -2,25 +2,13 @@ package dht import ( "context" - "encoding/binary" - "errors" - "fmt" - "path" - "strings" - "sync" - "time" lru "github.com/hashicorp/golang-lru/v2" "github.com/ipfs/boxo/ipns" ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/autobatch" - dsq "github.com/ipfs/go-datastore/query" record "github.com/libp2p/go-libp2p-record" - recpb "github.com/libp2p/go-libp2p-record/pb" - "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" - "github.com/multiformats/go-base32" - "golang.org/x/exp/slog" ) // Default namespaces @@ -30,20 +18,44 @@ const ( namespaceProviders = "providers" ) +// A Backend implementation handles requests from other peers. Depending on the +// keys root namespace, we pass the request to the corresponding backend. For +// example, the root namespace for the key "/ipns/$binary_id" is "ipns." If +// we receive a PUT_VALUE request from another peer for the above key, we will +// pass the included record to the "ipns backend." This backend is responsible +// for validating the record and storing or discarding it. The same applies for, +// e.g., "/providers/..." keys which we will receive for ADD_PROVIDER and +// GET_PROVIDERS requests. 
The [ProvidersBackend] will take care of storing the
+// records so that they can be retrieved efficiently via Fetch.
+//
+// To support additional record types, users would implement this Backend
+// interface and register it for a custom namespace with the [DHT] [Config] by
+// adding it to the [Config.Backends] map. Any PUT_VALUE/GET_VALUE requests
+// for that namespace would then be supported. The requirement, though, is
+// that all "any" types must be [*recpb.Record] types. The below interface
+// cannot enforce that type because provider records are handled slightly
+// differently. For example, with provider records, the return values are not
+// assigned to the [pb.Message.Record] field but to the
+// [pb.Message.ProviderPeers] field.
 type Backend interface {
+	// Store stores the given value at the given key, returning the written
+	// record. The written record could be of a different type than the value
+	// that was passed into Store.
 	Store(ctx context.Context, key string, value any) (any, error)
+
+	// Fetch returns the record for the given key, a [ds.ErrNotFound] error if
+	// it wasn't found, or another error if one occurred.
 	Fetch(ctx context.Context, key string) (any, error)
 }
 
-func NewValueBackend(namespace string, datastore ds.TxnDatastore, validator record.Validator) *RecordBackend {
-	return &RecordBackend{
-		namespace: namespace,
-		datastore: datastore,
-		validator: validator,
+// NewBackendIPNS initializes a new backend for the "ipns" namespace that can
+// store and fetch IPNS records from the given datastore. The stored and
+// returned records must be of type [*recpb.Record]. The cfg parameter can be
+// nil, in which case the [DefaultRecordBackendConfig] will be used.
+func NewBackendIPNS(ds ds.TxnDatastore, kb peerstore.KeyBook, cfg *RecordBackendConfig) *RecordBackend {
+	if cfg == nil {
+		cfg = DefaultRecordBackendConfig()
+	}
 
 	return &RecordBackend{
 		cfg:       cfg,
 		log:       cfg.Logger,
@@ -53,9 +65,13 @@ func NewIPNSBackend(ds ds.TxnDatastore, kb peerstore.KeyBook, cfg *ValueBackendC
 	}
 }
 
-func NewPublicKeyBackend(ds ds.TxnDatastore, cfg *ValueBackendConfig) *RecordBackend {
+// NewBackendPublicKey initializes a new backend for the "pk" namespace that can
+// store and fetch public key records from the given datastore. The stored and
+// returned records must be of type [*recpb.Record]. The cfg parameter can be
+// nil, in which case the [DefaultRecordBackendConfig] will be used.
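+// A minimal wiring sketch (dstore is assumed to be a ds.TxnDatastore and the
+// cfg.Backends map to be initialized):
+//
+//	cfg.Backends[namespacePublicKey] = NewBackendPublicKey(dstore, nil)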
+func NewBackendPublicKey(ds ds.TxnDatastore, cfg *RecordBackendConfig) *RecordBackend { if cfg == nil { - cfg = DefaultValueBackendConfig() + cfg = DefaultRecordBackendConfig() } return &RecordBackend{ @@ -67,204 +83,12 @@ func NewPublicKeyBackend(ds ds.TxnDatastore, cfg *ValueBackendConfig) *RecordBac } } -type RecordBackend struct { - cfg *ValueBackendConfig - log *slog.Logger - namespace string - datastore ds.TxnDatastore - validator record.Validator -} - -var _ Backend = (*RecordBackend)(nil) - -type ValueBackendConfig struct { - MaxRecordAge time.Duration - Logger *slog.Logger -} - -func DefaultValueBackendConfig() *ValueBackendConfig { - return &ValueBackendConfig{ - Logger: slog.Default(), - MaxRecordAge: 48 * time.Hour, // empirically measured in: https://github.com/plprobelab/network-measurements/blob/master/results/rfm17-provider-record-liveness.md - } -} - -func (v *RecordBackend) Store(ctx context.Context, key string, value any) (any, error) { - rec, ok := value.(*recpb.Record) - if !ok { - return nil, fmt.Errorf("expected *recpb.Record value type, got: %T", value) - } - - if key != string(rec.GetKey()) { - return nil, fmt.Errorf("key doesn't match record key") - } - - ns, suffix, err := record.SplitKey(key) // get namespace (prefix of the key) - if err != nil { - return nil, fmt.Errorf("invalid key %s: %w", key, err) - } - - if v.namespace != ns { - return nil, fmt.Errorf("expected namespace %s, got %s", v.namespace, ns) - } - - dsKey := newDatastoreKey(v.namespace, suffix) - - if err := v.validator.Validate(string(rec.GetKey()), rec.GetValue()); err != nil { - return nil, fmt.Errorf("put bad record: %w", err) - } - - txn, err := v.datastore.NewTransaction(ctx, false) - if err != nil { - return nil, fmt.Errorf("new transaction: %w", err) - } - defer txn.Discard(ctx) // discard is a no-op if txn was committed beforehand - - shouldReplace, err := v.shouldReplaceExistingRecord(ctx, txn, dsKey, rec) - if err != nil { - return nil, fmt.Errorf("checking datastore for better record: %w", err) - } else if !shouldReplace { - return nil, fmt.Errorf("received worse record") - } - - // avoid storing arbitrary data, so overwrite that field - rec.TimeReceived = time.Now().UTC().Format(time.RFC3339Nano) - - data, err := rec.Marshal() - if err != nil { - return nil, fmt.Errorf("marshal incoming record: %w", err) - } - - if err = txn.Put(ctx, dsKey, data); err != nil { - return nil, fmt.Errorf("storing record in datastore: %w", err) - } - - if err = txn.Commit(ctx); err != nil { - return nil, fmt.Errorf("committing new record to datastore: %w", err) - } - - return rec, nil -} - -func (v *RecordBackend) Fetch(ctx context.Context, key string) (any, error) { - ns, suffix, err := record.SplitKey(key) // get namespace (prefix of the key) - if err != nil { - return nil, fmt.Errorf("invalid key %s: %w", key, err) - } - - if v.namespace != ns { - return nil, fmt.Errorf("expected namespace %s, got %s", v.namespace, ns) - } - - dsKey := newDatastoreKey(v.namespace, suffix) - - // fetch record from the datastore for the requested key - buf, err := v.datastore.Get(ctx, dsKey) - if err != nil { - return nil, err - } - - // we have found a record, parse it and do basic validation - rec := &recpb.Record{} - err = rec.Unmarshal(buf) - if err != nil { - // we have a corrupt record in the datastore -> delete it and pretend - // that we don't know about it - if err := v.datastore.Delete(ctx, dsKey); err != nil { - v.log.LogAttrs(ctx, slog.LevelWarn, "Failed deleting corrupt record from datastore", 
slog.String("err", err.Error())) - } - - return nil, nil - } - - // validate that we don't serve stale records. - receivedAt, err := time.Parse(time.RFC3339Nano, rec.GetTimeReceived()) - if err != nil || time.Since(receivedAt) > v.cfg.MaxRecordAge { - errStr := "" - if err != nil { - errStr = err.Error() - } - - v.log.LogAttrs(ctx, slog.LevelWarn, "Invalid received timestamp on stored record", slog.String("err", errStr), slog.Duration("age", time.Since(receivedAt))) - if err = v.datastore.Delete(ctx, dsKey); err != nil { - v.log.LogAttrs(ctx, slog.LevelWarn, "Failed deleting bad record from datastore", slog.String("err", err.Error())) - } - return nil, nil - } - - // We don't do any additional validation beyond checking the above - // timestamp. We put the burden of validating the record on the requester as - // checking a record may be computationally expensive. - - return rec, nil -} - -// shouldReplaceExistingRecord returns true if the given record should replace any -// existing one in the local datastore. It queries the datastore, unmarshalls -// the record, validates it, and compares it to the incoming record. If the -// incoming one is "better" (e.g., just newer), this function returns true. -// If unmarshalling or validation fails, this function (alongside an error) also -// returns true because the existing record should be replaced. -func (v *RecordBackend) shouldReplaceExistingRecord(ctx context.Context, txn ds.Read, dsKey ds.Key, newRec *recpb.Record) (bool, error) { - ctx, span := tracer.Start(ctx, "DHT.shouldReplaceExistingRecord") - defer span.End() - - existingBytes, err := txn.Get(ctx, dsKey) - if errors.Is(err, ds.ErrNotFound) { - return true, nil - } else if err != nil { - return false, fmt.Errorf("getting record from datastore: %w", err) - } - - existingRec := &recpb.Record{} - if err := existingRec.Unmarshal(existingBytes); err != nil { - return true, nil - } - - if err := v.validator.Validate(string(existingRec.GetKey()), existingRec.GetValue()); err != nil { - return true, nil - } - - records := [][]byte{newRec.GetValue(), existingRec.GetValue()} - i, err := v.validator.Select(string(newRec.GetKey()), records) - if err != nil { - return false, fmt.Errorf("record selection: %w", err) - } else if i != 0 { - return false, nil - } - - return true, nil -} - -type ProviderBackend struct { - namespace string - cfg *ProviderBackendConfig - log *slog.Logger - cache *lru.Cache[string, providerSet] - peerstore peerstore.Peerstore - datastore *autobatch.Datastore - gcSkip sync.Map -} - -type ProviderBackendConfig struct { - ProvideValidity time.Duration - AddressTTL time.Duration - BatchSize int - CacheSize int - Logger *slog.Logger -} - -func DefaultProviderBackendConfig() *ProviderBackendConfig { - return &ProviderBackendConfig{ - ProvideValidity: time.Hour * 48, - AddressTTL: 24 * time.Hour, - BatchSize: 256, // MAGIC - CacheSize: 256, // MAGIC - Logger: slog.Default(), - } -} - -func NewProviderBackend(pstore peerstore.Peerstore, dstore ds.Batching, cfg *ProviderBackendConfig) (*ProviderBackend, error) { +// NewBackendProvider initializes a new backend for the "providers" namespace +// that can store and fetch provider records from the given datastore. The +// values passed into Store must be of type [peer.AddrInfo]. The values returned +// from Fetch will be of type [providerSet] (unexported). The cfg parameter can +// be nil, in which case the [DefaultProviderBackendConfig] will be used. 
+func NewBackendProvider(pstore peerstore.Peerstore, dstore ds.Batching, cfg *ProviderBackendConfig) (*ProvidersBackend, error) { if cfg == nil { cfg = DefaultProviderBackendConfig() } @@ -274,7 +98,7 @@ func NewProviderBackend(pstore peerstore.Peerstore, dstore ds.Batching, cfg *Pro return nil, err } - p := &ProviderBackend{ + p := &ProvidersBackend{ cfg: cfg, log: cfg.Logger, cache: cache, @@ -285,214 +109,3 @@ func NewProviderBackend(pstore peerstore.Peerstore, dstore ds.Batching, cfg *Pro return p, nil } - -var _ Backend = (*ProviderBackend)(nil) - -func (p *ProviderBackend) Store(ctx context.Context, key string, value any) (any, error) { - addrInfo, ok := value.(peer.AddrInfo) - if !ok { - return nil, fmt.Errorf("expected peer.AddrInfo value type, got: %T", value) - } - - ns, suffix, err := record.SplitKey(key) // get namespace (prefix of the key) - if err != nil { - return nil, fmt.Errorf("invalid key %s: %w", key, err) - } - - if p.namespace != ns { - return nil, fmt.Errorf("expected namespace %s, got %s", p.namespace, ns) - } - - rec := expiryRecord{ - expiry: time.Now(), - } - - if provs, ok := p.cache.Get(key); ok { - provs.setVal(addrInfo, rec.expiry) - } - - p.peerstore.AddAddrs(addrInfo.ID, addrInfo.Addrs, p.cfg.AddressTTL) - - dsKey := newDatastoreKey(ns, suffix, string(addrInfo.ID)) - - _, found := p.gcSkip.LoadOrStore(dsKey.String(), struct{}{}) - - if err := p.datastore.Put(ctx, dsKey, rec.MarshalBinary()); err != nil { - p.cache.Remove(key) - - // if we have just added the key to the gc skip list, delete it again - // if we have added it in a previous Store invocation, keep it around - if !found { - p.gcSkip.Delete(dsKey.String()) - } - - return nil, fmt.Errorf("datastore put: %w", err) - } - - return addrInfo, nil -} - -func (p *ProviderBackend) Fetch(ctx context.Context, key string) (any, error) { - ns, suffix, err := record.SplitKey(key) // get namespace (prefix of the key) - if err != nil { - return nil, fmt.Errorf("invalid key %s: %w", key, err) - } - - if p.namespace != ns { - return nil, fmt.Errorf("expected namespace %s, got %s", p.namespace, ns) - } - - if cached, ok := p.cache.Get(key); ok { - return cached, nil - } - - qKey := newDatastoreKey(ns, suffix) - q, err := p.datastore.Query(ctx, dsq.Query{Prefix: qKey.String()}) - if err != nil { - return nil, err - } - - defer func() { - if err = q.Close(); err != nil { - p.log.LogAttrs(ctx, slog.LevelWarn, "failed closing fetch query", slog.String("err", err.Error())) - } - }() - - now := time.Now() - out := newProviderSet() - - for e := range q.Next() { - if e.Error != nil { - p.log.LogAttrs(ctx, slog.LevelWarn, "Fetch datastore entry contains error", slog.String("key", e.Key), slog.String("err", e.Error.Error())) - continue - } - - rec := expiryRecord{} - if err = rec.UnmarshalBinary(e.Value); err != nil { - p.log.LogAttrs(ctx, slog.LevelWarn, "Fetch provider record unmarshalling failed", slog.String("key", e.Key), slog.String("err", err.Error())) - p.delete(ctx, ds.RawKey(e.Key)) - continue - } else if now.Sub(rec.expiry) > p.cfg.ProvideValidity { - // record is expired - p.delete(ctx, ds.RawKey(e.Key)) - continue - } - - idx := strings.LastIndex(e.Key, "/") - binPeerID, err := base32.RawStdEncoding.DecodeString(e.Key[idx+1:]) - if err != nil { - p.log.LogAttrs(ctx, slog.LevelWarn, "base32 key decoding error", slog.String("key", e.Key), slog.String("err", err.Error())) - p.delete(ctx, ds.RawKey(e.Key)) - continue - } - - addrInfo := p.peerstore.PeerInfo(peer.ID(binPeerID)) - - out.setVal(addrInfo, rec.expiry) - } 
- - if len(out.providers) > 0 { - p.cache.Add(key, *out) - } - - return out, nil -} - -func (p *ProviderBackend) CollectGarbage(ctx context.Context) { - // Faster to purge than garbage collecting - p.cache.Purge() - - p.gcSkip = sync.Map{} - - // Now, kick off a GC of the datastore. - q, err := p.datastore.Query(ctx, dsq.Query{Prefix: p.namespace}) - if err != nil { - p.log.LogAttrs(ctx, slog.LevelWarn, "provider record garbage collection query failed", slog.String("err", err.Error())) - return - } - - defer func() { - if err = q.Close(); err != nil { - p.log.LogAttrs(ctx, slog.LevelWarn, "failed closing garbage collection query", slog.String("err", err.Error())) - } - }() - - for e := range q.Next() { - if e.Error != nil { - p.log.LogAttrs(ctx, slog.LevelWarn, "Garbage collection datastore entry contains error", slog.String("key", e.Key), slog.String("err", e.Error.Error())) - continue - } - - if _, found := p.gcSkip.Load(e.Key); found { - continue - } - - rec := expiryRecord{} - if err = rec.UnmarshalBinary(e.Value); err != nil { - p.log.LogAttrs(ctx, slog.LevelWarn, "Garbage collection provider record unmarshalling failed", slog.String("key", e.Key), slog.String("err", err.Error())) - p.delete(ctx, ds.RawKey(e.Key)) - } else if time.Now().Sub(rec.expiry) <= p.cfg.ProvideValidity { - continue - } - - // record expired -> garbage collect - p.delete(ctx, ds.RawKey(e.Key)) - } -} - -func (p *ProviderBackend) delete(ctx context.Context, dsKey ds.Key) { - if err := p.datastore.Delete(ctx, dsKey); err != nil { - p.log.LogAttrs(ctx, slog.LevelWarn, "failed to remove provider record from disk", slog.String("key", dsKey.String()), slog.String("err", err.Error())) - } -} - -type expiryRecord struct { - expiry time.Time -} - -func (e *expiryRecord) MarshalBinary() (data []byte) { - buf := make([]byte, 16) - n := binary.PutVarint(buf, e.expiry.UnixNano()) - return buf[:n] -} - -func (e *expiryRecord) UnmarshalBinary(data []byte) error { - nsec, n := binary.Varint(data) - if n == 0 { - return fmt.Errorf("failed to parse time") - } - - e.expiry = time.Unix(0, nsec) - - return nil -} - -type providerSet struct { - providers []peer.AddrInfo - set map[peer.ID]time.Time -} - -func newProviderSet() *providerSet { - return &providerSet{ - providers: []peer.AddrInfo{}, - set: make(map[peer.ID]time.Time), - } -} - -func (ps *providerSet) setVal(addrInfo peer.AddrInfo, t time.Time) { - _, found := ps.set[addrInfo.ID] - if !found { - ps.providers = append(ps.providers, addrInfo) - } - - ps.set[addrInfo.ID] = t -} - -func newDatastoreKey(namespace string, binStrs ...string) ds.Key { - elems := make([]string, len(binStrs)+1) - elems[0] = namespace - for i, bin := range binStrs { - elems[i+1] = base32.RawStdEncoding.EncodeToString([]byte(bin)) - } - return ds.NewKey("/" + path.Join(elems...)) -} diff --git a/v2/backend_provider.go b/v2/backend_provider.go new file mode 100644 index 00000000..baae5471 --- /dev/null +++ b/v2/backend_provider.go @@ -0,0 +1,294 @@ +package dht + +import ( + "context" + "encoding/binary" + "fmt" + "path" + "strings" + "sync" + "time" + + lru "github.com/hashicorp/golang-lru/v2" + ds "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/autobatch" + dsq "github.com/ipfs/go-datastore/query" + record "github.com/libp2p/go-libp2p-record" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/multiformats/go-base32" + "golang.org/x/exp/slog" +) + +// ProvidersBackend implements the [Backend] interface and handles provider +// record 
requests for the "/providers/" namespace.
+type ProvidersBackend struct {
+	namespace string                          // the namespace string, usually set to namespaceProviders ("providers")
+	cfg       *ProviderBackendConfig          // default is given by DefaultProviderBackendConfig
+	log       *slog.Logger                    // convenience accessor of cfg.Logger
+	cache     *lru.Cache[string, providerSet] // LRU cache for frequently requested records. TODO: is that really so effective? The cache size is also quite low.
+	peerstore peerstore.Peerstore             // reference to the peer store to store and fetch peer multiaddresses from (we don't save them in the datastore)
+	datastore *autobatch.Datastore            // the datastore where we save the peer IDs providing a certain multihash
+	gcSkip    sync.Map                        // a sync map that marks records as to-be-skipped by the garbage collection process
+}
+
+var _ Backend = (*ProvidersBackend)(nil)
+
+// ProviderBackendConfig is used to construct a [ProvidersBackend]. Use
+// [DefaultProviderBackendConfig] to get a default configuration struct and then
+// modify it to your liking.
+type ProviderBackendConfig struct {
+	ProvideValidity time.Duration // specifies for how long provider records are valid
+	AddressTTL      time.Duration // specifies for how long we will keep around provider multi addresses in the peerstore. If such multiaddresses are present we send them alongside the peer ID to the requesting peer. This prevents the necessity for a second look for the multiaddresses on the requesting peers' side.
+	BatchSize       int           // specifies how many provider record writes should be batched
+	CacheSize       int           // specifies the LRU cache size
+	Logger          *slog.Logger  // the logger to use
+}
+
+// DefaultProviderBackendConfig returns a default [ProvidersBackend]
+// configuration. Use this as a starting point and modify it. If a nil
+// configuration is passed to [NewBackendProvider], this default configuration
+// is used.
+func DefaultProviderBackendConfig() *ProviderBackendConfig {
+	return &ProviderBackendConfig{
+		ProvideValidity: time.Hour * 48, // empirically measured in: https://github.com/plprobelab/network-measurements/blob/master/results/rfm17-provider-record-liveness.md
+		AddressTTL:      24 * time.Hour,
+		BatchSize:       256, // MAGIC
+		CacheSize:       256, // MAGIC
+		Logger:          slog.Default(),
+	}
+}
+
+// Store implements the [Backend] interface. In the case of a [ProvidersBackend]
+// this method accepts a [peer.AddrInfo] as a value and stores it in the
+// configured datastore.
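+//
+// A rough usage sketch (illustrative, not part of this patch; mh, pid, and
+// addrs are hypothetical):
+//
+//	key := "/providers/" + string(mh)
+//	written, err := backend.Store(ctx, key, peer.AddrInfo{ID: pid, Addrs: addrs})
+//
+// Any value other than a peer.AddrInfo is rejected with an error.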
+func (p *ProvidersBackend) Store(ctx context.Context, key string, value any) (any, error) { + addrInfo, ok := value.(peer.AddrInfo) + if !ok { + return nil, fmt.Errorf("expected peer.AddrInfo value type, got: %T", value) + } + + ns, suffix, err := record.SplitKey(key) // get namespace (prefix of the key) + if err != nil { + return nil, fmt.Errorf("invalid key %s: %w", key, err) + } + + if p.namespace != ns { + return nil, fmt.Errorf("expected namespace %s, got %s", p.namespace, ns) + } + + rec := expiryRecord{ + expiry: time.Now(), + } + + if provs, ok := p.cache.Get(key); ok { + provs.setVal(addrInfo, rec.expiry) + } + + p.peerstore.AddAddrs(addrInfo.ID, addrInfo.Addrs, p.cfg.AddressTTL) + + dsKey := newDatastoreKey(ns, suffix, string(addrInfo.ID)) + + _, found := p.gcSkip.LoadOrStore(dsKey.String(), struct{}{}) + + if err := p.datastore.Put(ctx, dsKey, rec.MarshalBinary()); err != nil { + p.cache.Remove(key) + + // if we have just added the key to the gc skip list, delete it again + // if we have added it in a previous Store invocation, keep it around + if !found { + p.gcSkip.Delete(dsKey.String()) + } + + return nil, fmt.Errorf("datastore put: %w", err) + } + + return addrInfo, nil +} + +// Fetch implements the [Backend] interface. In the case of a [ProvidersBackend] +// this method returns a [providerSet] (unexported) that contains all peer IDs +// and known multiaddresses for the given key. The key parameter should be of +// the form "/providers/$binary_multihash". +func (p *ProvidersBackend) Fetch(ctx context.Context, key string) (any, error) { + ns, suffix, err := record.SplitKey(key) // get namespace (prefix of the key) + if err != nil { + return nil, fmt.Errorf("invalid key %s: %w", key, err) + } + + if p.namespace != ns { + return nil, fmt.Errorf("expected namespace %s, got %s", p.namespace, ns) + } + + if cached, ok := p.cache.Get(key); ok { + return cached, nil + } + + qKey := newDatastoreKey(ns, suffix) + q, err := p.datastore.Query(ctx, dsq.Query{Prefix: qKey.String()}) + if err != nil { + return nil, err + } + + defer func() { + if err = q.Close(); err != nil { + p.log.LogAttrs(ctx, slog.LevelWarn, "failed closing fetch query", slog.String("err", err.Error())) + } + }() + + now := time.Now() + out := newProviderSet() + + for e := range q.Next() { + if e.Error != nil { + p.log.LogAttrs(ctx, slog.LevelWarn, "Fetch datastore entry contains error", slog.String("key", e.Key), slog.String("err", e.Error.Error())) + continue + } + + rec := expiryRecord{} + if err = rec.UnmarshalBinary(e.Value); err != nil { + p.log.LogAttrs(ctx, slog.LevelWarn, "Fetch provider record unmarshalling failed", slog.String("key", e.Key), slog.String("err", err.Error())) + p.delete(ctx, ds.RawKey(e.Key)) + continue + } else if now.Sub(rec.expiry) > p.cfg.ProvideValidity { + // record is expired + p.delete(ctx, ds.RawKey(e.Key)) + continue + } + + idx := strings.LastIndex(e.Key, "/") + binPeerID, err := base32.RawStdEncoding.DecodeString(e.Key[idx+1:]) + if err != nil { + p.log.LogAttrs(ctx, slog.LevelWarn, "base32 key decoding error", slog.String("key", e.Key), slog.String("err", err.Error())) + p.delete(ctx, ds.RawKey(e.Key)) + continue + } + + addrInfo := p.peerstore.PeerInfo(peer.ID(binPeerID)) + + out.setVal(addrInfo, rec.expiry) + } + + if len(out.providers) > 0 { + p.cache.Add(key, *out) + } + + return out, nil +} + +// CollectGarbage sweeps through the datastore and deletes all provider records +// that have expired. 
A record is expired if the
+// [ProviderBackendConfig].ProvideValidity is exceeded.
+func (p *ProvidersBackend) CollectGarbage(ctx context.Context) {
+	// Faster to purge than garbage collecting
+	p.cache.Purge()
+
+	p.gcSkip = sync.Map{} // TODO: racy
+
+	// Now, kick off a GC of the datastore.
+	q, err := p.datastore.Query(ctx, dsq.Query{Prefix: p.namespace})
+	if err != nil {
+		p.log.LogAttrs(ctx, slog.LevelWarn, "provider record garbage collection query failed", slog.String("err", err.Error()))
+		return
+	}
+
+	defer func() {
+		if err = q.Close(); err != nil {
+			p.log.LogAttrs(ctx, slog.LevelWarn, "failed closing garbage collection query", slog.String("err", err.Error()))
+		}
+	}()
+
+	for e := range q.Next() {
+		if e.Error != nil {
+			p.log.LogAttrs(ctx, slog.LevelWarn, "Garbage collection datastore entry contains error", slog.String("key", e.Key), slog.String("err", e.Error.Error()))
+			continue
+		}
+
+		if _, found := p.gcSkip.Load(e.Key); found {
+			continue
+		}
+
+		rec := expiryRecord{}
+		if err = rec.UnmarshalBinary(e.Value); err != nil {
+			p.log.LogAttrs(ctx, slog.LevelWarn, "Garbage collection provider record unmarshalling failed", slog.String("key", e.Key), slog.String("err", err.Error()))
+			p.delete(ctx, ds.RawKey(e.Key))
+		} else if time.Now().Sub(rec.expiry) <= p.cfg.ProvideValidity {
+			continue
+		}
+
+		// record expired -> garbage collect
+		p.delete(ctx, ds.RawKey(e.Key))
+	}
+}
+
+// delete is a convenience method to delete the record at the given datastore
+// key. It doesn't return any error but logs it instead as a warning.
+func (p *ProvidersBackend) delete(ctx context.Context, dsKey ds.Key) {
+	if err := p.datastore.Delete(ctx, dsKey); err != nil {
+		p.log.LogAttrs(ctx, slog.LevelWarn, "failed to remove provider record from disk", slog.String("key", dsKey.String()), slog.String("err", err.Error()))
+	}
+}
+
+// expiryRecord captures the information that gets written to the datastore
+// for any provider record. This record doesn't include any peer IDs or
+// multiaddresses because peer IDs are part of the key that this record gets
+// stored under and multiaddresses are stored in the peerstore. This record
+// just tracks the expiry time of the record. It implements binary marshalling
+// and unmarshalling methods for easy (de)serialization into the datastore.
+type expiryRecord struct {
+	expiry time.Time
+}
+
+// MarshalBinary returns the byte slice that should be stored in the datastore.
+// This method doesn't comply with the [encoding.BinaryMarshaler] interface
+// because it doesn't return an error. We don't need the conformance here
+// though.
+func (e *expiryRecord) MarshalBinary() (data []byte) {
+	buf := make([]byte, 16)
+	n := binary.PutVarint(buf, e.expiry.UnixNano())
+	return buf[:n]
+}
+
+// UnmarshalBinary is the inverse operation to the above MarshalBinary and is
+// used to deserialize any blob of bytes that was previously stored in the
+// datastore.
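+//
+// A round-trip sketch (illustrative): MarshalBinary varint-encodes the expiry
+// as a UnixNano timestamp, and UnmarshalBinary restores it with nanosecond
+// precision:
+//
+//	in := expiryRecord{expiry: time.Now()}
+//	var out expiryRecord
+//	_ = out.UnmarshalBinary(in.MarshalBinary()) // out.expiry matches in.expiry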
+func (e *expiryRecord) UnmarshalBinary(data []byte) error { + nsec, n := binary.Varint(data) + if n == 0 { + return fmt.Errorf("failed to parse time") + } + + e.expiry = time.Unix(0, nsec) + + return nil +} + +type providerSet struct { + providers []peer.AddrInfo + set map[peer.ID]time.Time +} + +func newProviderSet() *providerSet { + return &providerSet{ + providers: []peer.AddrInfo{}, + set: make(map[peer.ID]time.Time), + } +} + +func (ps *providerSet) setVal(addrInfo peer.AddrInfo, t time.Time) { + _, found := ps.set[addrInfo.ID] + if !found { + ps.providers = append(ps.providers, addrInfo) + } + + ps.set[addrInfo.ID] = t +} + +func newDatastoreKey(namespace string, binStrs ...string) ds.Key { + elems := make([]string, len(binStrs)+1) + elems[0] = namespace + for i, bin := range binStrs { + elems[i+1] = base32.RawStdEncoding.EncodeToString([]byte(bin)) + } + return ds.NewKey("/" + path.Join(elems...)) +} diff --git a/v2/backend_record.go b/v2/backend_record.go new file mode 100644 index 00000000..3c0a976d --- /dev/null +++ b/v2/backend_record.go @@ -0,0 +1,182 @@ +package dht + +import ( + "context" + "errors" + "fmt" + "time" + + ds "github.com/ipfs/go-datastore" + record "github.com/libp2p/go-libp2p-record" + recpb "github.com/libp2p/go-libp2p-record/pb" + "golang.org/x/exp/slog" +) + +type RecordBackend struct { + cfg *RecordBackendConfig + log *slog.Logger + namespace string + datastore ds.TxnDatastore + validator record.Validator +} + +var _ Backend = (*RecordBackend)(nil) + +type RecordBackendConfig struct { + MaxRecordAge time.Duration + Logger *slog.Logger +} + +func DefaultRecordBackendConfig() *RecordBackendConfig { + return &RecordBackendConfig{ + Logger: slog.Default(), + MaxRecordAge: 48 * time.Hour, // empirically measured in: https://github.com/plprobelab/network-measurements/blob/master/results/rfm17-provider-record-liveness.md + } +} + +func (v *RecordBackend) Store(ctx context.Context, key string, value any) (any, error) { + rec, ok := value.(*recpb.Record) + if !ok { + return nil, fmt.Errorf("expected *recpb.Record value type, got: %T", value) + } + + if key != string(rec.GetKey()) { + return nil, fmt.Errorf("key doesn't match record key") + } + + ns, suffix, err := record.SplitKey(key) // get namespace (prefix of the key) + if err != nil { + return nil, fmt.Errorf("invalid key %s: %w", key, err) + } + + if v.namespace != ns { + return nil, fmt.Errorf("expected namespace %s, got %s", v.namespace, ns) + } + + dsKey := newDatastoreKey(v.namespace, suffix) + + if err := v.validator.Validate(string(rec.GetKey()), rec.GetValue()); err != nil { + return nil, fmt.Errorf("put bad record: %w", err) + } + + txn, err := v.datastore.NewTransaction(ctx, false) + if err != nil { + return nil, fmt.Errorf("new transaction: %w", err) + } + defer txn.Discard(ctx) // discard is a no-op if txn was committed beforehand + + shouldReplace, err := v.shouldReplaceExistingRecord(ctx, txn, dsKey, rec) + if err != nil { + return nil, fmt.Errorf("checking datastore for better record: %w", err) + } else if !shouldReplace { + return nil, fmt.Errorf("received worse record") + } + + // avoid storing arbitrary data, so overwrite that field + rec.TimeReceived = time.Now().UTC().Format(time.RFC3339Nano) + + data, err := rec.Marshal() + if err != nil { + return nil, fmt.Errorf("marshal incoming record: %w", err) + } + + if err = txn.Put(ctx, dsKey, data); err != nil { + return nil, fmt.Errorf("storing record in datastore: %w", err) + } + + if err = txn.Commit(ctx); err != nil { + return nil, 
fmt.Errorf("committing new record to datastore: %w", err)
+	}
+
+	return rec, nil
+}
+
+func (v *RecordBackend) Fetch(ctx context.Context, key string) (any, error) {
+	ns, suffix, err := record.SplitKey(key) // get namespace (prefix of the key)
+	if err != nil {
+		return nil, fmt.Errorf("invalid key %s: %w", key, err)
+	}
+
+	if v.namespace != ns {
+		return nil, fmt.Errorf("expected namespace %s, got %s", v.namespace, ns)
+	}
+
+	dsKey := newDatastoreKey(v.namespace, suffix)
+
+	// fetch record from the datastore for the requested key
+	buf, err := v.datastore.Get(ctx, dsKey)
+	if err != nil {
+		return nil, err
+	}
+
+	// we have found a record, parse it and do basic validation
+	rec := &recpb.Record{}
+	err = rec.Unmarshal(buf)
+	if err != nil {
+		// we have a corrupt record in the datastore -> delete it and pretend
+		// that we don't know about it
+		if err := v.datastore.Delete(ctx, dsKey); err != nil {
+			v.log.LogAttrs(ctx, slog.LevelWarn, "Failed deleting corrupt record from datastore", slog.String("err", err.Error()))
+		}
+
+		return nil, nil
+	}
+
+	// validate that we don't serve stale records.
+	receivedAt, err := time.Parse(time.RFC3339Nano, rec.GetTimeReceived())
+	if err != nil || time.Since(receivedAt) > v.cfg.MaxRecordAge {
+		errStr := ""
+		if err != nil {
+			errStr = err.Error()
+		}
+
+		v.log.LogAttrs(ctx, slog.LevelWarn, "Invalid received timestamp on stored record", slog.String("err", errStr), slog.Duration("age", time.Since(receivedAt)))
+		if err = v.datastore.Delete(ctx, dsKey); err != nil {
+			v.log.LogAttrs(ctx, slog.LevelWarn, "Failed deleting bad record from datastore", slog.String("err", err.Error()))
+		}
+		return nil, nil
+	}
+
+	// We don't do any additional validation beyond checking the above
+	// timestamp. We put the burden of validating the record on the requester as
+	// checking a record may be computationally expensive.
+
+	return rec, nil
+}
+
+// shouldReplaceExistingRecord returns true if the given record should replace any
+// existing one in the local datastore. It queries the datastore, unmarshals
+// the record, validates it, and compares it to the incoming record. If the
+// incoming one is "better" (e.g., just newer), this function returns true.
+// If unmarshalling or validation of the stored record fails, this function
+// also returns true because the existing record should be replaced.
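+//
+// Decision sketch (assuming the go-libp2p-record Validator semantics, where
+// Select returns the index of the preferred value; incoming and existing are
+// hypothetical records):
+//
+//	values := [][]byte{incoming.GetValue(), existing.GetValue()}
+//	i, err := v.validator.Select(string(incoming.GetKey()), values)
+//	// err != nil -> selection failed, don't replace
+//	// i == 0     -> incoming wins, replace the stored record
+//	// i != 0     -> existing wins, keep the stored record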
+func (v *RecordBackend) shouldReplaceExistingRecord(ctx context.Context, txn ds.Read, dsKey ds.Key, newRec *recpb.Record) (bool, error) { + ctx, span := tracer.Start(ctx, "DHT.shouldReplaceExistingRecord") + defer span.End() + + existingBytes, err := txn.Get(ctx, dsKey) + if errors.Is(err, ds.ErrNotFound) { + return true, nil + } else if err != nil { + return false, fmt.Errorf("getting record from datastore: %w", err) + } + + existingRec := &recpb.Record{} + if err := existingRec.Unmarshal(existingBytes); err != nil { + return true, nil + } + + if err := v.validator.Validate(string(existingRec.GetKey()), existingRec.GetValue()); err != nil { + return true, nil + } + + records := [][]byte{newRec.GetValue(), existingRec.GetValue()} + i, err := v.validator.Select(string(newRec.GetKey()), records) + if err != nil { + return false, fmt.Errorf("record selection: %w", err) + } else if i != 0 { + return false, nil + } + + return true, nil +} diff --git a/v2/config.go b/v2/config.go index 2db21960..57935969 100644 --- a/v2/config.go +++ b/v2/config.go @@ -24,27 +24,27 @@ const ServiceName = "libp2p.DHT" var tracer = otel.Tracer("go-libp2p-kad-dht") type ( - // ModeOpt describes in which mode this DHT process should operate in. + // ModeOpt describes in which mode this [DHT] process should operate in. // Possible options are client, server, and any variant that switches - // between both automatically based on public reachability. The DHT receives + // between both automatically based on public reachability. The [DHT] receives // reachability updates from libp2p via the EvtLocalReachabilityChanged - // event. A DHT that operates in client mode won't register a stream handler + // event. A [DHT] that operates in client mode won't register a stream handler // for incoming requests and therefore won't store, e.g., any provider or - // IPNS records. A DHT in server mode, on the other hand, does all of that. + // IPNS records. A [DHT] in server mode, on the other hand, does all of that. // - // The `mode` type, on the other hand, captures the current state that the - // DHT is in. This can either be client or server. + // The unexported "mode" type, on the other hand, captures the current state + // that the [DHT] is in. This can either be client or server. ModeOpt string - // mode describes in which mode the DHT currently operates. Because the ModeOpt + // mode describes in which mode the [DHT] currently operates. Because the [ModeOpt] // type has options that automatically switch between client and server mode - // based on public connectivity, the DHT mode at any point in time can differ + // based on public connectivity, the [DHT] mode at any point in time can differ // from the desired mode. Therefore, we define this second mode type that // only has the two forms: client or server. mode string // Datastore is an interface definition that gathers the datastore - // requirements. The DHT requires the datastore to support batching and + // requirements. The [DHT] requires the datastore to support batching and // transactions. Example datastores that implement both features are leveldb // and badger. leveldb can also be used in memory - this is used as the // default datastore. @@ -73,18 +73,18 @@ const ( // to client mode. ModeOptAutoServer ModeOpt = "auto-server" - // modeClient means that the DHT is currently operating in client mode. + // modeClient means that the [DHT] is currently operating in client [mode]. // For more information, check ModeOpt documentation. 
modeClient mode = "client" - // modeServer means that the DHT is currently operating in server mode. + // modeServer means that the [DHT] is currently operating in server [mode]. // For more information, check ModeOpt documentation. modeServer mode = "server" ) -// Config contains all the configuration options for a DHT. Use DefaultConfig -// to build up your own configuration struct. The DHT constructor New uses the -// below method Validate to test for violations of configuration invariants. +// Config contains all the configuration options for a [DHT]. Use [DefaultConfig] +// to build up your own configuration struct. The [DHT] constructor [New] uses the +// below method [*Config.Validate] to test for violations of configuration invariants. type Config struct { // Mode defines if the DHT should operate as a server or client or switch // between both automatically (see ModeOpt). @@ -106,17 +106,18 @@ type Config struct { // about the local node. RoutingTable kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]] - // Backends ... + // The Backends field holds a map of key namespaces to their corresponding + // backend implementation. For example, if we received an IPNS record, the + // key will have the form "/ipns/binary_id". We will forward the handling + // of this record the corresponding backend behind the "ipns" key in this + // map. A backend does record validation and handles the storage of the + // record. Backends map[string]Backend // Logger can be used to configure a custom structured logger instance. // By default go.uber.org/zap is used (wrapped in ipfs/go-log). Logger *slog.Logger - // MaxRecordAge is the default time that a record should last in the DHT. - // This value is also known as the provider record expiration. - MaxRecordAge time.Duration - // TimeoutStreamIdle is the duration we're reading from a stream without // receiving before closing/resetting it. The timeout gets reset every time // we have successfully read a message from the stream. @@ -124,7 +125,7 @@ type Config struct { } // DefaultConfig returns a configuration struct that can be used as-is to -// instantiate a fully functional DHT client. All fields that are nil require +// instantiate a fully functional [DHT] client. All fields that are nil require // some additional information to instantiate. The default values for these // fields come from separate top-level methods prefixed with Default. func DefaultConfig() *Config { @@ -134,15 +135,15 @@ func DefaultConfig() *Config { BucketSize: 20, ProtocolID: "/ipfs/kad/1.0.0", RoutingTable: nil, // nil because a routing table requires information about the local node. triert.TrieRT will be used if this field is nil. + Backends: map[string]Backend{}, Logger: slog.New(zapslog.NewHandler(logging.Logger("dht").Desugar().Core())), - MaxRecordAge: 48 * time.Hour, // empirically measured in: https://github.com/plprobelab/network-measurements/blob/master/results/rfm17-provider-record-liveness.md - TimeoutStreamIdle: time.Minute, // MAGIC: could be done dynamically + TimeoutStreamIdle: time.Minute, // MAGIC: could be done dynamically } } // DefaultRoutingTable returns a triert.TrieRT routing table. This routing table -// cannot be initialized in DefaultConfig because it requires information about -// the local peer. +// cannot be initialized in [DefaultConfig] because it requires information +// about the local peer. 
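+//
+// A wiring sketch (illustrative; nodeID is assumed to be the local node's
+// kad.NodeID):
+//
+//	rt, err := DefaultRoutingTable(nodeID)
+//	if err != nil {
+//		// handle error
+//	}
+//	cfg := DefaultConfig()
+//	cfg.RoutingTable = rt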
func DefaultRoutingTable(nodeID kad.NodeID[key.Key256]) (kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]], error) {
 	rtCfg := triert.DefaultConfig[key.Key256, kad.NodeID[key.Key256]]()
 	rt, err := triert.New[key.Key256, kad.NodeID[key.Key256]](nodeID, rtCfg)
@@ -152,15 +153,11 @@ func DefaultRoutingTable(nodeID kad.NodeID[key.Key256]) (kad.RoutingTable[key.Ke
 	return rt, nil
 }
 
-// DefaultDatastore returns an in-memory leveldb datastore.
-func DefaultDatastore() (Datastore, error) {
+// InMemoryDatastore returns an in-memory leveldb datastore.
+func InMemoryDatastore() (Datastore, error) {
 	return leveldb.NewDatastore("", nil)
 }
 
-func (c *Config) RegisterBackend(namespace string, backend Backend) {
-	c.Backends[namespace] = backend
-}
-
 // Validate validates the configuration struct it is called on. It returns
 // an error if any configuration issue was detected and nil if this is
 // a valid configuration.
@@ -190,10 +187,6 @@ func (c *Config) Validate() error {
 		return fmt.Errorf("logger must not be nil")
 	}
 
-	if c.MaxRecordAge <= 0 {
-		return fmt.Errorf("max record age must be a positive duration")
-	}
-
 	if c.TimeoutStreamIdle <= 0 {
 		return fmt.Errorf("stream idle timeout must be a positive duration")
 	}
diff --git a/v2/dht.go b/v2/dht.go
index a48d0938..a787a963 100644
--- a/v2/dht.go
+++ b/v2/dht.go
@@ -49,8 +49,8 @@ type DHT struct {
 	sub event.Subscription
 }
 
-// New constructs a new DHT for the given underlying host and with the given
-// configuration. Use DefaultConfig() to construct a configuration.
+// New constructs a new [DHT] for the given underlying host and with the given
+// configuration. Use [DefaultConfig] to construct a configuration.
 func New(h host.Host, cfg *Config) (*DHT, error) {
 	var err error
 
@@ -77,7 +77,7 @@ func New(h host.Host, cfg *Config) (*DHT, error) {
 	if len(cfg.Backends) != 0 {
 		d.backends = cfg.Backends
 	} else {
-		dstore, err := DefaultDatastore()
+		dstore, err := InMemoryDatastore()
 		if err != nil {
 			return nil, fmt.Errorf("new default datastore: %w", err)
 		}
@@ -85,17 +85,17 @@ func New(h host.Host, cfg *Config) (*DHT, error) {
 		pbeCfg := DefaultProviderBackendConfig()
 		pbeCfg.Logger = cfg.Logger
 
-		pbe, err := NewProviderBackend(h.Peerstore(), dstore, pbeCfg)
+		pbe, err := NewBackendProvider(h.Peerstore(), dstore, pbeCfg)
 		if err != nil {
 			return nil, fmt.Errorf("new provider backend: %w", err)
 		}
 
-		vbeCfg := DefaultValueBackendConfig()
+		vbeCfg := DefaultRecordBackendConfig()
 		vbeCfg.Logger = cfg.Logger
 
 		d.backends = map[string]Backend{
-			"ipns":      NewIPNSBackend(dstore, h.Peerstore(), vbeCfg),
-			"pk":        NewPublicKeyBackend(dstore, vbeCfg),
+			"ipns":      NewBackendIPNS(dstore, h.Peerstore(), vbeCfg),
+			"pk":        NewBackendPublicKey(dstore, vbeCfg),
 			"providers": pbe,
 		}
 	}
diff --git a/v2/handlers.go b/v2/handlers.go
index ca9773d4..63632746 100644
--- a/v2/handlers.go
+++ b/v2/handlers.go
@@ -21,6 +21,7 @@ func (d *DHT) handleFindPeer(ctx context.Context, remote peer.ID, req *pb.Messag
 		return nil, fmt.Errorf("handleFindPeer with empty key")
 	}
 
+	// "parse" requested peer ID from the key field
 	target := peer.ID(req.GetKey())
 
 	// initialize the response message
@@ -61,6 +62,7 @@ func (d *DHT) handlePing(ctx context.Context, remote peer.ID, req *pb.Message) (
 
 // handlePutValue handles PUT_VALUE RPCs from remote peers.
func (d *DHT) handlePutValue(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { + // validate incoming request -> key and record must not be empty/nil k := string(req.GetKey()) if len(k) == 0 { return nil, fmt.Errorf("no key was provided") From 619429c92739a10c564799447450adb7b969ff13 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Wed, 23 Aug 2023 14:02:44 +0200 Subject: [PATCH 18/64] introduce protocol constants --- v2/backend_provider.go | 45 +++++++++++++++++++++++++++--------------- v2/config.go | 29 +++++++++++++++++++++++---- v2/config_test.go | 16 --------------- v2/dht.go | 17 +++++++++------- 4 files changed, 64 insertions(+), 43 deletions(-) diff --git a/v2/backend_provider.go b/v2/backend_provider.go index baae5471..10022b27 100644 --- a/v2/backend_provider.go +++ b/v2/backend_provider.go @@ -38,11 +38,24 @@ var _ Backend = (*ProvidersBackend)(nil) // [DefaultProviderBackendConfig] to get a default configuration struct and then // modify it to your liking. type ProviderBackendConfig struct { - ProvideValidity time.Duration // specifies for how long provider records are valid - AddressTTL time.Duration // specifies for how long we will keep around provider multi addresses in the peerstore. If such multiaddresses are present we send them alongside the peer ID to the requesting peer. This prevents the necessity for a second look for the multiaddresses on the requesting peers' side. - BatchSize int // specifies how many provider record writes should be batched - CacheSize int // specifies the LRU cache size - Logger *slog.Logger // the logger to use + // ProvideValidity specifies for how long provider records are valid + ProvideValidity time.Duration + + // AddressTTL specifies for how long we will keep around provider multi + // addresses in the peerstore. If such multiaddresses are present we send + // them alongside the peer ID to the requesting peer. This prevents the + // necessity for a second look for the multiaddresses on the requesting + // peers' side. + AddressTTL time.Duration + + // BatchSize specifies how many provider record writes should be batched + BatchSize int + + // CacheSize specifies the LRU cache size + CacheSize int + + // Logger is the logger to use + Logger *slog.Logger } // DefaultProviderBackendConfig returns a default [ProvidersBackend] @@ -82,7 +95,7 @@ func (p *ProvidersBackend) Store(ctx context.Context, key string, value any) (an } if provs, ok := p.cache.Get(key); ok { - provs.setVal(addrInfo, rec.expiry) + provs.addProvider(addrInfo, rec.expiry) } p.peerstore.AddAddrs(addrInfo.ID, addrInfo.Addrs, p.cfg.AddressTTL) @@ -137,7 +150,10 @@ func (p *ProvidersBackend) Fetch(ctx context.Context, key string) (any, error) { }() now := time.Now() - out := newProviderSet() + out := &providerSet{ + providers: []peer.AddrInfo{}, + set: make(map[peer.ID]time.Time), + } for e := range q.Next() { if e.Error != nil { @@ -166,7 +182,7 @@ func (p *ProvidersBackend) Fetch(ctx context.Context, key string) (any, error) { addrInfo := p.peerstore.PeerInfo(peer.ID(binPeerID)) - out.setVal(addrInfo, rec.expiry) + out.addProvider(addrInfo, rec.expiry) } if len(out.providers) > 0 { @@ -263,19 +279,16 @@ func (e *expiryRecord) UnmarshalBinary(data []byte) error { return nil } +// A providerSet is used to gather provider information in a single struct. It +// also makes sure that the user doesn't add any duplicate peers. 
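+//
+// For example (infoA and infoB are hypothetical peers), addProvider keeps the
+// providers slice free of duplicates and only refreshes the timestamp:
+//
+//	ps.addProvider(infoA, t0) // providers: [A]
+//	ps.addProvider(infoB, t1) // providers: [A, B]
+//	ps.addProvider(infoA, t2) // providers: [A, B], set[A] == t2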
type providerSet struct { providers []peer.AddrInfo set map[peer.ID]time.Time } -func newProviderSet() *providerSet { - return &providerSet{ - providers: []peer.AddrInfo{}, - set: make(map[peer.ID]time.Time), - } -} - -func (ps *providerSet) setVal(addrInfo peer.AddrInfo, t time.Time) { +// addProvider adds the given address information to the providerSet. If the +// provider already exists, only the time is updated. +func (ps *providerSet) addProvider(addrInfo peer.AddrInfo, t time.Time) { _, found := ps.set[addrInfo.ID] if !found { ps.providers = append(ps.providers, addrInfo) diff --git a/v2/config.go b/v2/config.go index 57935969..05a1473c 100644 --- a/v2/config.go +++ b/v2/config.go @@ -20,6 +20,19 @@ import ( // ServiceName is used to scope incoming streams for the resource manager. const ServiceName = "libp2p.DHT" +const ( + // ProtocolIPFS is the protocol identifier for the main IPFS network. If the + // DHT is configured with this protocol, you must configure backends for + // IPNS, Public Key, and provider records (ipns, pk, and providers + // namespaces). + ProtocolIPFS protocol.ID = "/ipfs/kad/1.0.0" + + // ProtocolFilecoin is the protocol identifier for Filecoin mainnet. If this + // protocol is configured, the DHT won't automatically add support for any + // of the above record types. + ProtocolFilecoin protocol.ID = "/fil/kad/testnetnet/kad/1.0.0" +) + // tracer is an open telemetry tracing instance var tracer = otel.Tracer("go-libp2p-kad-dht") @@ -109,11 +122,18 @@ type Config struct { // The Backends field holds a map of key namespaces to their corresponding // backend implementation. For example, if we received an IPNS record, the // key will have the form "/ipns/binary_id". We will forward the handling - // of this record the corresponding backend behind the "ipns" key in this + // of this record to the corresponding backend behind the "ipns" key in this // map. A backend does record validation and handles the storage of the - // record. + // record. If this map stays empty, it will be populated with the default + // IPNS ([NewBackendIPNS]), PublicKey ([NewBackendPublicKey]), and + // Providers ([NewBackendProvider]) backends. Backends map[string]Backend + // Datastore will be used to construct the default backends. If this is nil, + // an in-memory leveldb from [InMemoryDatastore] will be used for all + // backends. + Datastore Datastore + // Logger can be used to configure a custom structured logger instance. // By default go.uber.org/zap is used (wrapped in ipfs/go-log). Logger *slog.Logger @@ -133,11 +153,12 @@ func DefaultConfig() *Config { Mode: ModeOptAutoClient, Kademlia: coord.DefaultConfig(), BucketSize: 20, - ProtocolID: "/ipfs/kad/1.0.0", + ProtocolID: ProtocolIPFS, RoutingTable: nil, // nil because a routing table requires information about the local node. triert.TrieRT will be used if this field is nil. 
 		Backends:          map[string]Backend{},
+		Datastore:         nil,
 		Logger:            slog.New(zapslog.NewHandler(logging.Logger("dht").Desugar().Core())),
-		TimeoutStreamIdle: time.Minute, // MAGIC: could be done dynamically
+		TimeoutStreamIdle: time.Minute, // MAGIC
 	}
 }
diff --git a/v2/config_test.go b/v2/config_test.go
index 9049a1a3..8b67d2ba 100644
--- a/v2/config_test.go
+++ b/v2/config_test.go
@@ -56,22 +56,6 @@ func TestConfig_Validate(t *testing.T) {
 				return c
 			},
 		},
-		{
-			name:    "0 max record age",
-			wantErr: true,
-			mutate: func(c *Config) *Config {
-				c.MaxRecordAge = time.Duration(0)
-				return c
-			},
-		},
-		{
-			name:    "negative max record age",
-			wantErr: true,
-			mutate: func(c *Config) *Config {
-				c.MaxRecordAge = time.Duration(-1)
-				return c
-			},
-		},
 		{
 			name:    "0 stream idle timeout",
 			wantErr: true,
diff --git a/v2/dht.go b/v2/dht.go
index a787a963..2a2f5215 100644
--- a/v2/dht.go
+++ b/v2/dht.go
@@ -76,9 +76,12 @@ func New(h host.Host, cfg *Config) (*DHT, error) {
 
 	if len(cfg.Backends) != 0 {
 		d.backends = cfg.Backends
-	} else {
-		dstore, err := InMemoryDatastore()
-		if err != nil {
+	} else if cfg.ProtocolID == ProtocolIPFS {
+
+		var dstore Datastore
+		if cfg.Datastore != nil {
+			dstore = cfg.Datastore
+		} else if dstore, err = InMemoryDatastore(); err != nil {
 			return nil, fmt.Errorf("new default datastore: %w", err)
 		}
 
@@ -90,12 +93,12 @@ func New(h host.Host, cfg *Config) (*DHT, error) {
 			return nil, fmt.Errorf("new provider backend: %w", err)
 		}
 
-		vbeCfg := DefaultRecordBackendConfig()
-		vbeCfg.Logger = cfg.Logger
+		rbeCfg := DefaultRecordBackendConfig()
+		rbeCfg.Logger = cfg.Logger
 
 		d.backends = map[string]Backend{
-			"ipns":      NewBackendIPNS(dstore, h.Peerstore(), vbeCfg),
-			"pk":        NewBackendPublicKey(dstore, vbeCfg),
+			"ipns":      NewBackendIPNS(dstore, h.Peerstore(), rbeCfg),
+			"pk":        NewBackendPublicKey(dstore, rbeCfg),
 			"providers": pbe,
 		}
 	}
From 3648184cce60220564073acdd3cfba16cb155872 Mon Sep 17 00:00:00 2001
From: Dennis Trautwein
Date: Wed, 23 Aug 2023 16:07:44 +0200
Subject: [PATCH 19/64] simplify key handling

---
 v2/backend.go          |  17 ++-
 v2/backend_provider.go |  39 ++----
 v2/backend_record.go   |  65 ++++-----
 v2/config.go           |  18 +++
 v2/config_test.go      |  26 ++++
 v2/handlers.go         |  24 ++--
 v2/handlers_test.go    | 292 ++++++++++++++++++++++++++++-------------
 7 files changed, 308 insertions(+), 173 deletions(-)

diff --git a/v2/backend.go b/v2/backend.go
index a9a72da1..7bd8c068 100644
--- a/v2/backend.go
+++ b/v2/backend.go
@@ -20,7 +20,7 @@ const (
 
 // A Backend implementation handles requests from other peers. Depending on the
 // keys root namespace, we pass the request to the corresponding backend. For
-// example, the root namespace for the key "/ipns/$binary_id" is "ipns." If
+// example, the root namespace for the key "/ipns/BINARY_ID" is "ipns." If
 // we receive a PUT_VALUE request from another peer for the above key, we will
 // pass the included record to the "ipns backend." This backend is responsible
 // for validating the record and storing or discarding it. The same applies for,
@@ -37,12 +37,19 @@
 // example, with provider records, the return values are not assigned to the
 // [pb.Message.Record] field but to the [pb.Message.ProviderPeers] field.
 type Backend interface {
-	// Store stores the given value at the give key, returning the written record.
-	// The written record could be of a different type than the value that was
-	// passed into Store.
+	// Store stores the given value at the given key (prefixed with the namespace
+	// that this backend operates in). It returns the written record.
The key + // that will be handed into the Store won't contain the namespace prefix. For + // example, if we receive a request for /ipns/BINARY_ID, key will be set to + // BINARY_ID. The backend implementation is free to decide how to store the + // data in the datastore. However, it makes sense to prefix the record with + // the namespace that this Backend operates in. The written record that gets + // returned from this method could have a different type than the value that + // was passed into Store, or it could be enriched with additional information + // like the timestamp when it was written. Store(ctx context.Context, key string, value any) (any, error) - // Fetch returns the record for the given key or a [ds.ErrNotFound] if it + // Fetch returns the record for the given path or a [ds.ErrNotFound] if it // wasn't found or another error if any occurred. Fetch(ctx context.Context, key string) (any, error) } diff --git a/v2/backend_provider.go b/v2/backend_provider.go index 10022b27..1a249dac 100644 --- a/v2/backend_provider.go +++ b/v2/backend_provider.go @@ -13,7 +13,6 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/autobatch" dsq "github.com/ipfs/go-datastore/query" - record "github.com/libp2p/go-libp2p-record" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" "github.com/multiformats/go-base32" @@ -81,31 +80,22 @@ func (p *ProvidersBackend) Store(ctx context.Context, key string, value any) (an return nil, fmt.Errorf("expected peer.AddrInfo value type, got: %T", value) } - ns, suffix, err := record.SplitKey(key) // get namespace (prefix of the key) - if err != nil { - return nil, fmt.Errorf("invalid key %s: %w", key, err) - } - - if p.namespace != ns { - return nil, fmt.Errorf("expected namespace %s, got %s", p.namespace, ns) - } - rec := expiryRecord{ expiry: time.Now(), } - if provs, ok := p.cache.Get(key); ok { + cacheKey := newDatastoreKey(p.namespace, key).String() + dsKey := newDatastoreKey(p.namespace, key, string(addrInfo.ID)) + if provs, ok := p.cache.Get(cacheKey); ok { provs.addProvider(addrInfo, rec.expiry) } p.peerstore.AddAddrs(addrInfo.ID, addrInfo.Addrs, p.cfg.AddressTTL) - dsKey := newDatastoreKey(ns, suffix, string(addrInfo.ID)) - _, found := p.gcSkip.LoadOrStore(dsKey.String(), struct{}{}) if err := p.datastore.Put(ctx, dsKey, rec.MarshalBinary()); err != nil { - p.cache.Remove(key) + p.cache.Remove(cacheKey) // if we have just added the key to the gc skip list, delete it again // if we have added it in a previous Store invocation, keep it around @@ -124,20 +114,12 @@ func (p *ProvidersBackend) Store(ctx context.Context, key string, value any) (an // and known multiaddresses for the given key. The key parameter should be of // the form "/providers/$binary_multihash". 
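+// NOTE: with the simplified key handling in this patch, the namespace prefix
+// is stripped by the handler before the backend is invoked, so the key
+// parameter is just the binary multihash. A hypothetical lookup (mh holds the
+// raw multihash bytes):
+//
+//	fetched, err := backend.Fetch(ctx, string(mh))
+//	set, ok := fetched.(*providerSet) // contains providers and their expiry times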
 func (p *ProvidersBackend) Fetch(ctx context.Context, key string) (any, error) {
-	ns, suffix, err := record.SplitKey(key) // get namespace (prefix of the key)
-	if err != nil {
-		return nil, fmt.Errorf("invalid key %s: %w", key, err)
-	}
-
-	if p.namespace != ns {
-		return nil, fmt.Errorf("expected namespace %s, got %s", p.namespace, ns)
-	}
+	qKey := newDatastoreKey(p.namespace, key)
 
-	if cached, ok := p.cache.Get(key); ok {
+	if cached, ok := p.cache.Get(qKey.String()); ok {
 		return cached, nil
 	}
 
-	qKey := newDatastoreKey(ns, suffix)
 	q, err := p.datastore.Query(ctx, dsq.Query{Prefix: qKey.String()})
 	if err != nil {
 		return nil, err
@@ -186,7 +168,7 @@ func (p *ProvidersBackend) Fetch(ctx context.Context, key string) (any, error) {
 	}
 
 	if len(out.providers) > 0 {
-		p.cache.Add(key, *out)
+		p.cache.Add(qKey.String(), *out)
 	}
 
 	return out, nil
@@ -297,6 +279,13 @@ func (ps *providerSet) addProvider(addrInfo peer.AddrInfo, t time.Time) {
 	ps.set[addrInfo.ID] = t
 }
 
+// newDatastoreKey assembles a datastore key for the given namespace and set of
+// binary strings. For example, the IPNS record keys have the format:
+// "/ipns/BINARY_ID" (see [Routing Record]). To construct a datastore key this
+// function base32-encodes the BINARY_ID (and any additional path components)
+// and joins the parts together separated by forward slashes.
+//
+// [Routing Record]: https://specs.ipfs.tech/ipns/ipns-record/#routing-record
 func newDatastoreKey(namespace string, binStrs ...string) ds.Key {
 	elems := make([]string, len(binStrs)+1)
 	elems[0] = namespace
diff --git a/v2/backend_record.go b/v2/backend_record.go
index 3c0a976d..072afaf9 100644
--- a/v2/backend_record.go
+++ b/v2/backend_record.go
@@ -34,38 +34,24 @@ func DefaultRecordBackendConfig() *RecordBackendConfig {
 	}
 }
 
-func (v *RecordBackend) Store(ctx context.Context, key string, value any) (any, error) {
+func (r *RecordBackend) Store(ctx context.Context, key string, value any) (any, error) {
 	rec, ok := value.(*recpb.Record)
 	if !ok {
 		return nil, fmt.Errorf("expected *recpb.Record value type, got: %T", value)
 	}
 
-	if key != string(rec.GetKey()) {
-		return nil, fmt.Errorf("key doesn't match record key")
-	}
-
-	ns, suffix, err := record.SplitKey(key) // get namespace (prefix of the key)
-	if err != nil {
-		return nil, fmt.Errorf("invalid key %s: %w", key, err)
-	}
-
-	if v.namespace != ns {
-		return nil, fmt.Errorf("expected namespace %s, got %s", v.namespace, ns)
-	}
-
-	dsKey := newDatastoreKey(v.namespace, suffix)
-
-	if err := v.validator.Validate(string(rec.GetKey()), rec.GetValue()); err != nil {
+	if err := r.validator.Validate(r.routingKey(key), rec.GetValue()); err != nil {
 		return nil, fmt.Errorf("put bad record: %w", err)
 	}
 
-	txn, err := v.datastore.NewTransaction(ctx, false)
+	txn, err := r.datastore.NewTransaction(ctx, false)
 	if err != nil {
 		return nil, fmt.Errorf("new transaction: %w", err)
 	}
 	defer txn.Discard(ctx) // discard is a no-op if txn was committed beforehand
 
-	shouldReplace, err := v.shouldReplaceExistingRecord(ctx, txn, dsKey, rec)
+	dsKey := newDatastoreKey(r.namespace, key)
+	shouldReplace, err := r.shouldReplaceExistingRecord(ctx, txn, dsKey, rec.GetValue())
 	if err != nil {
 		return nil, fmt.Errorf("checking datastore for better record: %w", err)
 	} else if !shouldReplace {
 		return nil, fmt.Errorf("received worse record")
 	}
@@ -91,20 +77,11 @@ func (v *RecordBackend) Store(ctx context.Context, key string, value any) (any,
 	return rec, nil
 }
 
-func (v *RecordBackend) Fetch(ctx context.Context, key string) (any, error) {
-	ns, suffix, err := record.SplitKey(key) // get namespace (prefix of the key)
-	if err != nil {
-		return nil, fmt.Errorf("invalid key %s: %w", key, err)
-	}
-
-	if v.namespace != ns {
-		return nil, fmt.Errorf("expected namespace %s, got %s", v.namespace, ns)
-	}
-
-	dsKey := newDatastoreKey(v.namespace, suffix)
+func (r *RecordBackend) Fetch(ctx context.Context, key string) (any, error) {
+	dsKey := newDatastoreKey(r.namespace, key)
 
 	// fetch record from the datastore for the requested key
-	buf, err := v.datastore.Get(ctx, dsKey)
+	buf, err := r.datastore.Get(ctx, dsKey)
 	if err != nil {
 		return nil, err
 	}
@@ -115,8 +92,8 @@ func (v *RecordBackend) Fetch(ctx context.Context, key string) (any, error) {
 	if err != nil {
 		// we have a corrupt record in the datastore -> delete it and pretend
 		// that we don't know about it
-		if err := v.datastore.Delete(ctx, dsKey); err != nil {
-			v.log.LogAttrs(ctx, slog.LevelWarn, "Failed deleting corrupt record from datastore", slog.String("err", err.Error()))
+		if err := r.datastore.Delete(ctx, dsKey); err != nil {
+			r.log.LogAttrs(ctx, slog.LevelWarn, "Failed deleting corrupt record from datastore", slog.String("err", err.Error()))
 		}
 
 		return nil, nil
@@ -124,15 +101,15 @@ func (v *RecordBackend) Fetch(ctx context.Context, key string) (any, error) {
 	// validate that we don't serve stale records.
 	receivedAt, err := time.Parse(time.RFC3339Nano, rec.GetTimeReceived())
-	if err != nil || time.Since(receivedAt) > v.cfg.MaxRecordAge {
+	if err != nil || time.Since(receivedAt) > r.cfg.MaxRecordAge {
 		errStr := ""
 		if err != nil {
 			errStr = err.Error()
 		}
 
-		v.log.LogAttrs(ctx, slog.LevelWarn, "Invalid received timestamp on stored record", slog.String("err", errStr), slog.Duration("age", time.Since(receivedAt)))
-		if err = v.datastore.Delete(ctx, dsKey); err != nil {
-			v.log.LogAttrs(ctx, slog.LevelWarn, "Failed deleting bad record from datastore", slog.String("err", err.Error()))
+		r.log.LogAttrs(ctx, slog.LevelWarn, "Invalid received timestamp on stored record", slog.String("err", errStr), slog.Duration("age", time.Since(receivedAt)))
+		if err = r.datastore.Delete(ctx, dsKey); err != nil {
+			r.log.LogAttrs(ctx, slog.LevelWarn, "Failed deleting bad record from datastore", slog.String("err", err.Error()))
 		}
 		return nil, nil
 	}
@@ -150,7 +127,7 @@ func (v *RecordBackend) Fetch(ctx context.Context, key string) (any, error) {
 // incoming one is "better" (e.g., just newer), this function returns true.
 // If unmarshalling or validation of the stored record fails, this function
 // also returns true because the existing record should be replaced.
-func (v *RecordBackend) shouldReplaceExistingRecord(ctx context.Context, txn ds.Read, dsKey ds.Key, newRec *recpb.Record) (bool, error) {
+func (r *RecordBackend) shouldReplaceExistingRecord(ctx context.Context, txn ds.Read, dsKey ds.Key, value []byte) (bool, error) {
 	ctx, span := tracer.Start(ctx, "DHT.shouldReplaceExistingRecord")
 	defer span.End()
@@ -166,12 +143,12 @@ func (v *RecordBackend) shouldReplaceExistingRecord(ctx context.Context, txn ds.
return true, nil } - if err := v.validator.Validate(string(existingRec.GetKey()), existingRec.GetValue()); err != nil { + if err := r.validator.Validate(string(existingRec.GetKey()), existingRec.GetValue()); err != nil { return true, nil } - records := [][]byte{newRec.GetValue(), existingRec.GetValue()} - i, err := v.validator.Select(string(newRec.GetKey()), records) + records := [][]byte{value, existingRec.GetValue()} + i, err := r.validator.Select(dsKey.String(), records) if err != nil { return false, fmt.Errorf("record selection: %w", err) } else if i != 0 { @@ -180,3 +157,9 @@ func (v *RecordBackend) shouldReplaceExistingRecord(ctx context.Context, txn ds. return true, nil } + +// routingKey returns the routing key for the given key by prefixing it with +// the namespace. +func (r *RecordBackend) routingKey(key string) string { + return fmt.Sprintf("/%s/%s", r.namespace, key) +} diff --git a/v2/config.go b/v2/config.go index 05a1473c..dff18d69 100644 --- a/v2/config.go +++ b/v2/config.go @@ -212,5 +212,23 @@ func (c *Config) Validate() error { return fmt.Errorf("stream idle timeout must be a positive duration") } + if c.ProtocolID == ProtocolIPFS && len(c.Backends) != 0 { + if len(c.Backends) != 3 { + return fmt.Errorf("ipfs protocol requires exactly three backends") + } + + if _, found := c.Backends[namespaceIPNS]; !found { + return fmt.Errorf("ipfs protocol requires an IPNS backend") + } + + if _, found := c.Backends[namespacePublicKey]; !found { + return fmt.Errorf("ipfs protocol requires a public key backend") + } + + if _, found := c.Backends[namespaceProviders]; !found { + return fmt.Errorf("ipfs protocol requires a providers backend") + } + } + return nil } diff --git a/v2/config_test.go b/v2/config_test.go index 8b67d2ba..14f46faf 100644 --- a/v2/config_test.go +++ b/v2/config_test.go @@ -72,6 +72,32 @@ func TestConfig_Validate(t *testing.T) { return c }, }, + { + // When we're using the IPFS protocol, we always require support + // for ipns, pk, and provider records. + // If the Backends map is empty and the IPFS protocol is configured, + // we automatically populate the DHT backends for these record + // types. 
+ name: "incompatible backends with ipfs protocol", + wantErr: true, + mutate: func(c *Config) *Config { + c.ProtocolID = ProtocolIPFS + c.Backends["another"] = &RecordBackend{} + return c + }, + }, + { + name: "additional backends for ipfs protocol", + wantErr: true, + mutate: func(c *Config) *Config { + c.ProtocolID = ProtocolIPFS + c.Backends[namespaceProviders] = &RecordBackend{} + c.Backends[namespaceIPNS] = &RecordBackend{} + c.Backends[namespacePublicKey] = &RecordBackend{} + c.Backends["another"] = &RecordBackend{} + return c + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/v2/handlers.go b/v2/handlers.go index 63632746..fbedd259 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -1,6 +1,7 @@ package dht import ( + "bytes" "context" "errors" "fmt" @@ -73,18 +74,22 @@ func (d *DHT) handlePutValue(ctx context.Context, remote peer.ID, req *pb.Messag return nil, fmt.Errorf("nil record") } - // key is /$namespace/$binary_id - ns, _, err := record.SplitKey(k) // get namespace (prefix of the key) - if err != nil { + if !bytes.Equal(req.GetKey(), rec.GetKey()) { + return nil, fmt.Errorf("key doesn't match record key") + } + + // key is /$namespace/BINARY_ID + ns, path, err := record.SplitKey(k) // get namespace (prefix of the key) + if err != nil || len(path) == 0 { return nil, fmt.Errorf("invalid key %s: %w", k, err) } backend, found := d.backends[ns] if !found { - return nil, fmt.Errorf("unsupported key namespace: %s", ns) + return nil, fmt.Errorf("unsupported record type: %s", ns) } - _, err = backend.Store(ctx, k, rec) + _, err = backend.Store(ctx, path, rec) return nil, err } @@ -103,8 +108,8 @@ func (d *DHT) handleGetValue(ctx context.Context, remote peer.ID, req *pb.Messag CloserPeers: d.closerPeers(ctx, remote, key.NewSha256(req.GetKey())), } - ns, _, err := record.SplitKey(k) // get namespace (prefix of the key) - if err != nil { + ns, path, err := record.SplitKey(k) // get namespace (prefix of the key) + if err != nil || path == "" { return nil, fmt.Errorf("invalid key %s: %w", k, err) } @@ -113,7 +118,7 @@ func (d *DHT) handleGetValue(ctx context.Context, remote peer.ID, req *pb.Messag return nil, fmt.Errorf("unsupported record type: %s", ns) } - fetched, err := backend.Fetch(ctx, k) + fetched, err := backend.Fetch(ctx, path) if err != nil { if errors.Is(err, ds.ErrNotFound) { return resp, nil @@ -128,6 +133,7 @@ func (d *DHT) handleGetValue(ctx context.Context, remote peer.ID, req *pb.Messag resp.Record = rec return resp, nil } + // the returned value wasn't a record pset, ok := fetched.(*providerSet) if ok { @@ -191,7 +197,7 @@ func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Me return nil, fmt.Errorf("unsupported record type: %s", namespaceProviders) } - fetched, err := backend.Fetch(ctx, fmt.Sprintf("/%s/%s", namespaceProviders, req.GetKey())) + fetched, err := backend.Fetch(ctx, string(req.GetKey())) if err != nil { return nil, fmt.Errorf("fetch providers from datastore: %w", err) } diff --git a/v2/handlers_test.go b/v2/handlers_test.go index 5b56e826..6c900f01 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -6,7 +6,6 @@ import ( "math/rand" "reflect" "strconv" - "strings" "sync" "testing" "time" @@ -17,6 +16,7 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p" pb "github.com/libp2p/go-libp2p-kad-dht/v2/pb" + record "github.com/libp2p/go-libp2p-record" recpb "github.com/libp2p/go-libp2p-record/pb" "github.com/libp2p/go-libp2p/core/crypto" 
"github.com/libp2p/go-libp2p/core/peer" @@ -25,10 +25,6 @@ import ( "github.com/stretchr/testify/require" ) -const ( - testPath = path.Path("/ipfs/bafkqac3jobxhgidsn5rww4yk") -) - var rng = rand.New(rand.NewSource(1337)) func newTestDHT(t testing.TB) *DHT { @@ -72,17 +68,6 @@ func newIdentity(t testing.TB) (peer.ID, crypto.PrivKey) { return id, priv } -func mustUnmarshalIpnsRecord(t *testing.T, data []byte) *ipns.Record { - r := &recpb.Record{} - err := r.Unmarshal(data) - require.NoError(t, err) - - rec, err := ipns.UnmarshalRecord(r.Value) - require.NoError(t, err) - - return rec -} - func fillRoutingTable(t testing.TB, d *DHT) { // 250 is a common number of peers to have in the routing table for i := 0; i < 250; i++ { @@ -437,6 +422,8 @@ func BenchmarkDHT_handlePing(b *testing.B) { } func newPutIPNSRequest(t testing.TB, priv crypto.PrivKey, seq uint64, eol time.Time, ttl time.Duration) *pb.Message { + testPath := path.Path("/ipfs/bafkqac3jobxhgidsn5rww4yk") + rec, err := ipns.NewRecord(priv, testPath, seq, eol, ttl) require.NoError(t, err) @@ -507,22 +494,27 @@ func BenchmarkDHT_handlePutValue_single_peer(b *testing.B) { } func TestDHT_handlePutValue_happy_path_ipns_record(t *testing.T) { + ctx := context.Background() + + // init new DHT d := newTestDHT(t) + // generate new identity for the peer that issues the request remote, priv := newIdentity(t) // expired record req := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) + ns, suffix, err := record.SplitKey(string(req.Key)) + require.NoError(t, err) - ctx := context.Background() - _, err := d.backends[namespaceIPNS].Fetch(ctx, string(req.Key)) + _, err = d.backends[ns].Fetch(ctx, suffix) require.ErrorIs(t, err, ds.ErrNotFound) cloned := proto.Clone(req).(*pb.Message) _, err = d.handlePutValue(ctx, remote, cloned) require.NoError(t, err) - dat, err := d.backends[namespaceIPNS].Fetch(ctx, string(req.Key)) + dat, err := d.backends[ns].Fetch(ctx, suffix) require.NoError(t, err) r, ok := dat.(*recpb.Record) @@ -536,36 +528,45 @@ func TestDHT_handlePutValue_happy_path_ipns_record(t *testing.T) { assert.True(t, reflect.DeepEqual(r, req.Record)) } -func TestDHT_handlePutValue_nil_record(t *testing.T) { +func TestDHT_handlePutValue_nil_records(t *testing.T) { d := newTestDHT(t) - req := &pb.Message{ - Type: pb.Message_PUT_VALUE, - Key: []byte("/ipns/random-key"), - Record: nil, // nil record - } + for _, ns := range []string{namespaceIPNS, namespacePublicKey} { + req := &pb.Message{ + Type: pb.Message_PUT_VALUE, + Key: []byte(fmt.Sprintf("/%s/random-key", ns)), + Record: nil, // nil record + } - resp, err := d.handlePutValue(context.Background(), newPeerID(t), req) - assert.Error(t, err) - assert.Nil(t, resp) - assert.ErrorContains(t, err, "nil record") + resp, err := d.handlePutValue(context.Background(), newPeerID(t), req) + assert.Error(t, err) + assert.Nil(t, resp) + assert.ErrorContains(t, err, "nil record") + } } func TestDHT_handlePutValue_record_key_mismatch(t *testing.T) { d := newTestDHT(t) - req := &pb.Message{ - Type: pb.Message_PUT_VALUE, - Key: []byte("/ipns/key-1"), - Record: &recpb.Record{ - Key: []byte("/ipns/key-2"), - }, - } + for _, ns := range []string{namespaceIPNS, namespacePublicKey} { + t.Run(ns, func(t *testing.T) { + key1 := []byte(fmt.Sprintf("/%s/key-1", ns)) + key2 := []byte(fmt.Sprintf("/%s/key-2", ns)) + + req := &pb.Message{ + Type: pb.Message_PUT_VALUE, + Key: key1, + Record: &recpb.Record{ + Key: key2, + }, + } - resp, err := d.handlePutValue(context.Background(), newPeerID(t), req) - 
assert.Error(t, err) - assert.Nil(t, resp) - assert.ErrorContains(t, err, "key doesn't match record key") + resp, err := d.handlePutValue(context.Background(), newPeerID(t), req) + assert.Error(t, err) + assert.Nil(t, resp) + assert.ErrorContains(t, err, "key doesn't match record key") + }) + } } func TestDHT_handlePutValue_bad_ipns_record(t *testing.T) { @@ -591,7 +592,6 @@ func TestDHT_handlePutValue_worse_ipns_record_after_first_put(t *testing.T) { worseReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) for i, req := range []*pb.Message{goodReq, worseReq} { - resp, err := d.handlePutValue(context.Background(), remote, req) switch i { case 0: @@ -615,8 +615,6 @@ func TestDHT_handlePutValue_probe_race_condition(t *testing.T) { remote, priv := newIdentity(t) - ipnsKey := ipns.NameFromPeer(remote).RoutingKey() - for i := 0; i < 100; i++ { req1 := newPutIPNSRequest(t, priv, uint64(2*i), time.Now().Add(time.Hour), time.Hour) @@ -637,7 +635,10 @@ func TestDHT_handlePutValue_probe_race_condition(t *testing.T) { }() wg.Wait() - val, err := d.backends[namespaceIPNS].Fetch(context.Background(), string(ipnsKey)) + // an IPNS record key has the form /ipns/BINARY_ID where binary_id + // is just the peer ID of the peer that belongs to the IPNS record. + // Therefore, we can just string-cast the remote peer.ID here. + val, err := d.backends[namespaceIPNS].Fetch(context.Background(), string(remote)) require.NoError(t, err) r, ok := val.(*recpb.Record) @@ -661,27 +662,76 @@ func TestDHT_handlePutValue_overwrites_corrupt_stored_ipns_record(t *testing.T) req := newPutIPNSRequest(t, priv, 10, time.Now().Add(time.Hour), time.Hour) + dsKey := newDatastoreKey(namespaceIPNS, string(remote)) // string(remote) is the key suffix + rbe, ok := d.backends[namespaceIPNS].(*RecordBackend) require.True(t, ok) - err := rbe.datastore.Put(context.Background(), ds.NewKey(string(req.GetKey())), []byte("corrupt-record")) + err := rbe.datastore.Put(context.Background(), dsKey, []byte("corrupt-record")) require.NoError(t, err) // put the correct record through handler _, err = d.handlePutValue(context.Background(), remote, req) require.NoError(t, err) - // check if the corrupt record was overwritten - val, err := d.backends[namespaceIPNS].Fetch(context.Background(), string(req.GetKey())) + value, err := rbe.datastore.Get(context.Background(), dsKey) require.NoError(t, err) - r, ok := val.(*recpb.Record) - require.True(t, ok) + r := &recpb.Record{} + require.NoError(t, r.Unmarshal(value)) _, err = ipns.UnmarshalRecord(r.Value) require.NoError(t, err) } +func TestDHT_handlePutValue_malformed_key(t *testing.T) { + d := newTestDHT(t) + + keys := []string{ + "malformed-key", + " ", + "/ipns/", + "/pk/", + "/ipns", + "/pk", + "ipns/", + "pk/", + } + for _, k := range keys { + t.Run("malformed key: "+k, func(t *testing.T) { + req := &pb.Message{ + Type: pb.Message_PUT_VALUE, + Key: []byte(k), + Record: &recpb.Record{ + Key: []byte(k), + }, + } + + resp, err := d.handlePutValue(context.Background(), newPeerID(t), req) + assert.Error(t, err) + assert.Nil(t, resp) + assert.ErrorContains(t, err, "invalid key") + }) + } +} + +func TestDHT_handlePutValue_unknown_backend(t *testing.T) { + d := newTestDHT(t) + + req := &pb.Message{ + Type: pb.Message_PUT_VALUE, + Key: []byte("/other-namespace/record-key"), + Record: &recpb.Record{ + Key: []byte("/other-namespace/record-key"), + }, + } + + resp, err := d.handlePutValue(context.Background(), newPeerID(t), req) + assert.Error(t, err) + assert.Nil(t, resp) + 
assert.ErrorContains(t, err, "unsupported record type") +} + func BenchmarkDHT_handleGetValue(b *testing.B) { d := newTestDHT(b) @@ -701,7 +751,7 @@ func BenchmarkDHT_handleGetValue(b *testing.B) { data, err := putReq.Record.Marshal() require.NoError(b, err) - dsKey := newDatastoreKey(namespaceIPNS, strings.TrimPrefix(string(putReq.GetKey()), fmt.Sprintf("/%s/", namespaceIPNS))) + dsKey := newDatastoreKey(namespaceIPNS, string(pid)) err = rbe.datastore.Put(context.Background(), dsKey, data) require.NoError(b, err) @@ -731,17 +781,17 @@ func TestDHT_handleGetValue_happy_path_ipns_record(t *testing.T) { fillRoutingTable(t, d) - rbe, ok := d.backends[namespaceIPNS].(*RecordBackend) - require.True(t, ok) - remote, priv := newIdentity(t) putReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) + rbe, ok := d.backends[namespaceIPNS].(*RecordBackend) + require.True(t, ok) + data, err := putReq.Record.Marshal() require.NoError(t, err) - dsKey := newDatastoreKey(namespaceIPNS, strings.TrimPrefix(string(putReq.GetKey()), fmt.Sprintf("/%s/", namespaceIPNS))) + dsKey := newDatastoreKey(namespaceIPNS, string(remote)) err = rbe.datastore.Put(context.Background(), dsKey, data) require.NoError(t, err) @@ -766,19 +816,23 @@ func TestDHT_handleGetValue_record_not_found(t *testing.T) { fillRoutingTable(t, d) - req := &pb.Message{ - Type: pb.Message_GET_VALUE, - Key: []byte("/ipns/unknown-record-key"), - } + for _, ns := range []string{namespaceIPNS, namespacePublicKey} { + t.Run(ns, func(t *testing.T) { + req := &pb.Message{ + Type: pb.Message_GET_VALUE, + Key: []byte(fmt.Sprintf("/%s/unknown-record-key", ns)), + } - resp, err := d.handleGetValue(context.Background(), newPeerID(t), req) - require.NoError(t, err) + resp, err := d.handleGetValue(context.Background(), newPeerID(t), req) + require.NoError(t, err) - assert.Equal(t, pb.Message_GET_VALUE, resp.Type) - assert.Equal(t, req.Key, resp.Key) - assert.Nil(t, resp.Record) - assert.Len(t, resp.CloserPeers, 20) - assert.Len(t, resp.ProviderPeers, 0) + assert.Equal(t, pb.Message_GET_VALUE, resp.Type) + assert.Equal(t, req.Key, resp.Key) + assert.Nil(t, resp.Record) + assert.Len(t, resp.CloserPeers, 20) + assert.Len(t, resp.ProviderPeers, 0) + }) + } } func TestDHT_handleGetValue_corrupt_record_in_datastore(t *testing.T) { @@ -786,49 +840,52 @@ func TestDHT_handleGetValue_corrupt_record_in_datastore(t *testing.T) { fillRoutingTable(t, d) - rbe, ok := d.backends[namespaceIPNS].(*RecordBackend) - require.True(t, ok) + for _, ns := range []string{namespaceIPNS, namespacePublicKey} { + t.Run(ns, func(t *testing.T) { + rbe, ok := d.backends[ns].(*RecordBackend) + require.True(t, ok) - key := []byte("/ipns/record-key") + key := []byte(fmt.Sprintf("/%s/record-key", ns)) - dsKey := newDatastoreKey(namespaceIPNS, "record-key") + dsKey := newDatastoreKey(ns, "record-key") + err := rbe.datastore.Put(context.Background(), dsKey, []byte("corrupt-data")) + require.NoError(t, err) - err := rbe.datastore.Put(context.Background(), dsKey, []byte("corrupt-data")) - require.NoError(t, err) - - req := &pb.Message{ - Type: pb.Message_GET_VALUE, - Key: key, - } + req := &pb.Message{ + Type: pb.Message_GET_VALUE, + Key: key, + } - resp, err := d.handleGetValue(context.Background(), newPeerID(t), req) - require.NoError(t, err) + resp, err := d.handleGetValue(context.Background(), newPeerID(t), req) + require.NoError(t, err) - assert.Equal(t, pb.Message_GET_VALUE, resp.Type) - assert.Equal(t, req.Key, resp.Key) - assert.Nil(t, resp.Record) - assert.Len(t, 
resp.CloserPeers, 20) - assert.Len(t, resp.ProviderPeers, 0) + assert.Equal(t, pb.Message_GET_VALUE, resp.Type) + assert.Equal(t, req.Key, resp.Key) + assert.Nil(t, resp.Record) + assert.Len(t, resp.CloserPeers, 20) + assert.Len(t, resp.ProviderPeers, 0) - // check that the record was deleted from the datastore - data, err := rbe.datastore.Get(context.Background(), dsKey) - assert.ErrorIs(t, err, ds.ErrNotFound) - assert.Len(t, data, 0) + // check that the record was deleted from the datastore + data, err := rbe.datastore.Get(context.Background(), dsKey) + assert.ErrorIs(t, err, ds.ErrNotFound) + assert.Len(t, data, 0) + }) + } } -func TestDHT_handleGetValue_max_age_exceeded_record_in_datastore(t *testing.T) { +func TestDHT_handleGetValue_ipns_max_age_exceeded_in_datastore(t *testing.T) { d := newTestDHT(t) fillRoutingTable(t, d) - rbe, ok := d.backends[namespaceIPNS].(*RecordBackend) - require.True(t, ok) - remote, priv := newIdentity(t) putReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) - dsKey := newDatastoreKey(namespaceIPNS, strings.TrimPrefix(string(putReq.GetKey()), fmt.Sprintf("/%s/", namespaceIPNS))) + rbe, ok := d.backends[namespaceIPNS].(*RecordBackend) + require.True(t, ok) + + dsKey := newDatastoreKey(namespaceIPNS, string(remote)) data, err := putReq.Record.Marshal() require.NoError(t, err) @@ -874,7 +931,7 @@ func TestDHT_handleGetValue_does_not_validate_stored_record(t *testing.T) { data, err := putReq.Record.Marshal() require.NoError(t, err) - dsKey := newDatastoreKey(namespaceIPNS, strings.TrimPrefix(string(putReq.GetKey()), fmt.Sprintf("/%s/", namespaceIPNS))) + dsKey := newDatastoreKey(namespaceIPNS, string(remote)) err = rbe.datastore.Put(context.Background(), dsKey, data) require.NoError(t, err) @@ -894,3 +951,52 @@ func TestDHT_handleGetValue_does_not_validate_stored_record(t *testing.T) { assert.Len(t, resp.CloserPeers, 20) assert.Len(t, resp.ProviderPeers, 0) } + +func TestDHT_handleGetValue_malformed_key(t *testing.T) { + d := newTestDHT(t) + + keys := []string{ + "malformed-key", + " ", + "/ipns/", + "/pk/", + "/ipns", + "/pk", + "ipns/", + "pk/", + } + for _, k := range keys { + t.Run("malformed key: "+k, func(t *testing.T) { + req := &pb.Message{ + Type: pb.Message_GET_VALUE, + Key: []byte(k), + } + + resp, err := d.handleGetValue(context.Background(), newPeerID(t), req) + assert.Error(t, err) + assert.Nil(t, resp) + assert.ErrorContains(t, err, "invalid key") + }) + } +} + +func TestDHT_handleGetValue_unknown_backend(t *testing.T) { + d := newTestDHT(t) + + req := &pb.Message{ + Type: pb.Message_GET_VALUE, + Key: []byte("/other-namespace/record-key"), + Record: &recpb.Record{ + Key: []byte("/other-namespace/record-key"), + }, + } + + resp, err := d.handleGetValue(context.Background(), newPeerID(t), req) + assert.Error(t, err) + assert.Nil(t, resp) + assert.ErrorContains(t, err, "unsupported record type") +} + +func TestDHT_handleGetValue_supports_providers(t *testing.T) { + t.Skip("TODO") +} From 015617e4fe37c5951e74ecdfe966d2a63a167849 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Wed, 23 Aug 2023 17:06:27 +0200 Subject: [PATCH 20/64] improve documentation --- v2/backend.go | 43 ++++++++++++---------- v2/backend_provider.go | 4 +- v2/config.go | 8 +++- v2/handlers.go | 2 +- v2/handlers_test.go | 2 +- v2/pb/dht.aux.go | 14 +++++++ v2/pb/{message_test.go => dht.aux_test.go} | 0 v2/pb/message.go | 19 ---------- 8 files changed, 47 insertions(+), 45 deletions(-) rename v2/pb/{message_test.go => dht.aux_test.go} (100%) delete 
mode 100644 v2/pb/message.go

diff --git a/v2/backend.go b/v2/backend.go
index 7bd8c068..6f3cd57e 100644
--- a/v2/backend.go
+++ b/v2/backend.go
@@ -18,15 +18,16 @@ const (
 	namespaceProviders = "providers"
 )
 
-// A Backend implementation handles requests from other peers. Depending on the
-// keys root namespace, we pass the request to the corresponding backend. For
-// example, the root namespace for the key "/ipns/BINARY_ID" is "ipns." If
-// we receive a PUT_VALUE request from another peer for the above key, we will
-// pass the included record to the "ipns backend." This backend is responsible
-// for validating the record and storing or discarding it. The same applies for,
-// e.g., "/providers/..." keys which we will receive for ADD_PROVIDER and
-// GET_PROVIDERS requests. The [ProvidersBackend] will take care of storing the
-// records so that they can be retrieved efficiently via Fetch.
+// A Backend implementation handles requests for certain record types from other
+// peers. A Backend always belongs to a certain namespace. In this case, a
+// namespace is equivalent to a type of record that this DHT supports. In the
+// case of IPFS, the DHT supports the "ipns", "pk", and "providers" namespaces
+// and therefore uses three different backends. Depending on the request's key,
+// the DHT invokes the corresponding backend's Store and Fetch methods. A key
+// has the structure "/$namespace/$path". The DHT uses the $namespace
+// part to decide which Backend to use. The $path part is then passed to the
+// Backend's Store and Fetch methods as the "key" parameter. Backends for
+// different namespaces may or may not operate on the same underlying datastore.
 //
 // To support additional record types, users would implement this Backend
 // interface and register it for a custom namespace with the [DHT] [Config] by
@@ -36,17 +37,18 @@ const (
 // that type because provider records are handled slightly differently. For
 // example, with provider records, the return values are not assigned to the
 // [pb.Message.Record] field but to the [pb.Message.ProviderPeers] field.
+//
+// This repository defines default Backends for the "ipns", "pk", and
+// "providers" namespaces. They can be instantiated with [NewBackendIPNS],
+// [NewBackendPublicKey], and [NewBackendProvider] respectively.
 type Backend interface {
-	// Store stores the given value at the give key (prefixed with the namespace
-	// that this backend operates in). It returns the written record. The key
+	// Store stores the given value such that it can be retrieved via Fetch
+	// with the same key parameter. It returns the written record. The key
 	// that will be handed into the Store won't contain the namespace prefix. For
-	// example, if we receive a request for /ipns/BINARY_ID, key will be set to
-	// BINARY_ID. The backend implementation is free to decide how to store the
+	// example, if we receive a request for /ipns/$binary_id, key will be set to
+	// $binary_id. The backend implementation is free to decide how to store the
 	// data in the datastore. However, it makes sense to prefix the record with
-	// the namespace that this Backend operates in. The written record that gets
-	// returned from this method could have a different type than the value that
-	// was passed into Store, or it could be enriched with additional information
-	// like the timestamp when it was written.
+	// the namespace that this Backend operates in.
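+	//
+	// As an illustrative sketch only (the myBackend type, its datastore
+	// field, and the "my-namespace" namespace are hypothetical and not part
+	// of this change), a custom Store implementation could look like this:
+	//
+	//	func (b *myBackend) Store(ctx context.Context, key string, value any) (any, error) {
+	//		rec, ok := value.(*recpb.Record)
+	//		if !ok {
+	//			return nil, fmt.Errorf("expected *recpb.Record, got %T", value)
+	//		}
+	//
+	//		data, err := rec.Marshal()
+	//		if err != nil {
+	//			return nil, err
+	//		}
+	//
+	//		// prefix the datastore key with the namespace this backend serves
+	//		return rec, b.datastore.Put(ctx, ds.NewKey("/my-namespace/"+key), data)
+	//	}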
Store(ctx context.Context, key string, value any) (any, error) // Fetch returns the record for the given path or a [ds.ErrNotFound] if it @@ -92,9 +94,10 @@ func NewBackendPublicKey(ds ds.TxnDatastore, cfg *RecordBackendConfig) *RecordBa // NewBackendProvider initializes a new backend for the "providers" namespace // that can store and fetch provider records from the given datastore. The -// values passed into Store must be of type [peer.AddrInfo]. The values returned -// from Fetch will be of type [providerSet] (unexported). The cfg parameter can -// be nil, in which case the [DefaultProviderBackendConfig] will be used. +// values passed into [ProvidersBackend.Store] must be of type [peer.AddrInfo]. +// The values returned from [ProvidersBackend.Fetch] will be of type +// [*providerSet] (unexported). The cfg parameter can be nil, in which case the +// [DefaultProviderBackendConfig] will be used. func NewBackendProvider(pstore peerstore.Peerstore, dstore ds.Batching, cfg *ProviderBackendConfig) (*ProvidersBackend, error) { if cfg == nil { cfg = DefaultProviderBackendConfig() diff --git a/v2/backend_provider.go b/v2/backend_provider.go index 1a249dac..af1035c4 100644 --- a/v2/backend_provider.go +++ b/v2/backend_provider.go @@ -281,8 +281,8 @@ func (ps *providerSet) addProvider(addrInfo peer.AddrInfo, t time.Time) { // newDatastoreKey assembles a datastore for the given namespace and set of // binary strings. For example, the IPNS record keys have the format: -// "/ipns/BINARY_ID" (see [Routing Record]). To construct a datastore key this -// function base32-encodes the BINARY_ID (and any additional path components) +// "/ipns/$binary_id" (see [Routing Record]). To construct a datastore key this +// function base32-encodes the $binary_id (and any additional path components) // and joins the parts together separated by forward slashes. // // [Routing Record]: https://specs.ipfs.tech/ipns/ipns-record/#routing-record diff --git a/v2/config.go b/v2/config.go index dff18d69..83951d9d 100644 --- a/v2/config.go +++ b/v2/config.go @@ -24,7 +24,7 @@ const ( // ProtocolIPFS is the protocol identifier for the main IPFS network. If the // DHT is configured with this protocol, you must configure backends for // IPNS, Public Key, and provider records (ipns, pk, and providers - // namespaces). + // namespaces). Configuration validation will fail if backends are missing. ProtocolIPFS protocol.ID = "/ipfs/kad/1.0.0" // ProtocolFilecoin is the protocol identifier for Filecoin mainnet. If this @@ -121,7 +121,7 @@ type Config struct { // The Backends field holds a map of key namespaces to their corresponding // backend implementation. For example, if we received an IPNS record, the - // key will have the form "/ipns/binary_id". We will forward the handling + // key will have the form "/ipns/$binary_id". We will forward the handling // of this record to the corresponding backend behind the "ipns" key in this // map. A backend does record validation and handles the storage of the // record. If this map stays empty, it will be populated with the default @@ -132,6 +132,10 @@ type Config struct { // Datastore will be used to construct the default backends. If this is nil, // an in-memory leveldb from [InMemoryDatastore] will be used for all // backends. + // If you want to use individual datastores per backend, you will need to + // construct them individually and register them with the above Backends + // map. 
Note that if you configure the DHT to use [ProtocolIPFS] it is + // required to register backends for the ipns, pk, and providers namespaces. Datastore Datastore // Logger can be used to configure a custom structured logger instance. diff --git a/v2/handlers.go b/v2/handlers.go index fbedd259..3d021031 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -78,7 +78,7 @@ func (d *DHT) handlePutValue(ctx context.Context, remote peer.ID, req *pb.Messag return nil, fmt.Errorf("key doesn't match record key") } - // key is /$namespace/BINARY_ID + // key is /$namespace/$binary_id ns, path, err := record.SplitKey(k) // get namespace (prefix of the key) if err != nil || len(path) == 0 { return nil, fmt.Errorf("invalid key %s: %w", k, err) diff --git a/v2/handlers_test.go b/v2/handlers_test.go index 6c900f01..04583415 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -635,7 +635,7 @@ func TestDHT_handlePutValue_probe_race_condition(t *testing.T) { }() wg.Wait() - // an IPNS record key has the form /ipns/BINARY_ID where binary_id + // an IPNS record key has the form /ipns/$binary_id where $binary_id // is just the peer ID of the peer that belongs to the IPNS record. // Therefore, we can just string-cast the remote peer.ID here. val, err := d.backends[namespaceIPNS].Fetch(context.Background(), string(remote)) diff --git a/v2/pb/dht.aux.go b/v2/pb/dht.aux.go index 3f29cd45..abc589c1 100644 --- a/v2/pb/dht.aux.go +++ b/v2/pb/dht.aux.go @@ -7,6 +7,20 @@ import ( ma "github.com/multiformats/go-multiaddr" ) +// FromAddrInfo constructs a Message_Peer from the given peer.AddrInfo +func FromAddrInfo(p peer.AddrInfo) Message_Peer { + mp := Message_Peer{ + Id: byteString(p.ID), + Addrs: make([][]byte, len(p.Addrs)), + } + + for i, maddr := range p.Addrs { + mp.Addrs[i] = maddr.Bytes() // Bytes, not String. Compressed. + } + + return mp +} + // ContainsCloserPeer returns true if the provided peer ID is among the // list of closer peers contained in this message. func (m *Message) ContainsCloserPeer(pid peer.ID) bool { diff --git a/v2/pb/message_test.go b/v2/pb/dht.aux_test.go similarity index 100% rename from v2/pb/message_test.go rename to v2/pb/dht.aux_test.go diff --git a/v2/pb/message.go b/v2/pb/message.go deleted file mode 100644 index ab1db7f1..00000000 --- a/v2/pb/message.go +++ /dev/null @@ -1,19 +0,0 @@ -package dht_pb - -import ( - "github.com/libp2p/go-libp2p/core/peer" -) - -// FromAddrInfo constructs a Message_Peer from the given peer.AddrInfo -func FromAddrInfo(p peer.AddrInfo) Message_Peer { - mp := Message_Peer{ - Id: byteString(p.ID), - Addrs: make([][]byte, len(p.Addrs)), - } - - for i, maddr := range p.Addrs { - mp.Addrs[i] = maddr.Bytes() // Bytes, not String. Compressed. - } - - return mp -} From 38c2d32c5742b727794759f2251b26410f9e6c1f Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Wed, 23 Aug 2023 18:58:53 +0200 Subject: [PATCH 21/64] add add providers tests --- v2/backend.go | 2 +- v2/backend_provider.go | 94 ++++++++--- v2/dht.go | 8 +- v2/handlers.go | 25 +-- v2/handlers_test.go | 348 ++++++++++++++++++++++++++++++++++++++++- 5 files changed, 437 insertions(+), 40 deletions(-) diff --git a/v2/backend.go b/v2/backend.go index 6f3cd57e..d831b0bb 100644 --- a/v2/backend.go +++ b/v2/backend.go @@ -98,7 +98,7 @@ func NewBackendPublicKey(ds ds.TxnDatastore, cfg *RecordBackendConfig) *RecordBa // The values returned from [ProvidersBackend.Fetch] will be of type // [*providerSet] (unexported). 
The cfg parameter can be nil, in which case the // [DefaultProviderBackendConfig] will be used. -func NewBackendProvider(pstore peerstore.Peerstore, dstore ds.Batching, cfg *ProviderBackendConfig) (*ProvidersBackend, error) { +func NewBackendProvider(pstore peerstore.Peerstore, dstore ds.Batching, cfg *ProvidersBackendConfig) (*ProvidersBackend, error) { if cfg == nil { cfg = DefaultProviderBackendConfig() } diff --git a/v2/backend_provider.go b/v2/backend_provider.go index af1035c4..f4091dc9 100644 --- a/v2/backend_provider.go +++ b/v2/backend_provider.go @@ -7,6 +7,7 @@ import ( "path" "strings" "sync" + "sync/atomic" "time" lru "github.com/hashicorp/golang-lru/v2" @@ -22,21 +23,42 @@ import ( // ProvidersBackend implements the [Backend] interface and handles provider // record requests for the "/providers/" namespace. type ProvidersBackend struct { - namespace string // the namespace string, usually set to namespaceProviders ("providers") - cfg *ProviderBackendConfig // default is given by DefaultProviderBackendConfig - log *slog.Logger // convenience accessor of cfg.Logger - cache *lru.Cache[string, providerSet] // LRU cache for frequently requested records. TODO: is that really so effective? The cache size is quite low either. - peerstore peerstore.Peerstore // reference to the peer store to store and fetch peer multiaddresses from (we don't save them in the datastore) - datastore *autobatch.Datastore // the datastore where we save the peer IDs providing a certain multihash - gcSkip sync.Map // a sync map that marks records as to-be-skipped by the garbage collection process + // namespace holds the namespace string - usually + // this is set to namespaceProviders ("providers") + namespace string + + // cfg is set to DefaultProviderBackendConfig by default + cfg *ProvidersBackendConfig + + // log is convenience accessor of cfg.Logger + log *slog.Logger + + // cache is a LRU cache for frequently requested records. It is populated + // when peers request a record and pruned during garbage collection. + // TODO: is that really so effective? The cache size is quite low either. + cache *lru.Cache[string, providerSet] + + // peerstore holds a reference to the peer store to store and fetch peer + // multiaddresses from (we don't save them in the datastore). + peerstore peerstore.Peerstore + + // datastore is where we save the peer IDs providing a certain multihash + datastore *autobatch.Datastore + + // gcSkip is a sync map that marks records as to-be-skipped by the garbage + // collection process. TODO: this is a sub-optimal pattern. + gcSkip sync.Map + + // gcActive indicates whether garbage collection is scheduled + gcActive atomic.Bool } var _ Backend = (*ProvidersBackend)(nil) -// ProviderBackendConfig is used to construct a [ProvidersBackend]. Use +// ProvidersBackendConfig is used to construct a [ProvidersBackend]. Use // [DefaultProviderBackendConfig] to get a default configuration struct and then // modify it to your liking. -type ProviderBackendConfig struct { +type ProvidersBackendConfig struct { // ProvideValidity specifies for how long provider records are valid ProvideValidity time.Duration @@ -53,6 +75,9 @@ type ProviderBackendConfig struct { // CacheSize specifies the LRU cache size CacheSize int + // GCInterval defines how frequently garbage collection should run + GCInterval time.Duration + // Logger is the logger to use Logger *slog.Logger } @@ -61,12 +86,13 @@ type ProviderBackendConfig struct { // configuration. Use this as a starting point and modify it. 
If a nil // configuration is passed to [NewBackendProvider], this default configuration // here is used. -func DefaultProviderBackendConfig() *ProviderBackendConfig { - return &ProviderBackendConfig{ +func DefaultProviderBackendConfig() *ProvidersBackendConfig { + return &ProvidersBackendConfig{ ProvideValidity: time.Hour * 48, // empirically measured in: https://github.com/plprobelab/network-measurements/blob/master/results/rfm17-provider-record-liveness.md AddressTTL: 24 * time.Hour, - BatchSize: 256, // MAGIC - CacheSize: 256, // MAGIC + BatchSize: 256, // MAGIC + CacheSize: 256, // MAGIC + GCInterval: time.Hour, // MAGIC Logger: slog.Default(), } } @@ -97,7 +123,7 @@ func (p *ProvidersBackend) Store(ctx context.Context, key string, value any) (an if err := p.datastore.Put(ctx, dsKey, rec.MarshalBinary()); err != nil { p.cache.Remove(cacheKey) - // if we have just added the key to the gc skip list, delete it again + // if we have just added the key to the collectGarbage skip list, delete it again // if we have added it in a previous Store invocation, keep it around if !found { p.gcSkip.Delete(dsKey.String()) @@ -174,14 +200,46 @@ func (p *ProvidersBackend) Fetch(ctx context.Context, key string) (any, error) { return out, nil } -// CollectGarbage sweeps through the datastore and deletes all provider records +// StartGarbageCollection starts the garbage collection loop. The garbage +// collection interval can be configured with [ProvidersBackendConfig.GCInterval]. +func (p *ProvidersBackend) StartGarbageCollection(ctx context.Context) { + if p.gcActive.Swap(true) { + p.log.Info("Provider backend's garbage collection is already running") + return + } + + p.log.Info("Provider backend's started garbage collection schedule") + + ticker := time.NewTicker(p.cfg.GCInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + p.gcActive.Store(false) + p.log.Info("Provider backend's garbage collection stopped") + return + case <-ticker.C: + p.collectGarbage(ctx) + } + } +} + +// collectGarbage sweeps through the datastore and deletes all provider records // that have expired. A record is expired if the -// [ProviderBackendConfig].ProvideValidity is exceeded. -func (p *ProvidersBackend) CollectGarbage(ctx context.Context) { +// [ProvidersBackendConfig].ProvideValidity is exceeded. +func (p *ProvidersBackend) collectGarbage(ctx context.Context) { + p.log.Info("Provider backend starting garbage collection...") + defer p.log.Info("Provider backend finished garbage collection!") + // Faster to purge than garbage collecting p.cache.Purge() - p.gcSkip = sync.Map{} // TODO: racy + // erase map + p.gcSkip.Range(func(key interface{}, value interface{}) bool { + p.gcSkip.Delete(key) + return true + }) // Now, kick off a GC of the datastore. q, err := p.datastore.Query(ctx, dsq.Query{Prefix: p.namespace}) diff --git a/v2/dht.go b/v2/dht.go index 2a2f5215..c5cd901d 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -138,13 +138,7 @@ func (d *DHT) Close() error { d.log.With("err", err).Debug("failed closing event bus subscription") } - //// If the user hasn't configured a custom datastore, the responsibility is - //// on us to properly clean up after ourselves. - //if d.cfg.Datastore == nil { - // if err := d.ds.Close(); err != nil { - // d.log.With("err", err).Debug("failed closing default datastore") - // } - //} + // TODO clean up backends // kill all active streams using the DHT protocol. 
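+	// Note that this also aborts any requests that are still in flight on
+	// those streams.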
for _, c := range d.host.Network().Conns() { diff --git a/v2/handlers.go b/v2/handlers.go index 3d021031..16e08a7f 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -155,26 +155,31 @@ func (d *DHT) handleAddProvider(ctx context.Context, remote peer.ID, req *pb.Mes return nil, fmt.Errorf("key size too large") } else if len(k) == 0 { return nil, fmt.Errorf("key is empty") + } else if len(req.GetProviderPeers()) == 0 { + return nil, fmt.Errorf("no provider peers given") } - backend, ok := d.cfg.Backends[namespaceProviders] - if !ok { - return nil, fmt.Errorf("unsupported record type: %s", namespaceProviders) - } - + var addrInfos []peer.AddrInfo for _, addrInfo := range req.ProviderAddrInfos() { addrInfo := addrInfo // TODO: remove after go.mod was updated to go 1.21 if addrInfo.ID != remote { - d.log.Debug("remote attempted to store provider record for other peer", "remote", remote, "other", addrInfo.ID) - continue + return nil, fmt.Errorf("attempted to store provider record for other peer %s", addrInfo.ID) } if len(addrInfo.Addrs) == 0 { - d.log.Debug("no valid addresses for provider", "remote", addrInfo.ID) - continue + return nil, fmt.Errorf("no addresses for provider") } + addrInfos = append(addrInfos, addrInfo) + } + + backend, ok := d.backends[namespaceProviders] + if !ok { + return nil, fmt.Errorf("unsupported record type: %s", namespaceProviders) + } + + for _, addrInfo := range addrInfos { if _, err := backend.Store(ctx, k, addrInfo); err != nil { return nil, fmt.Errorf("storing provider record: %w", err) } @@ -192,7 +197,7 @@ func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Me return nil, fmt.Errorf("handleGetProviders key is empty") } - backend, ok := d.cfg.Backends[namespaceProviders] + backend, ok := d.backends[namespaceProviders] if !ok { return nil, fmt.Errorf("unsupported record type: %s", namespaceProviders) } diff --git a/v2/handlers_test.go b/v2/handlers_test.go index 04583415..d8ee390b 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -986,9 +986,6 @@ func TestDHT_handleGetValue_unknown_backend(t *testing.T) { req := &pb.Message{ Type: pb.Message_GET_VALUE, Key: []byte("/other-namespace/record-key"), - Record: &recpb.Record{ - Key: []byte("/other-namespace/record-key"), - }, } resp, err := d.handleGetValue(context.Background(), newPeerID(t), req) @@ -998,5 +995,348 @@ func TestDHT_handleGetValue_unknown_backend(t *testing.T) { } func TestDHT_handleGetValue_supports_providers(t *testing.T) { - t.Skip("TODO") + ctx := context.Background() + d := newTestDHT(t) + + p := newAddrInfo(t) + key := []byte("random-key") + + fillRoutingTable(t, d) + + // add to addresses peerstore + d.host.Peerstore().AddAddrs(p.ID, p.Addrs, time.Hour) + + be, ok := d.backends[namespaceProviders].(*ProvidersBackend) + require.True(t, ok) + + // write to datastore + dsKey := newDatastoreKey(namespaceProviders, string(key), string(p.ID)) + rec := expiryRecord{expiry: time.Now()} + err := be.datastore.Put(ctx, dsKey, rec.MarshalBinary()) + require.NoError(t, err) + + req := &pb.Message{ + Type: pb.Message_GET_VALUE, + Key: []byte("/providers/random-key"), + } + + res, err := d.handleGetValue(context.Background(), newPeerID(t), req) + assert.NoError(t, err) + + assert.Equal(t, pb.Message_GET_VALUE, res.Type) + assert.Equal(t, req.Key, res.Key) + assert.Nil(t, res.Record) + assert.Len(t, res.CloserPeers, 20) + require.Len(t, res.ProviderPeers, 1) + for _, p := range res.ProviderPeers { + assert.Len(t, p.Addresses(), 1) + } + + cacheKey := 
newDatastoreKey(be.namespace, string(key)) + set, found := be.cache.Get(cacheKey.String()) + require.True(t, found) + assert.Len(t, set.providers, 1) +} + +func newAddrInfo(t testing.TB) peer.AddrInfo { + return peer.AddrInfo{ + ID: newPeerID(t), + Addrs: []ma.Multiaddr{ + ma.StringCast("/ip4/100.100.100.100/tcp/2000"), + }, + } +} + +func newAddProviderRequest(key []byte, addrInfos ...peer.AddrInfo) *pb.Message { + providerPeers := make([]pb.Message_Peer, len(addrInfos)) + for i, addrInfo := range addrInfos { + providerPeers[i] = pb.FromAddrInfo(addrInfo) + } + + return &pb.Message{ + Type: pb.Message_ADD_PROVIDER, + Key: key, + ProviderPeers: providerPeers, + } +} + +func BenchmarkDHT_handleAddProvider_unique_peers(b *testing.B) { + d := newTestDHT(b) + + // build requests + peers := make([]peer.ID, b.N) + reqs := make([]*pb.Message, b.N) + for i := 0; i < b.N; i++ { + addrInfo := newAddrInfo(b) + req := newAddProviderRequest([]byte(fmt.Sprintf("key-%d", i)), addrInfo) + peers[i] = addrInfo.ID + reqs[i] = req + } + + ctx := context.Background() + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := d.handleAddProvider(ctx, peers[i], reqs[i]) + if err != nil { + b.Error(err) + } + } +} + +func TestDHT_handleAddProvider_happy_path(t *testing.T) { + ctx := context.Background() + d := newTestDHT(t) + + // construct request + addrInfo := newAddrInfo(t) + key := []byte("random-key") + req := newAddProviderRequest(key, addrInfo) + + // do the request + _, err := d.handleAddProvider(ctx, addrInfo.ID, req) + require.NoError(t, err) + + addrs := d.host.Peerstore().Addrs(addrInfo.ID) + require.Len(t, addrs, 1) + assert.Equal(t, addrs[0], addrInfo.Addrs[0]) + + // check if the record was store in the datastore + be, ok := d.backends[namespaceProviders].(*ProvidersBackend) + require.True(t, ok) + + dsKey := newDatastoreKey(be.namespace, string(key), string(addrInfo.ID)) + + val, err := be.datastore.Get(ctx, dsKey) + assert.NoError(t, err) + + rec := expiryRecord{} + err = rec.UnmarshalBinary(val) + require.NoError(t, err) + assert.False(t, rec.expiry.IsZero()) + + cacheKey := newDatastoreKey(be.namespace, string(key)).String() + _, found := be.cache.Get(cacheKey) + assert.False(t, found) // only cache on Fetch, not on write +} + +func TestDHT_handleAddProvider_key_size_check(t *testing.T) { + d := newTestDHT(t) + + // construct request + addrInfo := newAddrInfo(t) + req := newAddProviderRequest(make([]byte, 81), addrInfo) + + _, err := d.handleAddProvider(context.Background(), addrInfo.ID, req) + assert.Error(t, err) + + // same exercise with valid key length + req = newAddProviderRequest(make([]byte, 80), addrInfo) + + _, err = d.handleAddProvider(context.Background(), addrInfo.ID, req) + assert.NoError(t, err) +} + +func TestDHT_handleAddProvider_unsupported_record_type(t *testing.T) { + d := newTestDHT(t) + + addrInfo := newAddrInfo(t) + req := newAddProviderRequest([]byte("random-key"), addrInfo) + + // remove backend + delete(d.backends, namespaceProviders) + + _, err := d.handleAddProvider(context.Background(), addrInfo.ID, req) + assert.Error(t, err) + assert.ErrorContains(t, err, "unsupported record type") +} + +func TestDHT_handleAddProvider_record_for_other_peer(t *testing.T) { + ctx := context.Background() + d := newTestDHT(t) + + // construct request + addrInfo := newAddrInfo(t) + req := newAddProviderRequest([]byte("random-key"), addrInfo) + + // do the request + _, err := d.handleAddProvider(ctx, newPeerID(t), req) // other peer + assert.Error(t, err) + 
assert.ErrorContains(t, err, "attempted to store provider record for other peer") +} + +func TestDHT_handleAddProvider_record_with_empty_addresses(t *testing.T) { + ctx := context.Background() + d := newTestDHT(t) + + // construct request + addrInfo := newAddrInfo(t) + addrInfo.Addrs = make([]ma.Multiaddr, 0) // overwrite + + req := newAddProviderRequest([]byte("random-key"), addrInfo) + _, err := d.handleAddProvider(ctx, addrInfo.ID, req) + assert.Error(t, err) + assert.ErrorContains(t, err, "no addresses for provider") +} + +func TestDHT_handleAddProvider_empty_provider_peers(t *testing.T) { + ctx := context.Background() + d := newTestDHT(t) + + // construct request + req := newAddProviderRequest([]byte("random-key")) + + req.ProviderPeers = make([]pb.Message_Peer, 0) // overwrite + + // do the request + _, err := d.handleAddProvider(ctx, newPeerID(t), req) + assert.Error(t, err) + assert.ErrorContains(t, err, "no provider peers given") +} + +func BenchmarkDHT_handleGetProviders(b *testing.B) { + ctx := context.Background() + d := newTestDHT(b) + + fillRoutingTable(b, d) + + be, ok := d.backends[namespaceIPNS].(*RecordBackend) + require.True(b, ok) + + // fill datastore and build requests + keys := make([][]byte, b.N) + reqs := make([]*pb.Message, b.N) + peers := make([]peer.ID, b.N) + for i := 0; i < b.N; i++ { + + p := newAddrInfo(b) + k := fmt.Sprintf("key-%d", i) + keys[i] = []byte(k) + + // add to addresses peerstore + d.host.Peerstore().AddAddrs(p.ID, p.Addrs, time.Hour) + + // write to datastore + dsKey := newDatastoreKey(namespaceProviders, k, string(p.ID)) + rec := expiryRecord{expiry: time.Now()} + err := be.datastore.Put(ctx, dsKey, rec.MarshalBinary()) + require.NoError(b, err) + + peers[i] = p.ID + reqs[i] = &pb.Message{ + Type: pb.Message_GET_PROVIDERS, + Key: keys[i], + } + } + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, err := d.handleGetProviders(ctx, peers[b.N-i-1], reqs[i]) + if err != nil { + b.Error(err) + } + } +} + +func TestDHT_handleGetProviders_happy_path(t *testing.T) { + ctx := context.Background() + d := newTestDHT(t) + + fillRoutingTable(t, d) + + key := []byte("random-key") + + be, ok := d.backends[namespaceProviders].(*ProvidersBackend) + require.True(t, ok) + + providers := []peer.AddrInfo{ + newAddrInfo(t), + newAddrInfo(t), + newAddrInfo(t), + } + + for _, p := range providers { + // add to addresses peerstore + d.host.Peerstore().AddAddrs(p.ID, p.Addrs, time.Hour) + + // write to datastore + dsKey := newDatastoreKey(namespaceProviders, string(key), string(p.ID)) + rec := expiryRecord{expiry: time.Now()} + err := be.datastore.Put(ctx, dsKey, rec.MarshalBinary()) + require.NoError(t, err) + } + + req := &pb.Message{ + Type: pb.Message_GET_PROVIDERS, + Key: key, + } + + res, err := d.handleGetProviders(ctx, newPeerID(t), req) + require.NoError(t, err) + + assert.Equal(t, pb.Message_GET_PROVIDERS, res.Type) + assert.Equal(t, req.Key, res.Key) + assert.Nil(t, res.Record) + assert.Len(t, res.CloserPeers, 20) + require.Len(t, res.ProviderPeers, 3) + for _, p := range res.ProviderPeers { + assert.Len(t, p.Addresses(), 1) + } + + cacheKey := newDatastoreKey(be.namespace, string(key)) + set, found := be.cache.Get(cacheKey.String()) + require.True(t, found) + assert.Len(t, set.providers, 3) +} + +func TestDHT_handleGetProviders_do_not_return_expired_records(t *testing.T) { + ctx := context.Background() + d := newTestDHT(t) + + fillRoutingTable(t, d) + + key := []byte("random-key") + + // check if the record was store in the datastore + 
be, ok := d.backends[namespaceProviders].(*ProvidersBackend) + require.True(t, ok) + + provider1 := newAddrInfo(t) + provider2 := newAddrInfo(t) + + d.host.Peerstore().AddAddrs(provider1.ID, provider1.Addrs, time.Hour) + d.host.Peerstore().AddAddrs(provider2.ID, provider2.Addrs, time.Hour) + + // write valid record + dsKey := newDatastoreKey(namespaceProviders, string(key), string(provider1.ID)) + rec := expiryRecord{expiry: time.Now()} + err := be.datastore.Put(ctx, dsKey, rec.MarshalBinary()) + require.NoError(t, err) + + // write expired record + dsKey = newDatastoreKey(namespaceProviders, string(key), string(provider2.ID)) + rec = expiryRecord{expiry: time.Now().Add(-be.cfg.ProvideValidity - time.Second)} + err = be.datastore.Put(ctx, dsKey, rec.MarshalBinary()) + require.NoError(t, err) + + req := &pb.Message{ + Type: pb.Message_GET_PROVIDERS, + Key: key, + } + + res, err := d.handleGetProviders(ctx, newPeerID(t), req) + require.NoError(t, err) + + assert.Equal(t, pb.Message_GET_PROVIDERS, res.Type) + assert.Equal(t, req.Key, res.Key) + assert.Nil(t, res.Record) + assert.Len(t, res.CloserPeers, 20) + require.Len(t, res.ProviderPeers, 1) // only one provider + + // record was deleted + _, err = be.datastore.Get(ctx, dsKey) + assert.ErrorIs(t, err, ds.ErrNotFound) } From 7a0e26cab54533923eb48074764504ad2ffef9cb Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Wed, 23 Aug 2023 19:33:34 +0200 Subject: [PATCH 22/64] improve telemetry --- v2/backend_provider.go | 18 ++++++++++++++++++ v2/dht.go | 5 +++++ v2/metrics/metrics.go | 8 ++++++++ 3 files changed, 31 insertions(+) diff --git a/v2/backend_provider.go b/v2/backend_provider.go index f4091dc9..3707ea04 100644 --- a/v2/backend_provider.go +++ b/v2/backend_provider.go @@ -5,11 +5,16 @@ import ( "encoding/binary" "fmt" "path" + "strconv" "strings" "sync" "sync/atomic" "time" + "github.com/libp2p/go-libp2p-kad-dht/v2/metrics" + "go.opencensus.io/stats" + "go.opencensus.io/tag" + lru "github.com/hashicorp/golang-lru/v2" ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/autobatch" @@ -143,8 +148,10 @@ func (p *ProvidersBackend) Fetch(ctx context.Context, key string) (any, error) { qKey := newDatastoreKey(p.namespace, key) if cached, ok := p.cache.Get(qKey.String()); ok { + p.trackCacheQuery(ctx, true) return cached, nil } + p.trackCacheQuery(ctx, false) q, err := p.datastore.Query(ctx, dsq.Query{Prefix: qKey.String()}) if err != nil { @@ -277,6 +284,17 @@ func (p *ProvidersBackend) collectGarbage(ctx context.Context) { } } +// trackCacheQuery updates the prometheus metrics about cache hit/miss performance +func (p *ProvidersBackend) trackCacheQuery(ctx context.Context, hit bool) { + _ = stats.RecordWithTags(ctx, + []tag.Mutator{ + tag.Upsert(metrics.KeyCacheHit, strconv.FormatBool(hit)), + tag.Upsert(metrics.KeyRecordType, "provider"), + }, + metrics.LRUCache.M(1), + ) +} + // delete is a convenience method to delete the record at the given datastore // key. It doesn't return any error but logs it instead as a warning. 
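+// The error is only logged because callers cannot reasonably react to it,
+// e.g., when an expired provider record is removed while serving a Fetch.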
func (p *ProvidersBackend) delete(ctx context.Context, dsKey ds.Key) { diff --git a/v2/dht.go b/v2/dht.go index c5cd901d..706b430e 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -4,6 +4,8 @@ import ( "fmt" "sync" + "github.com/ipfs/go-datastore/trace" + "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" @@ -85,6 +87,9 @@ func New(h host.Host, cfg *Config) (*DHT, error) { return nil, fmt.Errorf("new default datastore: %w", err) } + // wrap datastore in open telemetry tracing + dstore = trace.New(dstore, tracer) + pbeCfg := DefaultProviderBackendConfig() pbeCfg.Logger = cfg.Logger diff --git a/v2/metrics/metrics.go b/v2/metrics/metrics.go index 510b9fa2..3cee215d 100644 --- a/v2/metrics/metrics.go +++ b/v2/metrics/metrics.go @@ -13,6 +13,8 @@ var ( // Keys var ( + KeyCacheHit, _ = tag.NewKey("hit") + KeyRecordType, _ = tag.NewKey("record_type") // currently only used for the provider backend LRU cache KeyMessageType, _ = tag.NewKey("message_type") KeyPeerID, _ = tag.NewKey("peer_id") // KeyInstanceID identifies a dht instance by the pointer address. @@ -32,6 +34,7 @@ var ( SentRequests = stats.Int64("libp2p.io/dht/kad/sent_requests", "Total number of requests sent per RPC", stats.UnitDimensionless) SentRequestErrors = stats.Int64("libp2p.io/dht/kad/sent_request_errors", "Total number of errors for requests sent per RPC", stats.UnitDimensionless) SentBytes = stats.Int64("libp2p.io/dht/kad/sent_bytes", "Total sent bytes per RPC", stats.UnitBytes) + LRUCache = stats.Int64("libp2p.io/dht/kad/lru_cache", "Cache hit or miss counter", stats.UnitDimensionless) NetworkSize = stats.Int64("libp2p.io/dht/kad/network_size", "Network size estimation", stats.UnitDimensionless) ) @@ -87,6 +90,11 @@ var ( TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, Aggregation: defaultBytesDistribution, } + LRUCacheView = &view.View{ + Measure: LRUCache, + TagKeys: []tag.Key{KeyPeerID, KeyInstanceID}, + Aggregation: view.Count(), + } NetworkSizeView = &view.View{ Measure: NetworkSize, TagKeys: []tag.Key{KeyPeerID, KeyInstanceID}, From a8a222b3491e29302dbc124c3c387e9ecd439423 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Thu, 24 Aug 2023 11:52:00 +0200 Subject: [PATCH 23/64] Add AddressFilter feature --- v2/backend.go | 2 +- v2/backend_provider.go | 34 +++++++++++------ v2/config.go | 27 ++++++++++++++ v2/config_test.go | 8 ++++ v2/dht.go | 1 + v2/handlers_test.go | 84 +++++++++++++++++++++++++++++++++++++++++- 6 files changed, 142 insertions(+), 14 deletions(-) diff --git a/v2/backend.go b/v2/backend.go index d831b0bb..48dab7a4 100644 --- a/v2/backend.go +++ b/v2/backend.go @@ -113,7 +113,7 @@ func NewBackendProvider(pstore peerstore.Peerstore, dstore ds.Batching, cfg *Pro log: cfg.Logger, cache: cache, namespace: namespaceProviders, - peerstore: pstore, + addrBook: pstore, datastore: autobatch.NewAutoBatching(dstore, cfg.BatchSize), } diff --git a/v2/backend_provider.go b/v2/backend_provider.go index 3707ea04..791ad890 100644 --- a/v2/backend_provider.go +++ b/v2/backend_provider.go @@ -43,9 +43,9 @@ type ProvidersBackend struct { // TODO: is that really so effective? The cache size is quite low either. cache *lru.Cache[string, providerSet] - // peerstore holds a reference to the peer store to store and fetch peer - // multiaddresses from (we don't save them in the datastore). 
- peerstore peerstore.Peerstore + // addrBook holds a reference to the peerstore's address book to store and + // fetch peer multiaddresses from (we don't save them in the datastore). + addrBook peerstore.AddrBook // datastore is where we save the peer IDs providing a certain multihash datastore *autobatch.Datastore @@ -68,10 +68,10 @@ type ProvidersBackendConfig struct { ProvideValidity time.Duration // AddressTTL specifies for how long we will keep around provider multi - // addresses in the peerstore. If such multiaddresses are present we send - // them alongside the peer ID to the requesting peer. This prevents the - // necessity for a second look for the multiaddresses on the requesting - // peers' side. + // addresses in the peerstore's address book. If such multiaddresses are + // present we send them alongside the peer ID to the requesting peer. This + // prevents the necessity for a second look for the multiaddresses on the + // requesting peers' side. AddressTTL time.Duration // BatchSize specifies how many provider record writes should be batched @@ -85,6 +85,12 @@ type ProvidersBackendConfig struct { // Logger is the logger to use Logger *slog.Logger + + // AddressFilter is a filter function that any addresses that we attempt to + // store or fetch from the peerstore's address book need to pass through. + // If you're manually configuring this backend, make sure to align the + // filter with the one configured in [Config.AddressFilter]. + AddressFilter AddressFilter } // DefaultProviderBackendConfig returns a default [ProvidersBackend] @@ -93,12 +99,13 @@ type ProvidersBackendConfig struct { // here is used. func DefaultProviderBackendConfig() *ProvidersBackendConfig { return &ProvidersBackendConfig{ - ProvideValidity: time.Hour * 48, // empirically measured in: https://github.com/plprobelab/network-measurements/blob/master/results/rfm17-provider-record-liveness.md + ProvideValidity: 48 * time.Hour, // empirically measured in: https://github.com/plprobelab/network-measurements/blob/master/results/rfm17-provider-record-liveness.md AddressTTL: 24 * time.Hour, BatchSize: 256, // MAGIC CacheSize: 256, // MAGIC GCInterval: time.Hour, // MAGIC Logger: slog.Default(), + AddressFilter: AddrFilterIdentity, } } @@ -121,7 +128,8 @@ func (p *ProvidersBackend) Store(ctx context.Context, key string, value any) (an provs.addProvider(addrInfo, rec.expiry) } - p.peerstore.AddAddrs(addrInfo.ID, addrInfo.Addrs, p.cfg.AddressTTL) + filtered := p.cfg.AddressFilter(addrInfo.Addrs) + p.addrBook.AddAddrs(addrInfo.ID, filtered, p.cfg.AddressTTL) _, found := p.gcSkip.LoadOrStore(dsKey.String(), struct{}{}) @@ -195,7 +203,11 @@ func (p *ProvidersBackend) Fetch(ctx context.Context, key string) (any, error) { continue } - addrInfo := p.peerstore.PeerInfo(peer.ID(binPeerID)) + maddrs := p.addrBook.Addrs(peer.ID(binPeerID)) + addrInfo := peer.AddrInfo{ + ID: peer.ID(binPeerID), + Addrs: p.cfg.AddressFilter(maddrs), + } out.addProvider(addrInfo, rec.expiry) } @@ -306,7 +318,7 @@ func (p *ProvidersBackend) delete(ctx context.Context, dsKey ds.Key) { // expiryRecord is captures the information that gets written to the datastore // for any provider record. This record doesn't include any peer IDs or // multiaddresses because peer IDs are part of the key that this record gets -// stored under and multiaddresses are stored in the peerstore. This record +// stored under and multiaddresses are stored in the addrBook. This record // just tracks the expiry time of the record. 
It implements binary marshalling // and unmarshalling methods for easy (de)serialization into the datastore. type expiryRecord struct { diff --git a/v2/config.go b/v2/config.go index 83951d9d..6d6d8a6c 100644 --- a/v2/config.go +++ b/v2/config.go @@ -8,6 +8,8 @@ import ( leveldb "github.com/ipfs/go-ds-leveldb" logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p/core/protocol" + ma "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" "github.com/plprobelab/go-kademlia/coord" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" @@ -66,6 +68,8 @@ type ( ds.BatchingFeature ds.TxnFeature } + + AddressFilter func([]ma.Multiaddr) []ma.Multiaddr ) const ( @@ -146,6 +150,11 @@ type Config struct { // receiving before closing/resetting it. The timeout gets reset every time // we have successfully read a message from the stream. TimeoutStreamIdle time.Duration + + // AddressFilter is used to filter the addresses we put into the peer store and + // also fetch from the peer store and serve to other peers. It is mainly + // used to filter out private addresses. + AddressFilter AddressFilter } // DefaultConfig returns a configuration struct that can be used as-is to @@ -163,6 +172,7 @@ func DefaultConfig() *Config { Datastore: nil, Logger: slog.New(zapslog.NewHandler(logging.Logger("dht").Desugar().Core())), TimeoutStreamIdle: time.Minute, // MAGIC + AddressFilter: AddrFilterPrivate, } } @@ -234,5 +244,22 @@ func (c *Config) Validate() error { } } + if c.AddressFilter == nil { + return fmt.Errorf("address filter must not be nil - use AddrFilterIdentity to disable filtering") + } + return nil } + +// AddrFilterIdentity is an [AddressFilter] that does not apply any filtering +// and just returns that passed-in multi addresses without modification. +func AddrFilterIdentity(maddrs []ma.Multiaddr) []ma.Multiaddr { + return maddrs +} + +// AddrFilterPrivate filters out any multiaddresses that are private. It +// evaluates the [manet.IsPublicAddr] on each multiaddress, and if it returns +// true, the multiaddress will be in the result set. 
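+//
+// If different filtering is needed, a custom [AddressFilter] can be plugged
+// in via [Config.AddressFilter]. As an illustrative sketch only, a filter
+// that admits nothing but TCP addresses could look like this:
+//
+//	cfg := DefaultConfig()
+//	cfg.AddressFilter = func(maddrs []ma.Multiaddr) []ma.Multiaddr {
+//		return ma.FilterAddrs(maddrs, func(a ma.Multiaddr) bool {
+//			_, err := a.ValueForProtocol(ma.P_TCP)
+//			return err == nil
+//		})
+//	}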
+func AddrFilterPrivate(maddrs []ma.Multiaddr) []ma.Multiaddr { + return ma.FilterAddrs(maddrs, manet.IsPublicAddr) +} diff --git a/v2/config_test.go b/v2/config_test.go index 14f46faf..670dcc84 100644 --- a/v2/config_test.go +++ b/v2/config_test.go @@ -98,6 +98,14 @@ func TestConfig_Validate(t *testing.T) { return c }, }, + { + name: "nil address filter", + wantErr: true, + mutate: func(c *Config) *Config { + c.AddressFilter = nil + return c + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/v2/dht.go b/v2/dht.go index 706b430e..150cbf98 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -92,6 +92,7 @@ func New(h host.Host, cfg *Config) (*DHT, error) { pbeCfg := DefaultProviderBackendConfig() pbeCfg.Logger = cfg.Logger + pbeCfg.AddressFilter = cfg.AddressFilter pbe, err := NewBackendProvider(h.Peerstore(), dstore, pbeCfg) if err != nil { diff --git a/v2/handlers_test.go b/v2/handlers_test.go index d8ee390b..368aa266 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -28,6 +28,10 @@ import ( var rng = rand.New(rand.NewSource(1337)) func newTestDHT(t testing.TB) *DHT { + return newTestDHTWithConfig(t, DefaultConfig()) +} + +func newTestDHTWithConfig(t testing.TB, cfg *Config) *DHT { t.Helper() h, err := libp2p.New(libp2p.NoListenAddrs) @@ -35,7 +39,7 @@ func newTestDHT(t testing.TB) *DHT { t.Fatalf("new libp2p host: %s", err) } - d, err := New(h, DefaultConfig()) + d, err := New(h, cfg) if err != nil { t.Fatalf("new dht: %s", err) } @@ -1042,7 +1046,7 @@ func newAddrInfo(t testing.TB) peer.AddrInfo { return peer.AddrInfo{ ID: newPeerID(t), Addrs: []ma.Multiaddr{ - ma.StringCast("/ip4/100.100.100.100/tcp/2000"), + ma.StringCast("/ip4/99.99.99.99/tcp/2000"), // must be a public address }, } } @@ -1195,6 +1199,35 @@ func TestDHT_handleAddProvider_empty_provider_peers(t *testing.T) { assert.ErrorContains(t, err, "no provider peers given") } +func TestDHT_handleAddProvider_only_store_filtered_addresses(t *testing.T) { + ctx := context.Background() + cfg := DefaultConfig() + + testMaddr := ma.StringCast("/dns/maddr.dummy") + + // define a filter that returns a completely different address and discards + // every other + cfg.AddressFilter = func(maddrs []ma.Multiaddr) []ma.Multiaddr { + return []ma.Multiaddr{testMaddr} + } + + d := newTestDHTWithConfig(t, cfg) + + addrInfo := newAddrInfo(t) + require.True(t, len(addrInfo.Addrs) > 0, "need addr info with at least one address") + + // construct request + req := newAddProviderRequest([]byte("random-key"), addrInfo) + + // do the request + _, err := d.handleAddProvider(ctx, addrInfo.ID, req) + assert.NoError(t, err) + + maddrs := d.host.Peerstore().Addrs(addrInfo.ID) + require.Len(t, maddrs, 1) + assert.True(t, maddrs[0].Equal(testMaddr), "address filter wasn't applied") +} + func BenchmarkDHT_handleGetProviders(b *testing.B) { ctx := context.Background() d := newTestDHT(b) @@ -1340,3 +1373,50 @@ func TestDHT_handleGetProviders_do_not_return_expired_records(t *testing.T) { _, err = be.datastore.Get(ctx, dsKey) assert.ErrorIs(t, err, ds.ErrNotFound) } + +func TestDHT_handleGetProviders_only_serve_filtered_addresses(t *testing.T) { + ctx := context.Background() + cfg := DefaultConfig() + + testMaddr := ma.StringCast("/dns/maddr.dummy") + + // define a filter that returns a completely different address and discards + // every other + cfg.AddressFilter = func(maddrs []ma.Multiaddr) []ma.Multiaddr { + return []ma.Multiaddr{testMaddr} + } + + d := newTestDHTWithConfig(t, cfg) + + fillRoutingTable(t, d) + + key := 
[]byte("random-key") + + be, ok := d.backends[namespaceProviders].(*ProvidersBackend) + require.True(t, ok) + + p := newAddrInfo(t) + require.True(t, len(p.Addrs) > 0, "need addr info with at least one address") + + // add to addresses peerstore + d.host.Peerstore().AddAddrs(p.ID, p.Addrs, time.Hour) + + // write to datastore + dsKey := newDatastoreKey(namespaceProviders, string(key), string(p.ID)) + rec := expiryRecord{expiry: time.Now()} + err := be.datastore.Put(ctx, dsKey, rec.MarshalBinary()) + require.NoError(t, err) + + req := &pb.Message{ + Type: pb.Message_GET_PROVIDERS, + Key: key, + } + + res, err := d.handleGetProviders(ctx, newPeerID(t), req) + require.NoError(t, err) + + require.Len(t, res.ProviderPeers, 1) + maddrs := res.ProviderPeers[0].Addresses() + require.Len(t, maddrs, 1) + assert.True(t, maddrs[0].Equal(testMaddr)) +} From 6bdd7ec5d4f02da922444e536a65c0f7f667f4e3 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Thu, 24 Aug 2023 13:30:55 +0200 Subject: [PATCH 24/64] WIP --- v2/backend_provider.go | 44 +++++++++++++++++++++++++++++++------ v2/backend_provider_test.go | 43 ++++++++++++++++++++++++++++++++++++ v2/handlers_test.go | 14 +++++++----- 3 files changed, 88 insertions(+), 13 deletions(-) create mode 100644 v2/backend_provider_test.go diff --git a/v2/backend_provider.go b/v2/backend_provider.go index 791ad890..eb60c10a 100644 --- a/v2/backend_provider.go +++ b/v2/backend_provider.go @@ -11,6 +11,8 @@ import ( "sync/atomic" "time" + "github.com/benbjohnson/clock" + "github.com/libp2p/go-libp2p-kad-dht/v2/metrics" "go.opencensus.io/stats" "go.opencensus.io/tag" @@ -54,8 +56,10 @@ type ProvidersBackend struct { // collection process. TODO: this is a sub-optimal pattern. gcSkip sync.Map - // gcActive indicates whether garbage collection is scheduled + // gcActive indicates whether the garbage collection loop is running gcActive atomic.Bool + gcCancel context.CancelFunc + gcDone chan struct{} } var _ Backend = (*ProvidersBackend)(nil) @@ -64,6 +68,9 @@ var _ Backend = (*ProvidersBackend)(nil) // [DefaultProviderBackendConfig] to get a default configuration struct and then // modify it to your liking. type ProvidersBackendConfig struct { + // clk is an unexported field that's used for testing time related methods + clk clock.Clock + // ProvideValidity specifies for how long provider records are valid ProvideValidity time.Duration @@ -99,6 +106,7 @@ type ProvidersBackendConfig struct { // here is used. func DefaultProviderBackendConfig() *ProvidersBackendConfig { return &ProvidersBackendConfig{ + clk: clock.New(), ProvideValidity: 48 * time.Hour, // empirically measured in: https://github.com/plprobelab/network-measurements/blob/master/results/rfm17-provider-record-liveness.md AddressTTL: 24 * time.Hour, BatchSize: 256, // MAGIC @@ -119,7 +127,7 @@ func (p *ProvidersBackend) Store(ctx context.Context, key string, value any) (an } rec := expiryRecord{ - expiry: time.Now(), + expiry: p.cfg.clk.Now(), } cacheKey := newDatastoreKey(p.namespace, key).String() @@ -172,7 +180,7 @@ func (p *ProvidersBackend) Fetch(ctx context.Context, key string) (any, error) { } }() - now := time.Now() + now := p.cfg.clk.Now() out := &providerSet{ providers: []peer.AddrInfo{}, set: make(map[peer.ID]time.Time), @@ -221,22 +229,26 @@ func (p *ProvidersBackend) Fetch(ctx context.Context, key string) (any, error) { // StartGarbageCollection starts the garbage collection loop. The garbage // collection interval can be configured with [ProvidersBackendConfig.GCInterval]. 
-func (p *ProvidersBackend) StartGarbageCollection(ctx context.Context) { +// The garbage collection loop can only be started a single time. Use +// [StopGarbageCollection] to stop the garbage collection loop. +func (p *ProvidersBackend) StartGarbageCollection() { if p.gcActive.Swap(true) { p.log.Info("Provider backend's garbage collection is already running") return } + ctx, cancel := context.WithCancel(context.Background()) + p.gcCancel = cancel + defer close(p.gcDone) + p.log.Info("Provider backend's started garbage collection schedule") - ticker := time.NewTicker(p.cfg.GCInterval) + ticker := p.cfg.clk.Ticker(p.cfg.GCInterval) defer ticker.Stop() for { select { case <-ctx.Done(): - p.gcActive.Store(false) - p.log.Info("Provider backend's garbage collection stopped") return case <-ticker.C: p.collectGarbage(ctx) @@ -244,6 +256,24 @@ func (p *ProvidersBackend) StartGarbageCollection(ctx context.Context) { } } +// StopGarbageCollection stops the garbage collection loop started with +// [StartGarbageCollection]. If garbage collection is not running, this method +// is a no-op. +func (p *ProvidersBackend) StopGarbageCollection() { + if !p.gcActive.Load() { + return + } + + p.gcCancel() + <-p.gcDone + + p.gcCancel = nil + p.gcDone = make(chan struct{}) + + p.gcActive.Store(false) + p.log.Info("Provider backend's garbage collection stopped") +} + // collectGarbage sweeps through the datastore and deletes all provider records // that have expired. A record is expired if the // [ProvidersBackendConfig].ProvideValidity is exceeded. diff --git a/v2/backend_provider_test.go b/v2/backend_provider_test.go new file mode 100644 index 00000000..5bb7bcd4 --- /dev/null +++ b/v2/backend_provider_test.go @@ -0,0 +1,43 @@ +package dht + +import ( + "testing" + + "github.com/benbjohnson/clock" + "github.com/stretchr/testify/require" + + "github.com/libp2p/go-libp2p" +) + +func TestProvidersBackend_GarbageCollectionLifecycle(t *testing.T) { + h, err := libp2p.New(libp2p.NoListenAddrs) + require.NoError(t, err) + + dstore, err := InMemoryDatastore() + require.NoError(t, err) + + t.Cleanup(func() { + if err = dstore.Close(); err != nil { + t.Logf("closing datastore: %s", err) + } + + if err = h.Close(); err != nil { + t.Logf("closing host: %s", err) + } + }) + + mockClock := clock.NewMock() + + cfg := DefaultProviderBackendConfig() + cfg.clk = mockClock + + b, err := NewBackendProvider(h.Peerstore(), dstore, nil) + require.NoError(t, err) + + b.StartGarbageCollection() + b.StopGarbageCollection() + b.StartGarbageCollection() + b.StopGarbageCollection() + b.StartGarbageCollection() + b.StopGarbageCollection() +} diff --git a/v2/handlers_test.go b/v2/handlers_test.go index 368aa266..36b463f6 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -35,14 +35,10 @@ func newTestDHTWithConfig(t testing.TB, cfg *Config) *DHT { t.Helper() h, err := libp2p.New(libp2p.NoListenAddrs) - if err != nil { - t.Fatalf("new libp2p host: %s", err) - } + require.NoError(t, err) d, err := New(h, cfg) - if err != nil { - t.Fatalf("new dht: %s", err) - } + require.NoError(t, err) t.Cleanup(func() { if err = d.Close(); err != nil { @@ -63,6 +59,8 @@ func newPeerID(t testing.TB) peer.ID { } func newIdentity(t testing.TB) (peer.ID, crypto.PrivKey) { + t.Helper() + priv, pub, err := crypto.GenerateEd25519Key(rng) require.NoError(t, err) @@ -73,6 +71,8 @@ func newIdentity(t testing.TB) (peer.ID, crypto.PrivKey) { } func fillRoutingTable(t testing.TB, d *DHT) { + t.Helper() + // 250 is a common number of peers to have in the routing 
table
 	for i := 0; i < 250; i++ {
 		// generate peer ID
@@ -426,6 +426,8 @@ func BenchmarkDHT_handlePing(b *testing.B) {
 }
 
 func newPutIPNSRequest(t testing.TB, priv crypto.PrivKey, seq uint64, eol time.Time, ttl time.Duration) *pb.Message {
+	t.Helper()
+
 	testPath := path.Path("/ipfs/bafkqac3jobxhgidsn5rww4yk")
 
 	rec, err := ipns.NewRecord(priv, testPath, seq, eol, ttl)

From 14c478cf8d62c662693e3037c1b8256c38d00f14 Mon Sep 17 00:00:00 2001
From: Dennis Trautwein
Date: Thu, 24 Aug 2023 14:59:23 +0200
Subject: [PATCH 25/64] test backend provider garbage collection

---
 v2/backend_provider.go      |  52 ++++++++++++-------
 v2/backend_provider_test.go | 106 +++++++++++++++++++++++++++++++++---
 2 files changed, 128 insertions(+), 30 deletions(-)

diff --git a/v2/backend_provider.go b/v2/backend_provider.go
index eb60c10a..25042f6a 100644
--- a/v2/backend_provider.go
+++ b/v2/backend_provider.go
@@ -8,7 +8,6 @@ import (
 	"strconv"
 	"strings"
 	"sync"
-	"sync/atomic"
 	"time"
 
 	"github.com/benbjohnson/clock"
@@ -57,9 +56,9 @@ type ProvidersBackend struct {
 	gcSkip sync.Map
 
-	// gcActive indicates whether the garbage collection loop is running
-	gcActive atomic.Bool
-	gcCancel context.CancelFunc
-	gcDone   chan struct{}
+	// gcCancelMu guards gcCancel and gcDone of the garbage collection loop
+	gcCancelMu sync.RWMutex
+	gcCancel   context.CancelFunc
+	gcDone     chan struct{}
 }
 
 var _ Backend = (*ProvidersBackend)(nil)
@@ -232,45 +231,53 @@ func (p *ProvidersBackend) Fetch(ctx context.Context, key string) (any, error) {
 // The garbage collection loop can only be started a single time. Use
 // [StopGarbageCollection] to stop the garbage collection loop.
 func (p *ProvidersBackend) StartGarbageCollection() {
-	if p.gcActive.Swap(true) {
+	p.gcCancelMu.Lock()
+	if p.gcCancel != nil {
 		p.log.Info("Provider backend's garbage collection is already running")
+		p.gcCancelMu.Unlock()
 		return
 	}
+	defer p.gcCancelMu.Unlock()
 
 	ctx, cancel := context.WithCancel(context.Background())
 	p.gcCancel = cancel
-	defer close(p.gcDone)
+	p.gcDone = make(chan struct{})
 
 	p.log.Info("Provider backend's started garbage collection schedule")
 
-	ticker := p.cfg.clk.Ticker(p.cfg.GCInterval)
-	defer ticker.Stop()
+	go func() {
+		defer close(p.gcDone)
+
+		ticker := p.cfg.clk.Ticker(p.cfg.GCInterval)
+		defer ticker.Stop()
 
-	for {
-		select {
-		case <-ctx.Done():
-			return
-		case <-ticker.C:
-			p.collectGarbage(ctx)
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case <-ticker.C:
+				p.collectGarbage(ctx)
+			}
 		}
-	}
+	}()
 }
 
 // StopGarbageCollection stops the garbage collection loop started with
 // [StartGarbageCollection]. If garbage collection is not running, this method
 // is a no-op.
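+//
+// A minimal lifecycle sketch (editor's illustration, not part of the
+// original change; b is assumed to be a *ProvidersBackend):
+//
+//	b.StartGarbageCollection()
+//	defer b.StopGarbageCollection()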
func (p *ProvidersBackend) StopGarbageCollection() { - if !p.gcActive.Load() { + p.gcCancelMu.Lock() + if p.gcCancel == nil { + p.log.Info("Provider backend's garbage collection isn't running") + p.gcCancelMu.Unlock() return } + defer p.gcCancelMu.Unlock() p.gcCancel() <-p.gcDone - + p.gcDone = nil p.gcCancel = nil - p.gcDone = make(chan struct{}) - - p.gcActive.Store(false) p.log.Info("Provider backend's garbage collection stopped") } @@ -314,10 +321,11 @@ func (p *ProvidersBackend) collectGarbage(ctx context.Context) { } rec := expiryRecord{} + now := p.cfg.clk.Now() if err = rec.UnmarshalBinary(e.Value); err != nil { p.log.LogAttrs(ctx, slog.LevelWarn, "Garbage collection provider record unmarshalling failed", slog.String("key", e.Key), slog.String("err", err.Error())) p.delete(ctx, ds.RawKey(e.Key)) - } else if time.Now().Sub(rec.expiry) <= p.cfg.ProvideValidity { + } else if now.Sub(rec.expiry) <= p.cfg.ProvideValidity { continue } diff --git a/v2/backend_provider_test.go b/v2/backend_provider_test.go index 5bb7bcd4..5a29c1b1 100644 --- a/v2/backend_provider_test.go +++ b/v2/backend_provider_test.go @@ -1,15 +1,24 @@ package dht import ( + "context" + "io" + "sync" "testing" + "time" "github.com/benbjohnson/clock" - "github.com/stretchr/testify/require" - + ds "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/exp/slog" ) -func TestProvidersBackend_GarbageCollectionLifecycle(t *testing.T) { +var devnull = slog.New(slog.NewTextHandler(io.Discard, nil)) + +func TestProvidersBackend_GarbageCollection(t *testing.T) { + // construct host, datastore, mock clock and provider backend h, err := libp2p.New(libp2p.NoListenAddrs) require.NoError(t, err) @@ -26,18 +35,99 @@ func TestProvidersBackend_GarbageCollectionLifecycle(t *testing.T) { } }) + // configure mock clock mockClock := clock.NewMock() - cfg := DefaultProviderBackendConfig() cfg.clk = mockClock + cfg.Logger = devnull - b, err := NewBackendProvider(h.Peerstore(), dstore, nil) + // init backend + b, err := NewBackendProvider(h.Peerstore(), dstore, cfg) require.NoError(t, err) + // start the garbage collection process b.StartGarbageCollection() + + // write random record to datastore and peerstore + ctx := context.Background() + p := newAddrInfo(t) + + // write to datastore + dsKey := newDatastoreKey(namespaceProviders, "random-key", string(p.ID)) + rec := expiryRecord{expiry: mockClock.Now()} + err = b.datastore.Put(ctx, dsKey, rec.MarshalBinary()) + require.NoError(t, err) + + // write to peerstore + h.Peerstore().AddAddrs(p.ID, p.Addrs, time.Hour) + + // advance clock half the gc time and check if record is still there + mockClock.Add(cfg.ProvideValidity / 2) + + // sync autobatching datastore to have all put/deletes visible + err = b.datastore.Sync(ctx, ds.NewKey(namespaceProviders)) + require.NoError(t, err) + + // we expect the record to still be there after half the ProvideValidity + _, err = b.datastore.Get(ctx, dsKey) + require.NoError(t, err) + + // advance clock another gc time and check if record was GC'd now + mockClock.Add(cfg.ProvideValidity + cfg.GCInterval) + + // sync autobatching datastore to have all put/deletes visible + err = b.datastore.Sync(ctx, ds.NewKey(namespaceProviders)) + require.NoError(t, err) + + // we expect the record to be GC'd now + _, err = b.datastore.Get(ctx, dsKey) + require.ErrorIs(t, err, ds.ErrNotFound) + b.StopGarbageCollection() - b.StartGarbageCollection() - b.StopGarbageCollection() - 
b.StartGarbageCollection() +} + +func TestProvidersBackend_GarbageCollection_lifecycle_thread_safe(t *testing.T) { + h, err := libp2p.New(libp2p.NoListenAddrs) + require.NoError(t, err) + + dstore, err := InMemoryDatastore() + require.NoError(t, err) + + t.Cleanup(func() { + if err = dstore.Close(); err != nil { + t.Logf("closing datastore: %s", err) + } + + if err = h.Close(); err != nil { + t.Logf("closing host: %s", err) + } + }) + + cfg := DefaultProviderBackendConfig() + cfg.Logger = devnull + + b, err := NewBackendProvider(h.Peerstore(), dstore, cfg) + require.NoError(t, err) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + for i := 0; i < 100; i++ { + b.StartGarbageCollection() + } + wg.Done() + }() + wg.Add(1) + go func() { + for i := 0; i < 100; i++ { + b.StopGarbageCollection() + } + wg.Done() + }() + wg.Wait() + b.StopGarbageCollection() + + assert.Nil(t, b.gcCancel) + assert.Nil(t, b.gcDone) } From 2643b724902713a9e087a63403488016be3d305b Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Thu, 24 Aug 2023 15:02:01 +0200 Subject: [PATCH 26/64] refactor provider backend tests --- v2/backend_provider_test.go | 38 ++++++++++++------------------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/v2/backend_provider_test.go b/v2/backend_provider_test.go index 5a29c1b1..60d67c32 100644 --- a/v2/backend_provider_test.go +++ b/v2/backend_provider_test.go @@ -17,8 +17,7 @@ import ( var devnull = slog.New(slog.NewTextHandler(io.Discard, nil)) -func TestProvidersBackend_GarbageCollection(t *testing.T) { - // construct host, datastore, mock clock and provider backend +func newBackendProvider(t testing.TB, cfg *ProvidersBackendConfig) *ProvidersBackend { h, err := libp2p.New(libp2p.NoListenAddrs) require.NoError(t, err) @@ -35,15 +34,19 @@ func TestProvidersBackend_GarbageCollection(t *testing.T) { } }) - // configure mock clock + b, err := NewBackendProvider(h.Peerstore(), dstore, cfg) + require.NoError(t, err) + + return b +} + +func TestProvidersBackend_GarbageCollection(t *testing.T) { mockClock := clock.NewMock() cfg := DefaultProviderBackendConfig() cfg.clk = mockClock cfg.Logger = devnull - // init backend - b, err := NewBackendProvider(h.Peerstore(), dstore, cfg) - require.NoError(t, err) + b := newBackendProvider(t, cfg) // start the garbage collection process b.StartGarbageCollection() @@ -55,11 +58,11 @@ func TestProvidersBackend_GarbageCollection(t *testing.T) { // write to datastore dsKey := newDatastoreKey(namespaceProviders, "random-key", string(p.ID)) rec := expiryRecord{expiry: mockClock.Now()} - err = b.datastore.Put(ctx, dsKey, rec.MarshalBinary()) + err := b.datastore.Put(ctx, dsKey, rec.MarshalBinary()) require.NoError(t, err) // write to peerstore - h.Peerstore().AddAddrs(p.ID, p.Addrs, time.Hour) + b.addrBook.AddAddrs(p.ID, p.Addrs, time.Hour) // advance clock half the gc time and check if record is still there mockClock.Add(cfg.ProvideValidity / 2) @@ -87,27 +90,10 @@ func TestProvidersBackend_GarbageCollection(t *testing.T) { } func TestProvidersBackend_GarbageCollection_lifecycle_thread_safe(t *testing.T) { - h, err := libp2p.New(libp2p.NoListenAddrs) - require.NoError(t, err) - - dstore, err := InMemoryDatastore() - require.NoError(t, err) - - t.Cleanup(func() { - if err = dstore.Close(); err != nil { - t.Logf("closing datastore: %s", err) - } - - if err = h.Close(); err != nil { - t.Logf("closing host: %s", err) - } - }) - cfg := DefaultProviderBackendConfig() cfg.Logger = devnull - b, err := NewBackendProvider(h.Peerstore(), dstore, 
cfg)
-	require.NoError(t, err)
+	b := newBackendProvider(t, cfg)
 
 	var wg sync.WaitGroup

From 890d4ec1930e53ec38cbe37103c31d5c029bcdcb Mon Sep 17 00:00:00 2001
From: Dennis Trautwein
Date: Thu, 24 Aug 2023 16:48:32 +0200
Subject: [PATCH 27/64] add stream tests

---
 v2/stream.go      |  12 +--
 v2/stream_test.go | 194 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 198 insertions(+), 8 deletions(-)
 create mode 100644 v2/stream_test.go

diff --git a/v2/stream.go b/v2/stream.go
index ef100e58..4cd391a9 100644
--- a/v2/stream.go
+++ b/v2/stream.go
@@ -82,9 +82,10 @@ func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error {
 	// 1. read message from stream
 	data, err := d.streamReadMsg(ctx, slogger, reader)
 	if err != nil {
+		if errors.Is(err, io.EOF) {
+			return nil
+		}
 		return err
-	} else if data == nil {
-		return nil // nil error, nil data -> graceful end
 	}
 
 	// we have received a message, start the timer to
@@ -157,13 +158,8 @@ func (d *DHT) streamReadMsg(ctx context.Context, slogger *slog.Logger, r msgio.R
 
 	data, err := r.ReadMsg()
 	if err != nil {
-		// if the reader returns an end-of-file signal, exit gracefully
-		if errors.Is(err, io.EOF) {
-			return nil, nil
-		}
-
 		// log any other errors than stream resets
-		if err.Error() != "stream reset" {
+		if !errors.Is(err, network.ErrReset) {
 			slogger.LogAttrs(ctx, slog.LevelDebug, "error reading message", slog.String("err", err.Error()))
 		}
diff --git a/v2/stream_test.go b/v2/stream_test.go
new file mode 100644
index 00000000..f2d9a362
--- /dev/null
+++ b/v2/stream_test.go
@@ -0,0 +1,194 @@
+package dht
+
+import (
+	"context"
+	"runtime"
+	"testing"
+
+	"github.com/libp2p/go-libp2p"
+	pb "github.com/libp2p/go-libp2p-kad-dht/v2/pb"
+	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/network"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-msgio"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+type testReadWriter struct {
+	w msgio.WriteCloser
+	r msgio.ReadCloser
+}
+
+func newTestReadWriter(s network.Stream) *testReadWriter {
+	return &testReadWriter{
+		w: msgio.NewVarintWriter(s),
+		r: msgio.NewVarintReader(s),
+	}
+}
+
+func (trw testReadWriter) ReadMsg() (*pb.Message, error) {
+	msg, err := trw.r.ReadMsg()
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &pb.Message{}
+	err = resp.Unmarshal(msg)
+	return resp, err
+}
+
+func (trw testReadWriter) WriteMsg(msg *pb.Message) error {
+	data, err := msg.Marshal()
+	if err != nil {
+		return err
+	}
+
+	return trw.w.WriteMsg(data)
+}
+
+func newPeerPair(t testing.TB) (host.Host, *DHT) {
+	client, err := libp2p.New(
+		libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"),
+	)
+	require.NoError(t, err)
+
+	server, err := libp2p.New(
+		libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"),
+	)
+	require.NoError(t, err)
+
+	cfg := DefaultConfig()
+	cfg.Mode = ModeOptServer
+	serverDHT, err := New(server, cfg)
+	require.NoError(t, err)
+
+	fillRoutingTable(t, serverDHT)
+
+	t.Cleanup(func() {
+		if err = serverDHT.Close(); err != nil {
+			t.Logf("failed closing DHT: %s", err)
+		}
+
+		if err = client.Close(); err != nil {
+			t.Logf("failed closing client host: %s", err)
+		}
+
+		if err = server.Close(); err != nil {
+			t.Logf("failed closing server host: %s", err)
+		}
+	})
+
+	ctx := context.Background()
+	err = client.Connect(ctx, peer.AddrInfo{
+		ID:    server.ID(),
+		Addrs: server.Addrs(),
+	})
+	require.NoError(t, err)
+
+	return client, serverDHT
+}
+
+func TestDHT_handleStream_find_node(t *testing.T) {
+	ctx := context.Background()
+	
client, serverDHT := newPeerPair(t) + + s, err := client.NewStream(ctx, serverDHT.host.ID(), serverDHT.cfg.ProtocolID) + require.NoError(t, err) + + trw := newTestReadWriter(s) + + req := &pb.Message{ + Type: pb.Message_FIND_NODE, + Key: []byte("random-key"), + } + + err = trw.WriteMsg(req) + require.NoError(t, err) + + resp, err := trw.ReadMsg() + require.NoError(t, err) + + assert.Equal(t, pb.Message_FIND_NODE, resp.Type) + assert.Equal(t, req.Key, resp.Key) + assert.Len(t, resp.CloserPeers, 20) + assert.Len(t, resp.ProviderPeers, 0) + + assert.NoError(t, s.Close()) +} + +func TestDHT_handleStream_unknown_message_type(t *testing.T) { + ctx := context.Background() + client, serverDHT := newPeerPair(t) + + s, err := client.NewStream(ctx, serverDHT.host.ID(), serverDHT.cfg.ProtocolID) + require.NoError(t, err) + + trw := newTestReadWriter(s) + + req := &pb.Message{ + Type: pb.Message_MessageType(99), + Key: []byte("random-key"), + } + + err = trw.WriteMsg(req) + require.NoError(t, err) + _, err = trw.ReadMsg() + assert.ErrorIs(t, err, network.ErrReset) +} + +func TestDHT_handleStream_reset_stream_without_message(t *testing.T) { + ctx := context.Background() + client, serverDHT := newPeerPair(t) + + s, err := client.NewStream(ctx, serverDHT.host.ID(), serverDHT.cfg.ProtocolID) + require.NoError(t, err) + + err = s.Reset() + require.NoError(t, err) +} + +func TestDHT_handleStream_garbage_data(t *testing.T) { + ctx := context.Background() + client, serverDHT := newPeerPair(t) + + s, err := client.NewStream(ctx, serverDHT.host.ID(), serverDHT.cfg.ProtocolID) + require.NoError(t, err) + + trw := newTestReadWriter(s) + + _, err = trw.w.Write([]byte("garbage-data")) + require.NoError(t, err) + + _, err = trw.ReadMsg() + assert.ErrorIs(t, err, network.ErrReset) +} + +func TestDHT_handleStream_write_nil_data(t *testing.T) { + ctx := context.Background() + client, serverDHT := newPeerPair(t) + + s, err := client.NewStream(ctx, serverDHT.host.ID(), serverDHT.cfg.ProtocolID) + require.NoError(t, err) + + trw := newTestReadWriter(s) + + _, err = trw.w.Write(nil) + require.NoError(t, err) + + _, err = trw.ReadMsg() + assert.ErrorIs(t, err, network.ErrReset) +} + +func TestDHT_handleStream_graceful_close(t *testing.T) { + ctx := context.Background() + client, serverDHT := newPeerPair(t) + + s, err := client.NewStream(ctx, serverDHT.host.ID(), serverDHT.cfg.ProtocolID) + require.NoError(t, err) + + err = s.Close() + require.NoError(t, err) + + runtime.Gosched() +} From bdac2914699dbc54476dd4efd02b9b51ad01bcd4 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Thu, 24 Aug 2023 16:56:13 +0200 Subject: [PATCH 28/64] add logErr helper method --- v2/dht.go | 11 +++++++++++ v2/stream.go | 6 +++--- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/v2/dht.go b/v2/dht.go index 150cbf98..a276340f 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -218,3 +218,14 @@ func (d *DHT) setClientMode() { } } } + +// logErr is a helper method that uses the slogger of the DHT and writes a +// warning log line with the given message alongside the error. If the error +// is nil, this method is a no-op. 
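+//
+// A typical call site looks like this (editor's illustration; the same
+// pattern appears in the stream.go hunk below):
+//
+//	d.logErr(s.Reset(), "failed to reset stream")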
+func (d *DHT) logErr(err error, msg string) { + if err == nil { + return + } + + d.log.Warn(msg, "err", err.Error()) +} diff --git a/v2/stream.go b/v2/stream.go index 4cd391a9..b5fb1341 100644 --- a/v2/stream.go +++ b/v2/stream.go @@ -34,16 +34,16 @@ func (d *DHT) streamHandler(s network.Stream) { if err := s.Scope().SetService(ServiceName); err != nil { d.log.LogAttrs(ctx, slog.LevelWarn, "error attaching stream to DHT service", slog.String("err", err.Error())) - _ = s.Reset() + d.logErr(s.Reset(), "failed to reset stream") return } if err := d.handleNewStream(ctx, s); err != nil { // If we exited with an error, let the remote peer know. - _ = s.Reset() + d.logErr(s.Reset(), "failed to reset stream") } else { // If we exited without an error, close gracefully. - _ = s.Close() + d.logErr(s.Close(), "failed to close stream") } } From 50f8d00f96a7a3be2d31fa8c1ac34148e1f4a144 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Thu, 24 Aug 2023 18:05:35 +0200 Subject: [PATCH 29/64] babysteps towards routing.Routing and kademlia.Router --- v2/backend_provider.go | 8 +-- v2/dht.go | 7 +-- v2/go.mod | 34 +++++++--- v2/go.sum | 129 ++++++++++++++++++++++++++++++++++++-- v2/handlers.go | 4 +- v2/kad.go | 10 ++- v2/router.go | 41 ++++++++++++ v2/routing.go | 138 ++++++++++++++++++++++++++++++++++++++--- v2/routing_test.go | 1 + 9 files changed, 339 insertions(+), 33 deletions(-) create mode 100644 v2/router.go create mode 100644 v2/routing_test.go diff --git a/v2/backend_provider.go b/v2/backend_provider.go index 25042f6a..61d6aa6c 100644 --- a/v2/backend_provider.go +++ b/v2/backend_provider.go @@ -11,18 +11,16 @@ import ( "time" "github.com/benbjohnson/clock" - - "github.com/libp2p/go-libp2p-kad-dht/v2/metrics" - "go.opencensus.io/stats" - "go.opencensus.io/tag" - lru "github.com/hashicorp/golang-lru/v2" ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/autobatch" dsq "github.com/ipfs/go-datastore/query" + "github.com/libp2p/go-libp2p-kad-dht/v2/metrics" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" "github.com/multiformats/go-base32" + "go.opencensus.io/stats" + "go.opencensus.io/tag" "golang.org/x/exp/slog" ) diff --git a/v2/dht.go b/v2/dht.go index a276340f..a5088fbf 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -4,13 +4,12 @@ import ( "fmt" "sync" + "github.com/iand/zikade/kademlia" "github.com/ipfs/go-datastore/trace" - "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" ma "github.com/multiformats/go-multiaddr" - "github.com/plprobelab/go-kademlia/coord" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "golang.org/x/exp/slog" @@ -32,7 +31,7 @@ type DHT struct { mode mode // kad is a reference to the go-kademlia coordinator - kad *coord.Coordinator[key.Key256, ma.Multiaddr] + kad *kademlia.Dht[key.Key256, ma.Multiaddr] // rt holds a reference to the routing table implementation. This can be // configured via the Config struct. @@ -110,7 +109,7 @@ func New(h host.Host, cfg *Config) (*DHT, error) { } // instantiate a new Kademlia DHT coordinator. 
- d.kad, err = coord.NewCoordinator[key.Key256, ma.Multiaddr](nid, nil, nil, d.rt, cfg.Kademlia) + d.kad, err = kademlia.NewDht[key.Key256, ma.Multiaddr](nid, d, d.rt, nil) if err != nil { return nil, fmt.Errorf("new coordinator: %w", err) } diff --git a/v2/go.mod b/v2/go.mod index e71ef11b..6107c289 100644 --- a/v2/go.mod +++ b/v2/go.mod @@ -3,21 +3,34 @@ module github.com/libp2p/go-libp2p-kad-dht/v2 go 1.20 require ( + github.com/benbjohnson/clock v1.3.5 + github.com/gogo/protobuf v1.3.2 + github.com/hashicorp/golang-lru/v2 v2.0.2 + github.com/iand/zikade v0.0.0-20230824143824-d11f2132b4fb + github.com/ipfs/boxo v0.12.0 github.com/ipfs/go-cid v0.4.1 + github.com/ipfs/go-datastore v0.6.0 + github.com/ipfs/go-ds-leveldb v0.5.0 github.com/ipfs/go-log/v2 v2.5.1 - github.com/libp2p/go-libp2p v0.27.7 + github.com/libp2p/go-libp2p v0.28.2 + github.com/libp2p/go-libp2p-record v0.2.0 + github.com/libp2p/go-msgio v0.3.0 + github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-multiaddr v0.11.0 - github.com/plprobelab/go-kademlia v0.0.0-20230813192759-e4050185fbde + github.com/plprobelab/go-kademlia v0.0.0-20230823114513-9b9e606066c9 + github.com/stretchr/testify v1.8.4 + go.opencensus.io v0.24.0 + go.opentelemetry.io/otel v1.16.0 go.uber.org/zap/exp v0.1.0 golang.org/x/exp v0.0.0-20230725012225-302865e7556b ) require ( - github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/docker/go-units v0.5.0 // indirect @@ -28,15 +41,18 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect + github.com/google/uuid v1.3.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/huin/goupnp v1.2.0 // indirect + github.com/ipld/go-ipld-prime v0.21.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect + github.com/jbenet/goprocess v0.1.4 // indirect github.com/klauspost/compress v1.16.7 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/koron/go-ssdp v0.0.4 // indirect @@ -44,7 +60,6 @@ require ( github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect - github.com/libp2p/go-msgio v0.3.0 // indirect github.com/libp2p/go-nat v0.2.0 // indirect github.com/libp2p/go-netroute v0.2.1 // indirect github.com/libp2p/go-reuseport v0.3.0 // indirect @@ -57,7 +72,6 @@ require ( github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mr-tron/base58 v1.2.0 // indirect - github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-dns v0.3.1 
// indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect @@ -66,10 +80,13 @@ require ( github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-multistream v0.4.1 // indirect github.com/multiformats/go-varint v0.0.7 // indirect + github.com/nxadm/tail v1.4.8 // indirect github.com/onsi/ginkgo/v2 v2.11.0 // indirect github.com/opencontainers/runtime-spec v1.0.2 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_golang v1.16.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.44.0 // indirect @@ -81,7 +98,7 @@ require ( github.com/quic-go/webtransport-go v0.5.3 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - go.opentelemetry.io/otel v1.16.0 // indirect + github.com/syndtr/goleveldb v1.0.0 // indirect go.opentelemetry.io/otel/metric v1.16.0 // indirect go.opentelemetry.io/otel/trace v1.16.0 // indirect go.uber.org/atomic v1.11.0 // indirect @@ -97,5 +114,8 @@ require ( golang.org/x/text v0.11.0 // indirect golang.org/x/tools v0.11.0 // indirect google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect ) + +replace github.com/ipfs/go-datastore v0.6.0 => github.com/dennis-tra/go-datastore v0.0.0-20230823171431-12ac85aaa42a diff --git a/v2/go.sum b/v2/go.sum index 65ee9d28..c32c6ebd 100644 --- a/v2/go.sum +++ b/v2/go.sum @@ -18,10 +18,12 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= @@ -39,6 +41,8 @@ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6Uh github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/dennis-tra/go-datastore v0.0.0-20230823171431-12ac85aaa42a h1:YnrW4Kcy7kTIJRfL3Xg7+fIMS17izs0WWH2GdYwYhNs= +github.com/dennis-tra/go-datastore v0.0.0-20230823171431-12ac85aaa42a/go.mod 
h1:3Et7HSjOA8tPu9OjYuDZxLAgBLfvlNMD4r8BIuri9eo= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= @@ -46,12 +50,19 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= @@ -62,6 +73,7 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= @@ -71,6 +83,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da 
h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -78,12 +92,27 @@ github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= @@ -94,28 +123,53 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 h1:n6vlPhxsA+BW/XsS5+uqi7GyzaLa5MH7qlSLBZtRdiA= github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/golang-lru/v2 v2.0.2 h1:Dwmkdr5Nc/oBiXgJS3CDHNhJtIHkuZ3DZF5twqnfBdU= +github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/iand/zikade v0.0.0-20230824143824-d11f2132b4fb h1:L0sxl/vHUf/wdEX6+QJGC0cQsnn2AglFL0qbJvv8+64= +github.com/iand/zikade v0.0.0-20230824143824-d11f2132b4fb/go.mod h1:9BszmzAjw3qghO/oCaTvIhQUHb3h+F0EAHecClvcUnA= +github.com/ipfs/boxo v0.12.0 h1:AXHg/1ONZdRQHQLgG5JHsSC3XoE4DjCAMgK+asZvUcQ= +github.com/ipfs/boxo v0.12.0/go.mod h1:xAnfiU6PtxWCnRqu7dcXQ10bB5/kvI1kXRotuGqGBhg= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= +github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= +github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod 
h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= +github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -126,6 +180,7 @@ github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZY github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -138,10 +193,12 @@ github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38y github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.29.2 h1:uPw/c8hOxoLP/KhFnzlc5Ejqf+OmAL1dwIsqE31WBtY= -github.com/libp2p/go-libp2p v0.29.2/go.mod h1:OU7nSq0aEZMsV2wY8nXn1+XNNt9q2UiR8LjW3Kmp2UE= +github.com/libp2p/go-libp2p v0.28.2 h1:lO/g0ccVru6nUVHyLE7C1VRr7B2AFp9cvHhf+l+Te6w= +github.com/libp2p/go-libp2p v0.28.2/go.mod h1:fOLgCNgLiWFdmtXyQBwmuCpukaYOA+yw4rnBiScDNmI= github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= +github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= +github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= @@ -208,8 +265,14 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nxadm/tail v1.4.8 
h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -219,14 +282,17 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhM github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/plprobelab/go-kademlia v0.0.0-20230813192759-e4050185fbde h1:UiucUJDpMRYfuaLYfO2/euSyBjH/5okj2PaUwCq7DtE= -github.com/plprobelab/go-kademlia v0.0.0-20230813192759-e4050185fbde/go.mod h1:OMu6Kyh5AetV3uLRVSZlp6WcwrZUn3nyRFaRuJxVWJQ= +github.com/plprobelab/go-kademlia v0.0.0-20230823114513-9b9e606066c9 h1:qqrJgUNOCAozZDkL0gH57FUi+aXj/d/SdldaLAZUFUU= +github.com/plprobelab/go-kademlia v0.0.0-20230823114513-9b9e606066c9/go.mod h1:OMu6Kyh5AetV3uLRVSZlp6WcwrZUn3nyRFaRuJxVWJQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= +github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= @@ -247,6 +313,7 @@ github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2Ep github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/russross/blackfriday v1.5.2/go.mod 
h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -275,31 +342,49 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= +github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -309,9 +394,11 @@ go.uber.org/fx v1.20.0 h1:ZMC/pnRvhsthOZh9MZjMq5U8Or3mA9zBSPaLnzs3ihQ= go.uber.org/fx v1.20.0/go.mod h1:qCUj0btiR3/JnanEr1TYEePfSw6o/4qYJscgvzQ5Ub0= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= @@ -322,6 +409,7 @@ golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+ golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -335,8 +423,10 @@ golang.org/x/exp v0.0.0-20230725012225-302865e7556b/go.mod h1:FXUEEKJgO7OQYeo8N0 golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= 
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -356,6 +446,7 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= @@ -383,6 +474,7 @@ golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -411,6 +503,11 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -435,29 +532,53 @@ google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod 
h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= diff --git a/v2/handlers.go b/v2/handlers.go index 16e08a7f..052b9639 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -105,7 +105,7 @@ func (d *DHT) handleGetValue(ctx context.Context, remote peer.ID, req *pb.Messag resp := &pb.Message{ Type: pb.Message_GET_VALUE, Key: req.GetKey(), - CloserPeers: d.closerPeers(ctx, remote, key.NewSha256(req.GetKey())), + CloserPeers: d.closerPeers(ctx, remote, newSHA256Key(req.GetKey())), } ns, path, err := record.SplitKey(k) // get namespace (prefix of the key) @@ -220,7 +220,7 @@ func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Me resp := &pb.Message{ Type: pb.Message_GET_PROVIDERS, Key: k, - CloserPeers: d.closerPeers(ctx, remote, key.NewSha256(k)), + CloserPeers: d.closerPeers(ctx, remote, newSHA256Key(k)), ProviderPeers: pbProviders, } diff --git a/v2/kad.go b/v2/kad.go index fd29ec48..77d15cb7 100644 --- a/v2/kad.go +++ b/v2/kad.go @@ -1,6 +1,8 @@ package dht import ( + "crypto/sha256" + "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" @@ -19,7 +21,7 @@ var _ kad.NodeID[key.Key256] = nodeID("") // hashes of, in this case, peer.IDs. This means this Key method takes // the peer.ID, hashes it and constructs a 256-bit key. 
func (p nodeID) Key() key.Key256 {
-	return key.NewSha256([]byte(p))
+	return newSHA256Key([]byte(p))
 }
 
 // String calls String on the underlying peer.ID and returns a string like
@@ -49,3 +51,9 @@ func (ai nodeInfo) Addresses() []ma.Multiaddr {
 	copy(addrs, ai.info.Addrs)
 	return addrs
 }
+
+// newSHA256Key SHA256 hashes the given bytes and returns a new 256-bit key.
+func newSHA256Key(data []byte) key.Key256 {
+	h := sha256.Sum256(data)
+	return key.NewKey256(h[:])
+}
diff --git a/v2/router.go b/v2/router.go
new file mode 100644
index 00000000..1879795d
--- /dev/null
+++ b/v2/router.go
@@ -0,0 +1,41 @@
+package dht
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/iand/zikade/kademlia"
+	"github.com/libp2p/go-libp2p/core/peer"
+	ma "github.com/multiformats/go-multiaddr"
+	"github.com/plprobelab/go-kademlia/kad"
+	"github.com/plprobelab/go-kademlia/key"
+	"github.com/plprobelab/go-kademlia/network/address"
+)
+
+var _ kademlia.Router[key.Key256, ma.Multiaddr] = (*DHT)(nil)
+
+func (d *DHT) SendMessage(ctx context.Context, to kad.NodeInfo[key.Key256, ma.Multiaddr], protoID address.ProtocolID, req kad.Request[key.Key256, ma.Multiaddr]) (kad.Response[key.Key256, ma.Multiaddr], error) {
+	s, err := d.host.NewStream(ctx, peer.ID(to.ID().(nodeID)), d.cfg.ProtocolID)
+	if err != nil {
+		return nil, fmt.Errorf("new stream: %w", err)
+	}
+	// wrap the close in a closure; otherwise defer would evaluate s.Close()
+	// immediately and close the stream right away
+	defer func() { d.logErr(s.Close(), "failed to close stream") }()
+
+	return nil, nil
+}
+
+func (d *DHT) AddNodeInfo(ctx context.Context, info kad.NodeInfo[key.Key256, ma.Multiaddr], ttl time.Duration) error {
+	// TODO implement me
+	panic("implement me")
+}
+
+func (d *DHT) GetNodeInfo(ctx context.Context, id kad.NodeID[key.Key256]) (kad.NodeInfo[key.Key256, ma.Multiaddr], error) {
+	// TODO implement me
+	panic("implement me")
+}
+
+func (d *DHT) GetClosestNodes(ctx context.Context, to kad.NodeInfo[key.Key256, ma.Multiaddr], target key.Key256) ([]kad.NodeInfo[key.Key256, ma.Multiaddr], error) {
+	// TODO implement me
+	panic("implement me")
+}
diff --git a/v2/routing.go b/v2/routing.go
index d277fa9c..6107a30d 100644
--- a/v2/routing.go
+++ b/v2/routing.go
@@ -2,42 +2,160 @@ package dht
 
 import (
 	"context"
+	"errors"
+	"fmt"
+	"time"
 
 	"github.com/ipfs/go-cid"
+	ds "github.com/ipfs/go-datastore"
+	record "github.com/libp2p/go-libp2p-record"
+	recpb "github.com/libp2p/go-libp2p-record/pb"
+	"github.com/libp2p/go-libp2p/core/network"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/libp2p/go-libp2p/core/routing"
+	"go.opentelemetry.io/otel/attribute"
+	otel "go.opentelemetry.io/otel/trace"
 )
 
-// Assert that IPFS assumptions about interfaces aren't broken. These aren't a
-// guarantee, but we can use them to aid refactoring.
-var (
-	_ routing.Routing = (*DHT)(nil)
-)
+var _ routing.Routing = (*DHT)(nil)
+
+func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) {
+	ctx, span := tracer.Start(ctx, "DHT.FindPeer")
+	defer span.End()
+
+	// First check locally. If we are or were recently connected to the peer,
+	// return the addresses from our peerstore, provided it actually holds
+	// any for them.
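+	// (network.Connected means we have a live connection to the peer;
+	// network.CanConnect means we recently connected to the peer and
+	// terminated gracefully, so the peerstore addresses are likely still
+	// valid.)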
+	switch d.host.Network().Connectedness(id) {
+	case network.Connected, network.CanConnect:
+		addrInfo := d.host.Peerstore().PeerInfo(id)
+		if addrInfo.ID != "" && len(addrInfo.Addrs) > 0 {
+			return addrInfo, nil
+		}
+	default:
+		// we're not connected to the peer, so fall through and look it up
+		// in the network
+	}
+
+	// TODO reach out to Zikade
-func (d *DHT) Provide(ctx context.Context, cid cid.Cid, b bool) error { panic("implement me") }
-func (d *DHT) FindProvidersAsync(ctx context.Context, cid cid.Cid, i int) <-chan peer.AddrInfo {
+func (d *DHT) Provide(ctx context.Context, c cid.Cid, brdcst bool) error {
+	ctx, span := tracer.Start(ctx, "DHT.Provide", otel.WithAttributes(attribute.String("cid", c.String())))
+	defer span.End()
+
+	// verify if this DHT supports provider records by checking if a "providers"
+	// backend is registered.
+	b, found := d.backends[namespaceProviders]
+	if !found {
+		return routing.ErrNotSupported
+	}
+
+	// verify that it's a "defined" CID (not empty)
+	if !c.Defined() {
+		return fmt.Errorf("invalid cid: undefined")
+	}
+
+	// store ourselves as one provider for that CID
+	_, err := b.Store(ctx, string(c.Hash()), peer.AddrInfo{ID: d.host.ID()})
+	if err != nil {
+		return fmt.Errorf("storing own provider record: %w", err)
+	}
+
+	// if broadcast is "false" we won't query the DHT
+	if !brdcst {
+		return nil
+	}
+
+	// TODO reach out to Zikade
 	panic("implement me")
 }
 
-func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) {
+func (d *DHT) FindProvidersAsync(ctx context.Context, c cid.Cid, count int) <-chan peer.AddrInfo {
+	ctx, span := tracer.Start(ctx, "DHT.FindProvidersAsync", otel.WithAttributes(attribute.String("cid", c.String()), attribute.Int("count", count)))
+	defer span.End()
+
+	// verify if this DHT supports provider records by checking if a "providers"
+	// backend is registered.
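+	// (The backends map is keyed by record namespace; with the default
+	// [ProtocolIPFS] configuration it holds the "ipns", "pk", and
+	// "providers" backends.)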
+ _, found := d.backends[namespaceProviders] + if !found || !c.Defined() { + peerOut := make(chan peer.AddrInfo) + close(peerOut) + return peerOut + } + + // TODO reach out to Zikade panic("implement me") } -func (d *DHT) PutValue(ctx context.Context, s string, bytes []byte, option ...routing.Option) error { +func (d *DHT) PutValue(ctx context.Context, key string, value []byte, option ...routing.Option) error { + ctx, span := tracer.Start(ctx, "DHT.PutValue") + defer span.End() + + ns, path, err := record.SplitKey(key) + if err != nil { + return fmt.Errorf("splitting key: %w", err) + } + + b, found := d.backends[ns] + if !found { + return routing.ErrNotSupported + } + + rec := record.MakePutRecord(key, value) + rec.TimeReceived = time.Now().UTC().Format(time.RFC3339Nano) + + _, err = b.Store(ctx, path, rec) + if err != nil { + return fmt.Errorf("store record locally: %w", err) + } + + // TODO reach out to Zikade panic("implement me") } -func (d *DHT) GetValue(ctx context.Context, s string, option ...routing.Option) ([]byte, error) { +func (d *DHT) GetValue(ctx context.Context, key string, option ...routing.Option) ([]byte, error) { + ctx, span := tracer.Start(ctx, "DHT.GetValue") + defer span.End() + + ns, path, err := record.SplitKey(key) + if err != nil { + return nil, fmt.Errorf("splitting key: %w", err) + } + + b, found := d.backends[ns] + if !found { + return nil, routing.ErrNotSupported + } + + val, err := b.Fetch(ctx, path) + if err != nil { + if !errors.Is(err, ds.ErrNotFound) { + return nil, fmt.Errorf("fetch value locally: %w", err) + } + } else { + rec, ok := val.(*recpb.Record) + if !ok { + return nil, fmt.Errorf("expected *recpb.Record from backend, got: %T", val) + } + return rec.GetValue(), nil + } + + // TODO reach out to Zikade panic("implement me") } func (d *DHT) SearchValue(ctx context.Context, s string, option ...routing.Option) (<-chan []byte, error) { + ctx, span := tracer.Start(ctx, "DHT.SearchValue") + defer span.End() + panic("implement me") } func (d *DHT) Bootstrap(ctx context.Context) error { + ctx, span := tracer.Start(ctx, "DHT.Bootstrap") + defer span.End() + panic("implement me") } diff --git a/v2/routing_test.go b/v2/routing_test.go new file mode 100644 index 00000000..745a87dc --- /dev/null +++ b/v2/routing_test.go @@ -0,0 +1 @@ +package dht From 5a30d89d75d7a961ab500662605e906d00bd292b Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Thu, 24 Aug 2023 18:10:44 +0200 Subject: [PATCH 30/64] update deps --- v2/go.mod | 32 ++++++++++++++---------------- v2/go.sum | 58 +++++++++++++++++++++++++++---------------------------- 2 files changed, 43 insertions(+), 47 deletions(-) diff --git a/v2/go.mod b/v2/go.mod index 6107c289..32888ee7 100644 --- a/v2/go.mod +++ b/v2/go.mod @@ -5,14 +5,14 @@ go 1.20 require ( github.com/benbjohnson/clock v1.3.5 github.com/gogo/protobuf v1.3.2 - github.com/hashicorp/golang-lru/v2 v2.0.2 + github.com/hashicorp/golang-lru/v2 v2.0.5 github.com/iand/zikade v0.0.0-20230824143824-d11f2132b4fb github.com/ipfs/boxo v0.12.0 github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-datastore v0.6.0 github.com/ipfs/go-ds-leveldb v0.5.0 github.com/ipfs/go-log/v2 v2.5.1 - github.com/libp2p/go-libp2p v0.28.2 + github.com/libp2p/go-libp2p v0.30.0 github.com/libp2p/go-libp2p-record v0.2.0 github.com/libp2p/go-msgio v0.3.0 github.com/multiformats/go-base32 v0.1.0 @@ -21,8 +21,9 @@ require ( github.com/stretchr/testify v1.8.4 go.opencensus.io v0.24.0 go.opentelemetry.io/otel v1.16.0 + go.opentelemetry.io/otel/trace v1.16.0 go.uber.org/zap/exp v0.1.0 - 
golang.org/x/exp v0.0.0-20230725012225-302865e7556b + golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 ) require ( @@ -45,7 +46,7 @@ require ( github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect + github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect github.com/google/uuid v1.3.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/huin/goupnp v1.2.0 // indirect @@ -62,7 +63,7 @@ require ( github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect github.com/libp2p/go-nat v0.2.0 // indirect github.com/libp2p/go-netroute v0.2.1 // indirect - github.com/libp2p/go-reuseport v0.3.0 // indirect + github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v4 v4.0.1 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-isatty v0.0.19 // indirect @@ -82,7 +83,7 @@ require ( github.com/multiformats/go-varint v0.0.7 // indirect github.com/nxadm/tail v1.4.8 // indirect github.com/onsi/ginkgo/v2 v2.11.0 // indirect - github.com/opencontainers/runtime-spec v1.0.2 // indirect + github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -92,27 +93,24 @@ require ( github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.11.0 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/qtls-go1-19 v0.3.3 // indirect - github.com/quic-go/qtls-go1-20 v0.3.0 // indirect - github.com/quic-go/quic-go v0.36.4 // indirect + github.com/quic-go/qtls-go1-20 v0.3.2 // indirect + github.com/quic-go/quic-go v0.37.6 // indirect github.com/quic-go/webtransport-go v0.5.3 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/syndtr/goleveldb v1.0.0 // indirect go.opentelemetry.io/otel/metric v1.16.0 // indirect - go.opentelemetry.io/otel/trace v1.16.0 // indirect - go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.17.0 // indirect go.uber.org/fx v1.20.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.11.0 // indirect + go.uber.org/zap v1.25.0 // indirect + golang.org/x/crypto v0.12.0 // indirect golang.org/x/mod v0.12.0 // indirect - golang.org/x/net v0.12.0 // indirect + golang.org/x/net v0.14.0 // indirect golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect - golang.org/x/tools v0.11.0 // indirect + golang.org/x/sys v0.11.0 // indirect + golang.org/x/text v0.12.0 // indirect + golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect diff --git a/v2/go.sum b/v2/go.sum index c32c6ebd..24ae5122 100644 --- a/v2/go.sum +++ b/v2/go.sum @@ -121,8 +121,8 @@ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod 
h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 h1:n6vlPhxsA+BW/XsS5+uqi7GyzaLa5MH7qlSLBZtRdiA= -github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= +github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBBos92HalKpaGKHrp+3Uo6yTodo= +github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -136,8 +136,8 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/hashicorp/golang-lru/v2 v2.0.2 h1:Dwmkdr5Nc/oBiXgJS3CDHNhJtIHkuZ3DZF5twqnfBdU= -github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= +github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= @@ -193,8 +193,8 @@ github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38y github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.28.2 h1:lO/g0ccVru6nUVHyLE7C1VRr7B2AFp9cvHhf+l+Te6w= -github.com/libp2p/go-libp2p v0.28.2/go.mod h1:fOLgCNgLiWFdmtXyQBwmuCpukaYOA+yw4rnBiScDNmI= +github.com/libp2p/go-libp2p v0.30.0 h1:9EZwFtJPFBcs/yJTnP90TpN1hgrT/EsFfM+OZuwV87U= +github.com/libp2p/go-libp2p v0.30.0/go.mod h1:nr2g5V7lfftwgiJ78/HrID+pwvayLyqKCEirT2Y3Byg= github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= @@ -206,8 +206,8 @@ github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= -github.com/libp2p/go-reuseport v0.3.0 h1:iiZslO5byUYZEg9iCwJGf5h+sf1Agmqx2V2FDjPyvUw= -github.com/libp2p/go-reuseport v0.3.0/go.mod h1:laea40AimhtfEqysZ71UpYj4S+R9VpH8PgqLo7L+SwI= +github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= +github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= 
github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= @@ -274,8 +274,9 @@ github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= -github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= +github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= @@ -303,12 +304,10 @@ github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuR github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/qtls-go1-19 v0.3.3 h1:wznEHvJwd+2X3PqftRha0SUKmGsnb6dfArMhy9PeJVE= -github.com/quic-go/qtls-go1-19 v0.3.3/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= -github.com/quic-go/qtls-go1-20 v0.3.0 h1:NrCXmDl8BddZwO67vlvEpBTwT89bJfKYygxv4HQvuDk= -github.com/quic-go/qtls-go1-20 v0.3.0/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= -github.com/quic-go/quic-go v0.36.4 h1:CXn/ZLN5Vntlk53fjR+kUMC8Jt7flfQe+I5Ty5A+k0o= -github.com/quic-go/quic-go v0.36.4/go.mod h1:qxQumdeKw5GmWs1OsTZZnOxzSI+RJWuhf1O8FN35L2o= +github.com/quic-go/qtls-go1-20 v0.3.2 h1:rRgN3WfnKbyik4dBV8A6girlJVxGand/d+jVKbQq5GI= +github.com/quic-go/qtls-go1-20 v0.3.2/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= +github.com/quic-go/quic-go v0.37.6 h1:2IIUmQzT5YNxAiaPGjs++Z4hGOtIR0q79uS5qE9ccfY= +github.com/quic-go/quic-go v0.37.6/go.mod h1:YsbH1r4mSHPJcLF4k4zruUkLBqctEMBDR6VPvcYjIsU= github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU= github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= @@ -387,7 +386,6 @@ go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLk go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= go.uber.org/fx v1.20.0 h1:ZMC/pnRvhsthOZh9MZjMq5U8Or3mA9zBSPaLnzs3ihQ= @@ -400,8 +398,8 @@ go.uber.org/multierr v1.11.0 
h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= +go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= go.uber.org/zap/exp v0.1.0 h1:Ol9zQNvAEAgFHSBiR5LlwS9Xq8u5QF+7HBwNHUB8rcI= go.uber.org/zap/exp v0.1.0/go.mod h1:z/0T3As39ttolxZGOsvk1OEvQfwwfTZpmV9YTp+VAkc= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= @@ -415,11 +413,11 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230725012225-302865e7556b h1:tK7yjGqVRzYdXsBcfD2MLhFAhHfDgGLm2rY1ub7FA9k= -golang.org/x/exp v0.0.0-20230725012225-302865e7556b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -450,8 +448,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= -golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ 
-486,14 +484,14 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -514,8 +512,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8= -golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 4660bbc13a64682703efe6cc0537aa9ff0aa1638 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Fri, 25 Aug 2023 09:47:42 +0200 Subject: [PATCH 31/64] add test for unregistered message type --- v2/config.go | 12 +++++++----- v2/handlers.go | 3 ++- v2/stream_test.go | 26 ++++++++++++++++++++++++++ 3 files changed, 35 insertions(+), 6 deletions(-) diff --git a/v2/config.go b/v2/config.go index 6d6d8a6c..cc6a6af9 100644 --- a/v2/config.go +++ b/v2/config.go @@ -113,12 +113,14 @@ type Config struct { // BucketSize determines the number of closer peers to return BucketSize int - // ProtocolID represents the DHT protocol we can query with and respond to. + // ProtocolID represents the DHT [protocol] we can query with and respond to. 
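+	// [DefaultConfig] sets this to [ProtocolIPFS].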
+ // + // [protocol]: https://docs.libp2p.io/concepts/fundamentals/protocols/ ProtocolID protocol.ID // RoutingTable holds a reference to the specific routing table // implementation that this DHT should use. If this field is nil, the - // triert.TrieRT routing table will be used. This field will be nil + // [triert.TrieRT] routing table will be used. This field will be nil // in the default configuration because a routing table requires information // about the local node. RoutingTable kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]] @@ -165,10 +167,10 @@ func DefaultConfig() *Config { return &Config{ Mode: ModeOptAutoClient, Kademlia: coord.DefaultConfig(), - BucketSize: 20, + BucketSize: 20, // MAGIC ProtocolID: ProtocolIPFS, - RoutingTable: nil, // nil because a routing table requires information about the local node. triert.TrieRT will be used if this field is nil. - Backends: map[string]Backend{}, + RoutingTable: nil, // nil because a routing table requires information about the local node. triert.TrieRT will be used if this field is nil. + Backends: map[string]Backend{}, // if empty and [ProtocolIPFS] is used, it'll be populated with the ipns, pk and providers backends Datastore: nil, Logger: slog.New(zapslog.NewHandler(logging.Logger("dht").Desugar().Core())), TimeoutStreamIdle: time.Minute, // MAGIC diff --git a/v2/handlers.go b/v2/handlers.go index 052b9639..a0215433 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -133,7 +133,8 @@ func (d *DHT) handleGetValue(ctx context.Context, remote peer.ID, req *pb.Messag resp.Record = rec return resp, nil } - // the returned value wasn't a record + // the returned value wasn't a record, which could be the case if the + // key was prefixed with "providers." pset, ok := fetched.(*providerSet) if ok { diff --git a/v2/stream_test.go b/v2/stream_test.go index f2d9a362..cfdfb24f 100644 --- a/v2/stream_test.go +++ b/v2/stream_test.go @@ -2,6 +2,7 @@ package dht import ( "context" + "fmt" "runtime" "testing" @@ -132,6 +133,31 @@ func TestDHT_handleStream_unknown_message_type(t *testing.T) { err = trw.WriteMsg(req) require.NoError(t, err) + + _, err = trw.ReadMsg() + assert.ErrorIs(t, err, network.ErrReset) +} + +func TestDHT_handleStream_supported_but_unregistered_message_type(t *testing.T) { + ctx := context.Background() + client, serverDHT := newPeerPair(t) + + // unregister providers + delete(serverDHT.backends, namespaceProviders) + + s, err := client.NewStream(ctx, serverDHT.host.ID(), serverDHT.cfg.ProtocolID) + require.NoError(t, err) + + trw := newTestReadWriter(s) + + req := &pb.Message{ + Type: pb.Message_GET_PROVIDERS, + Key: []byte(fmt.Sprintf("/%s/random-key", namespaceProviders)), + } + + err = trw.WriteMsg(req) + require.NoError(t, err) + _, err = trw.ReadMsg() assert.ErrorIs(t, err, network.ErrReset) } From 9f88f20acaf1598db18ad7ac49ef47d4b4cc2819 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Fri, 25 Aug 2023 10:08:02 +0200 Subject: [PATCH 32/64] add comments --- v2/backend_provider.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/v2/backend_provider.go b/v2/backend_provider.go index 61d6aa6c..c2a6b3df 100644 --- a/v2/backend_provider.go +++ b/v2/backend_provider.go @@ -105,12 +105,12 @@ func DefaultProviderBackendConfig() *ProvidersBackendConfig { return &ProvidersBackendConfig{ clk: clock.New(), ProvideValidity: 48 * time.Hour, // empirically measured in: https://github.com/plprobelab/network-measurements/blob/master/results/rfm17-provider-record-liveness.md - AddressTTL: 24 * 
time.Hour, - BatchSize: 256, // MAGIC - CacheSize: 256, // MAGIC - GCInterval: time.Hour, // MAGIC + AddressTTL: 24 * time.Hour, // MAGIC + BatchSize: 256, // MAGIC + CacheSize: 256, // MAGIC + GCInterval: time.Hour, // MAGIC Logger: slog.Default(), - AddressFilter: AddrFilterIdentity, + AddressFilter: AddrFilterIdentity, // verify alignment with [Config.AddressFilter] } } From 9559064b72dcc09a73fae8b31f9fb2ade221b903 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Fri, 25 Aug 2023 14:37:33 +0200 Subject: [PATCH 33/64] clean up backends in DHT close --- v2/backend_provider.go | 13 ++++++++++++- v2/backend_provider_test.go | 1 + v2/config.go | 10 ++++++++++ v2/dht.go | 29 ++++++++++++++++++++++++++++- 4 files changed, 51 insertions(+), 2 deletions(-) diff --git a/v2/backend_provider.go b/v2/backend_provider.go index c2a6b3df..8ce8477a 100644 --- a/v2/backend_provider.go +++ b/v2/backend_provider.go @@ -4,6 +4,7 @@ import ( "context" "encoding/binary" "fmt" + "io" "path" "strconv" "strings" @@ -59,7 +60,10 @@ type ProvidersBackend struct { gcDone chan struct{} } -var _ Backend = (*ProvidersBackend)(nil) +var ( + _ Backend = (*ProvidersBackend)(nil) + _ io.Closer = (*ProvidersBackend)(nil) +) // ProvidersBackendConfig is used to construct a [ProvidersBackend]. Use // [DefaultProviderBackendConfig] to get a default configuration struct and then @@ -224,6 +228,13 @@ func (p *ProvidersBackend) Fetch(ctx context.Context, key string) (any, error) { return out, nil } +// Close is here to implement the [io.Closer] interface. This will get called +// when the [DHT] "shuts down"/closes. +func (p *ProvidersBackend) Close() error { + p.StopGarbageCollection() + return nil +} + // StartGarbageCollection starts the garbage collection loop. The garbage // collection interval can be configured with [ProvidersBackendConfig.GCInterval]. // The garbage collection loop can only be started a single time. Use diff --git a/v2/backend_provider_test.go b/v2/backend_provider_test.go index 60d67c32..fab0a6cd 100644 --- a/v2/backend_provider_test.go +++ b/v2/backend_provider_test.go @@ -103,6 +103,7 @@ func TestProvidersBackend_GarbageCollection_lifecycle_thread_safe(t *testing.T) } wg.Done() }() + wg.Add(1) go func() { for i := 0; i < 100; i++ { diff --git a/v2/config.go b/v2/config.go index cc6a6af9..e1e18347 100644 --- a/v2/config.go +++ b/v2/config.go @@ -133,6 +133,9 @@ type Config struct { // record. If this map stays empty, it will be populated with the default // IPNS ([NewBackendIPNS]), PublicKey ([NewBackendPublicKey]), and // Providers ([NewBackendProvider]) backends. + // + // Backends that implement the [io.Closer] interface will get closed when + // the DHT is closed. Backends map[string]Backend // Datastore will be used to construct the default backends. If this is nil, @@ -265,3 +268,10 @@ func AddrFilterIdentity(maddrs []ma.Multiaddr) []ma.Multiaddr { func AddrFilterPrivate(maddrs []ma.Multiaddr) []ma.Multiaddr { return ma.FilterAddrs(maddrs, manet.IsPublicAddr) } + +// AddrFilterPublic filters out any multiaddresses that are public. It +// evaluates the [manet.IsIPLoopback] on each multiaddress, and if it returns +// true, the multiaddress will be in the result set. 
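+// Note that this keeps only loopback multiaddresses; addresses in private
+// ranges are filtered out together with the public ones.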
+func AddrFilterPublic(maddrs []ma.Multiaddr) []ma.Multiaddr {
+	return ma.FilterAddrs(maddrs, func(maddr ma.Multiaddr) bool { return manet.IsIPLoopback(maddr) })
+}
diff --git a/v2/dht.go b/v2/dht.go
index a5088fbf..be34d685 100644
--- a/v2/dht.go
+++ b/v2/dht.go
@@ -2,6 +2,7 @@ package dht
 
 import (
 	"fmt"
+	"io"
 	"sync"
 
 	"github.com/iand/zikade/kademlia"
@@ -143,7 +144,33 @@ func (d *DHT) Close() error {
 		d.log.With("err", err).Debug("failed closing event bus subscription")
 	}
 
-	// TODO clean up backends
+	for ns, b := range d.backends {
+		closer, ok := b.(io.Closer)
+		if !ok {
+			continue
+		}
+
+		if err := closer.Close(); err != nil {
+			d.log.Warn("failed closing backend", "namespace", ns, "err", err.Error())
+		}
+	}
+
+	// TODO: improve the following.
+	// If the protocol is the IPFS kademlia protocol and the user didn't
+	// provide a datastore implementation, we have initialized an in-memory
+	// datastore and assigned it to all backends. In the following we check
+	// whether these conditions are met and then get hold of a reference to
+	// that datastore by looking in our backends map and casting one entry
+	// to one of our known backend types.
+	if d.cfg.ProtocolID == ProtocolIPFS && d.cfg.Datastore == nil {
+		if b, found := d.backends[namespaceProviders]; found {
+			if pbe, ok := b.(*ProvidersBackend); ok {
+				if err := pbe.datastore.Close(); err != nil {
+					d.log.Warn("failed closing in memory datastore", "err", err.Error())
+				}
+			}
+		}
+	}
 
 	// kill all active streams using the DHT protocol.
 	for _, c := range d.host.Network().Conns() {
From 4418cfb60eaf6f09b287dc3e9429f25c61ca6b77 Mon Sep 17 00:00:00 2001
From: Dennis Trautwein
Date: Fri, 25 Aug 2023 18:57:57 +0200
Subject: [PATCH 34/64] add notifee test

---
 v2/notifee_test.go | 67 ++++++++++++++++++++++++++++++++++++++++++++++
 v2/stream_test.go  | 10 +++----
 2 files changed, 71 insertions(+), 6 deletions(-)
 create mode 100644 v2/notifee_test.go

diff --git a/v2/notifee_test.go b/v2/notifee_test.go
new file mode 100644
index 00000000..e8f3d90e
--- /dev/null
+++ b/v2/notifee_test.go
@@ -0,0 +1,67 @@
+package dht
+
+import (
+	"testing"
+
+	"github.com/libp2p/go-libp2p/core/network"
+
+	"github.com/libp2p/go-libp2p/core/event"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestDHT_consumeNetworkEvents_onEvtLocalReachabilityChanged(t *testing.T) {
+	newModeDHT := func(m ModeOpt) *DHT {
+		cfg := DefaultConfig()
+		cfg.Mode = m
+
+		return newTestDHTWithConfig(t, cfg)
+	}
+
+	t.Run("set server", func(t *testing.T) {
+		d := newModeDHT(ModeOptAutoClient)
+		d.onEvtLocalReachabilityChanged(event.EvtLocalReachabilityChanged{
+			Reachability: network.ReachabilityPublic,
+		})
+		assert.Equal(t, modeServer, d.mode)
+	})
+
+	t.Run("set client", func(t *testing.T) {
+		d := newModeDHT(ModeOptAutoClient)
+
+		d.onEvtLocalReachabilityChanged(event.EvtLocalReachabilityChanged{
+			Reachability: network.ReachabilityPrivate,
+		})
+
+		assert.Equal(t, modeClient, d.mode)
+	})
+
+	t.Run("on unknown set client when auto client", func(t *testing.T) {
+		d := newModeDHT(ModeOptAutoClient)
+
+		d.onEvtLocalReachabilityChanged(event.EvtLocalReachabilityChanged{
+			Reachability: network.ReachabilityUnknown,
+		})
+
+		assert.Equal(t, modeClient, d.mode)
+	})
+
+	t.Run("on unknown set server when auto server", func(t *testing.T) {
+		d := newModeDHT(ModeOptAutoServer)
+
+		d.onEvtLocalReachabilityChanged(event.EvtLocalReachabilityChanged{
+			Reachability: network.ReachabilityUnknown,
+		})
+
+		assert.Equal(t, modeServer, d.mode)
+	})
+
+	t.Run("handles unknown event gracefully", func(t *testing.T) {
event gracefully", func(t *testing.T) { + d := newModeDHT(ModeOptAutoServer) + + d.onEvtLocalReachabilityChanged(event.EvtLocalReachabilityChanged{ + Reachability: network.Reachability(99), + }) + + assert.Equal(t, modeServer, d.mode) + }) +} diff --git a/v2/stream_test.go b/v2/stream_test.go index cfdfb24f..c10d48e1 100644 --- a/v2/stream_test.go +++ b/v2/stream_test.go @@ -49,14 +49,12 @@ func (trw testReadWriter) WriteMsg(msg *pb.Message) error { } func newPeerPair(t testing.TB) (host.Host, *DHT) { - client, err := libp2p.New( - libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"), - ) + listenAddr := libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0") + + client, err := libp2p.New(listenAddr) require.NoError(t, err) - server, err := libp2p.New( - libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"), - ) + server, err := libp2p.New(listenAddr) require.NoError(t, err) cfg := DefaultConfig() From 1cbb419613ce732bca0f8b5e947e763fe2bf351f Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Mon, 28 Aug 2023 12:30:14 +0200 Subject: [PATCH 35/64] moved two handler tests from v1 --- v2/handlers_test.go | 118 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 118 insertions(+) diff --git a/v2/handlers_test.go b/v2/handlers_test.go index 36b463f6..f38a2ce6 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -1,7 +1,9 @@ package dht import ( + "bytes" "context" + "errors" "fmt" "math/rand" "reflect" @@ -738,6 +740,122 @@ func TestDHT_handlePutValue_unknown_backend(t *testing.T) { assert.ErrorContains(t, err, "unsupported record type") } +func TestDHT_handlePutValue_moved_from_v1_bad_proto_message(t *testing.T) { + // Test moved from v1 to v2 - original name TestBadProtoMessages in dht_test.go + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + d := newTestDHT(t) + + nilrec := new(pb.Message) + if _, err := d.handlePutValue(ctx, "testpeer", nilrec); err == nil { + t.Fatal("should have errored on nil record") + } +} + +// atomicPutValidator moved from v1 to v2 - in support for [TestDHT_handlePutValue_atomic_operation] +type atomicPutValidator struct{} + +var _ record.Validator = (*atomicPutValidator)(nil) + +func (v atomicPutValidator) Validate(key string, value []byte) error { + if bytes.Equal(value, []byte("expired")) { + return errors.New("expired") + } + return nil +} + +// selects the entry with the 'highest' last byte +func (atomicPutValidator) Select(_ string, bs [][]byte) (int, error) { + index := -1 + max := uint8(0) + for i, b := range bs { + if bytes.Equal(b, []byte("valid")) { + if index == -1 { + index = i + } + continue + } + + str := string(b) + n := str[len(str)-1] + if n > max { + max = n + index = i + } + + } + if index == -1 { + return -1, errors.New("no rec found") + } + return index, nil +} + +func TestDHT_handlePutValue_moved_from_v1_atomic_operation(t *testing.T) { + // Test moved from v1 to v2 - original name TestAtomicPut in dht_test.go + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ds, err := InMemoryDatastore() + require.NoError(t, err) + + recBackend := &RecordBackend{ + cfg: DefaultRecordBackendConfig(), + log: devnull, + namespace: "test", + datastore: ds, + validator: atomicPutValidator{}, + } + + d := newTestDHT(t) + + d.backends[recBackend.namespace] = recBackend + + // fnc to put a record + key := "/test/testkey" + putRecord := func(value []byte) error { + rec := record.MakePutRecord(key, value) + msg := &pb.Message{ + Type: pb.Message_PUT_VALUE, + Key: rec.Key, + Record: rec, + } + _, err := 
d.handlePutValue(ctx, "testpeer", msg) + return err + } + + // put a valid record + if err := putRecord([]byte("valid")); err != nil { + t.Fatal("should not have errored on a valid record") + } + + // simultaneous puts for old & new values + values := [][]byte{[]byte("newer1"), []byte("newer7"), []byte("newer3"), []byte("newer5")} + var wg sync.WaitGroup + for _, v := range values { + wg.Add(1) + go func(v []byte) { + defer wg.Done() + _ = putRecord(v) // we expect some of these to fail + }(v) + } + wg.Wait() + + // get should return the newest value + pmes := &pb.Message{ + Type: pb.Message_GET_VALUE, + Key: []byte(key), + } + msg, err := d.handleGetValue(ctx, "testpeer", pmes) + if err != nil { + t.Fatalf("should not have errored on final get, but got %+v", err) + } + if string(msg.GetRecord().Value) != "newer7" { + t.Fatalf("Expected 'newer7' got '%s'", string(msg.GetRecord().Value)) + } +} + func BenchmarkDHT_handleGetValue(b *testing.B) { d := newTestDHT(b) From 57c5df3f61391c539d8044a75d9900dd5ac9b3c6 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Wed, 30 Aug 2023 17:55:42 +0200 Subject: [PATCH 36/64] Move kademlia types to own package --- v2/backend_provider.go | 3 +- v2/dht.go | 10 +- v2/handlers.go | 17 +- v2/handlers_test.go | 25 +- v2/kad.go | 59 -- v2/kadt/kadt.go | 61 ++ v2/pb/Makefile | 18 +- v2/pb/README.md | 18 + v2/pb/bytestring.go | 52 -- v2/pb/dht.aux.go | 72 -- v2/pb/dht.pb.go | 976 --------------------- v2/pb/msg.aux.go | 128 +++ v2/pb/{dht.aux_test.go => msg.aux_test.go} | 2 +- v2/pb/msg.pb.go | 447 ++++++++++ v2/pb/{dht.proto => msg.proto} | 24 +- v2/router.go | 4 +- v2/stream.go | 4 +- v2/stream_test.go | 9 +- 18 files changed, 724 insertions(+), 1205 deletions(-) delete mode 100644 v2/kad.go create mode 100644 v2/kadt/kadt.go create mode 100644 v2/pb/README.md delete mode 100644 v2/pb/bytestring.go delete mode 100644 v2/pb/dht.aux.go delete mode 100644 v2/pb/dht.pb.go create mode 100644 v2/pb/msg.aux.go rename v2/pb/{dht.aux_test.go => msg.aux_test.go} (93%) create mode 100644 v2/pb/msg.pb.go rename v2/pb/{dht.proto => msg.proto} (65%) diff --git a/v2/backend_provider.go b/v2/backend_provider.go index 8ce8477a..be318b59 100644 --- a/v2/backend_provider.go +++ b/v2/backend_provider.go @@ -16,13 +16,14 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/autobatch" dsq "github.com/ipfs/go-datastore/query" - "github.com/libp2p/go-libp2p-kad-dht/v2/metrics" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" "github.com/multiformats/go-base32" "go.opencensus.io/stats" "go.opencensus.io/tag" "golang.org/x/exp/slog" + + "github.com/libp2p/go-libp2p-kad-dht/v2/metrics" ) // ProvidersBackend implements the [Backend] interface and handles provider diff --git a/v2/dht.go b/v2/dht.go index be34d685..41154e2b 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -1,6 +1,7 @@ package dht import ( + "crypto/sha256" "fmt" "io" "sync" @@ -14,6 +15,8 @@ import ( "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "golang.org/x/exp/slog" + + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) // DHT is an implementation of Kademlia with S/Kademlia modifications. 
@@ -67,7 +70,7 @@ func New(h host.Host, cfg *Config) (*DHT, error) { log: cfg.Logger, } - nid := nodeID(d.host.ID()) + nid := kadt.PeerID(d.host.ID()) // Use the configured routing table if it was provided if cfg.RoutingTable != nil { @@ -255,3 +258,8 @@ func (d *DHT) logErr(err error, msg string) { d.log.Warn(msg, "err", err.Error()) } + +func newSHA256Key(data []byte) key.Key256 { + b := sha256.Sum256(data) + return key.NewKey256(b[:]) +} diff --git a/v2/handlers.go b/v2/handlers.go index a0215433..e3268bbd 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -13,7 +13,8 @@ import ( "github.com/plprobelab/go-kademlia/key" "golang.org/x/exp/slog" - pb "github.com/libp2p/go-libp2p-kad-dht/v2/pb" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) // handleFindPeer handles FIND_NODE requests from remote peers. @@ -36,12 +37,12 @@ func (d *DHT) handleFindPeer(ctx context.Context, remote peer.ID, req *pb.Messag // if the remote is asking for us, short-circuit and return us only if target == d.host.ID() { - resp.CloserPeers = []pb.Message_Peer{pb.FromAddrInfo(pstore.PeerInfo(d.host.ID()))} + resp.CloserPeers = []*pb.Message_Peer{pb.FromAddrInfo(pstore.PeerInfo(d.host.ID()))} return resp, nil } // gather closer peers that we know - resp.CloserPeers = d.closerPeers(ctx, remote, nodeID(target).Key()) + resp.CloserPeers = d.closerPeers(ctx, remote, kadt.PeerID(target).Key()) // if we happen to know the target peers addresses (e.g., although we are // far away in the keyspace), we add the peer to the result set. This means @@ -138,7 +139,7 @@ func (d *DHT) handleGetValue(ctx context.Context, remote peer.ID, req *pb.Messag pset, ok := fetched.(*providerSet) if ok { - resp.ProviderPeers = make([]pb.Message_Peer, len(pset.providers)) + resp.ProviderPeers = make([]*pb.Message_Peer, len(pset.providers)) for i, p := range pset.providers { resp.ProviderPeers[i] = pb.FromAddrInfo(p) } @@ -213,7 +214,7 @@ func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Me return nil, fmt.Errorf("expected *providerSet value type, got: %T", pset) } - pbProviders := make([]pb.Message_Peer, len(pset.providers)) + pbProviders := make([]*pb.Message_Peer, len(pset.providers)) for i, p := range pset.providers { pbProviders[i] = pb.FromAddrInfo(p) } @@ -230,7 +231,7 @@ func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Me // closerPeers returns the closest peers to the given target key this host knows // about. It doesn't return 1) itself 2) the peer that asked for closer peers. -func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256) []pb.Message_Peer { +func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256) []*pb.Message_Peer { ctx, span := tracer.Start(ctx, "DHT.closerPeers") defer span.End() @@ -240,9 +241,9 @@ func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256 } // pre-allocated the result set slice. 
- filtered := make([]pb.Message_Peer, 0, len(peers)) + filtered := make([]*pb.Message_Peer, 0, len(peers)) for _, p := range peers { - pid := peer.ID(p.(nodeID)) // TODO: type cast + pid := peer.ID(p.(kadt.PeerID)) // TODO: type cast // check for own peer ID if pid == d.host.ID() { diff --git a/v2/handlers_test.go b/v2/handlers_test.go index f38a2ce6..bbce0c10 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -12,12 +12,12 @@ import ( "testing" "time" - "github.com/gogo/protobuf/proto" + "google.golang.org/protobuf/proto" + "github.com/ipfs/boxo/ipns" "github.com/ipfs/boxo/path" ds "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p" - pb "github.com/libp2p/go-libp2p-kad-dht/v2/pb" record "github.com/libp2p/go-libp2p-record" recpb "github.com/libp2p/go-libp2p-record/pb" "github.com/libp2p/go-libp2p/core/crypto" @@ -25,6 +25,9 @@ import ( ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) var rng = rand.New(rand.NewSource(1337)) @@ -81,7 +84,7 @@ func fillRoutingTable(t testing.TB, d *DHT) { pid := newPeerID(t) // add peer to routing table - d.rt.AddNode(nodeID(pid)) + d.rt.AddNode(kadt.PeerID(pid)) // craft network address for peer a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i)) @@ -122,7 +125,7 @@ func BenchmarkDHT_handleFindPeer(b *testing.B) { pid := newPeerID(b) // add peer to routing table - d.rt.AddNode(nodeID(pid)) + d.rt.AddNode(kadt.PeerID(pid)) // keep track of peer peers = append(peers, pid) @@ -174,7 +177,7 @@ func TestDHT_handleFindPeer_happy_path(t *testing.T) { // closer peers. This means we can't assert for exactly 20 closer peers // below. 
if i > 0 { - d.rt.AddNode(nodeID(pid)) + d.rt.AddNode(kadt.PeerID(pid)) } // keep track of peer @@ -208,7 +211,7 @@ func TestDHT_handleFindPeer_self_in_routing_table(t *testing.T) { // a case that shouldn't happen d := newTestDHT(t) - d.rt.AddNode(nodeID(d.host.ID())) + d.rt.AddNode(kadt.PeerID(d.host.ID())) req := &pb.Message{ Type: pb.Message_FIND_NODE, @@ -253,7 +256,7 @@ func TestDHT_handleFindPeer_unknown_addresses_but_in_routing_table(t *testing.T) pid := newPeerID(t) // add peer to routing table - d.rt.AddNode(nodeID(pid)) + d.rt.AddNode(kadt.PeerID(pid)) // keep track of peer peers[i] = pid @@ -322,7 +325,7 @@ func TestDHT_handleFindPeer_request_for_self(t *testing.T) { pid := newPeerID(t) // add peer to routing table - d.rt.AddNode(nodeID(pid)) + d.rt.AddNode(kadt.PeerID(pid)) // keep track of peer peers[i] = pid @@ -378,7 +381,7 @@ func TestDHT_handleFindPeer_request_for_known_but_far_peer(t *testing.T) { // don't add first peer to routing table -> the one we're asking for // don't add second peer -> the one that's requesting if i > 1 { - d.rt.AddNode(nodeID(pid)) + d.rt.AddNode(kadt.PeerID(pid)) } } @@ -1172,7 +1175,7 @@ func newAddrInfo(t testing.TB) peer.AddrInfo { } func newAddProviderRequest(key []byte, addrInfos ...peer.AddrInfo) *pb.Message { - providerPeers := make([]pb.Message_Peer, len(addrInfos)) + providerPeers := make([]*pb.Message_Peer, len(addrInfos)) for i, addrInfo := range addrInfos { providerPeers[i] = pb.FromAddrInfo(addrInfo) } @@ -1311,7 +1314,7 @@ func TestDHT_handleAddProvider_empty_provider_peers(t *testing.T) { // construct request req := newAddProviderRequest([]byte("random-key")) - req.ProviderPeers = make([]pb.Message_Peer, 0) // overwrite + req.ProviderPeers = make([]*pb.Message_Peer, 0) // overwrite // do the request _, err := d.handleAddProvider(ctx, newPeerID(t), req) diff --git a/v2/kad.go b/v2/kad.go deleted file mode 100644 index 77d15cb7..00000000 --- a/v2/kad.go +++ /dev/null @@ -1,59 +0,0 @@ -package dht - -import ( - "crypto/sha256" - - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" - "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/key" -) - -// nodeID is a type alias for peer.ID that implements the kad.NodeID interface. -// This means we can use nodeID for any operation that interfaces with -// go-kademlia. -type nodeID peer.ID - -// assertion that nodeID implements the kad.NodeID interface -var _ kad.NodeID[key.Key256] = nodeID("") - -// Key returns the Kademlia key of nodeID. The amino DHT operates on SHA256 -// hashes of, in this case, peer.IDs. This means this Key method takes -// the peer.ID, hashes it and constructs a 256-bit key. -func (p nodeID) Key() key.Key256 { - return newSHA256Key([]byte(p)) -} - -// String calls String on the underlying peer.ID and returns a string like -// QmFoo or 12D3KooBar. -func (p nodeID) String() string { - return peer.ID(p).String() -} - -// nodeInfo is a type that wraps peer.AddrInfo and implements the kad.NodeInfo -// interface. This means we can use nodeInfo for any operation that interfaces -// with go-kademlia. -type nodeInfo struct { - info peer.AddrInfo -} - -// assertion that nodeInfo implements the kad.NodeInfo interface -var _ kad.NodeInfo[key.Key256, ma.Multiaddr] = (*nodeInfo)(nil) - -// ID returns the kad.NodeID of this peer's information struct. -func (ai nodeInfo) ID() kad.NodeID[key.Key256] { - return nodeID(ai.info.ID) -} - -// Addresses returns all Multiaddresses of this peer. 
-func (ai nodeInfo) Addresses() []ma.Multiaddr {
-	addrs := make([]ma.Multiaddr, len(ai.info.Addrs))
-	copy(addrs, ai.info.Addrs)
-	return addrs
-}
-
-// newSHA256Key SHA256 hashes the given bytes and returns a new 256-bit key.
-func newSHA256Key(data []byte) key.Key256 {
-	h := sha256.Sum256(data)
-	return key.NewKey256(h[:])
-}
diff --git a/v2/kadt/kadt.go b/v2/kadt/kadt.go
new file mode 100644
index 00000000..b2d586f9
--- /dev/null
+++ b/v2/kadt/kadt.go
@@ -0,0 +1,61 @@
+// Package kadt contains the Kademlia types for interacting with go-kademlia.
+// It would be nicer to have these types in the top-level DHT package. However,
+// we also need these types in, e.g., the pb package to let the Message
+// type conform to certain interfaces.
+package kadt
+
+import (
+	"crypto/sha256"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+	ma "github.com/multiformats/go-multiaddr"
+	"github.com/plprobelab/go-kademlia/kad"
+	"github.com/plprobelab/go-kademlia/key"
+)
+
+// PeerID is a type wrapper around peer.ID that implements the kad.NodeID
+// interface. This means we can use PeerID for any operation that interfaces
+// with go-kademlia.
+type PeerID peer.ID
+
+// assertion that PeerID implements the kad.NodeID interface
+var _ kad.NodeID[key.Key256] = PeerID("")
+
+// Key returns the Kademlia key of PeerID. The amino DHT operates on SHA256
+// hashes of, in this case, peer.IDs. This means this Key method takes
+// the peer.ID, hashes it and constructs a 256-bit key.
+func (p PeerID) Key() key.Key256 {
+	h := sha256.Sum256([]byte(p))
+	return key.NewKey256(h[:])
+}
+
+// String calls String on the underlying peer.ID and returns a string like
+// QmFoo or 12D3KooBar.
+func (p PeerID) String() string {
+	return peer.ID(p).String()
+}
+
+// AddrInfo is a type that wraps peer.AddrInfo and implements the kad.NodeInfo
+// interface. This means we can use AddrInfo for any operation that interfaces
+// with go-kademlia.
+//
+// A more accurate name would be PeerInfo or NodeInfo. However, for consistency
+// and coherence with [peer.AddrInfo] we also name it AddrInfo.
+type AddrInfo struct {
+	Info peer.AddrInfo
+}
+
+// assertion that AddrInfo implements the kad.NodeInfo interface
+var _ kad.NodeInfo[key.Key256, ma.Multiaddr] = (*AddrInfo)(nil)
+
+// ID returns the kad.NodeID of this peer's information struct.
+func (ai AddrInfo) ID() kad.NodeID[key.Key256] {
+	return PeerID(ai.Info.ID)
+}
+
+// Addresses returns all Multiaddresses of this peer.
+func (ai AddrInfo) Addresses() []ma.Multiaddr {
+	addrs := make([]ma.Multiaddr, len(ai.Info.Addrs))
+	copy(addrs, ai.Info.Addrs)
+	return addrs
+}
diff --git a/v2/pb/Makefile b/v2/pb/Makefile
index eb14b576..0d5b6d67 100644
--- a/v2/pb/Makefile
+++ b/v2/pb/Makefile
@@ -1,11 +1,15 @@
-PB = $(wildcard *.proto)
-GO = $(PB:.proto=.pb.go)
+all: clone reset build clean
 
-all: $(GO)
+build:
+	protoc --go_out=./ --go_opt=Mgithub.com/libp2p/go-libp2p-record/pb/record.proto=github.com/libp2p/go-libp2p-record/pb ./msg.proto
 
-%.pb.go: %.proto
-	protoc --proto_path=$(GOPATH)/src:. --gogofast_out=. $<
+clone:
+	git clone --depth 1 --branch v0.2.0 git@github.com:libp2p/go-libp2p-record.git github.com/libp2p/go-libp2p-record || true
+
+reset:
+	rm msg.pb.go || true
 
 clean:
-	rm -f *.pb.go
-	rm -f *.go
+	rm -rf github.com
+
+.PHONY: all build clone reset clean
\ No newline at end of file
diff --git a/v2/pb/README.md b/v2/pb/README.md
new file mode 100644
index 00000000..afeaab1b
--- /dev/null
+++ b/v2/pb/README.md
@@ -0,0 +1,18 @@
+# Protocol Buffers
+
+To generate the protobuf definitions run:
+
+```shell
+make
+```
+
+This command will clone the `libp2p/go-libp2p-record` repository into this
+directory (git-ignored) and run the `protoc` command to generate the `msg.pb.go`
+file for the `msg.proto` protobuf definition. We need `go-libp2p-record` because
+`msg.proto` references the `Record` protobuf definition from that repository.
+
+To clean up after you have generated the `msg.pb.go` file, you can run:
+
+```shell
+make clean
+```
\ No newline at end of file
diff --git a/v2/pb/bytestring.go b/v2/pb/bytestring.go
deleted file mode 100644
index 5099a991..00000000
--- a/v2/pb/bytestring.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package dht_pb
-
-import (
-	"encoding/json"
-	"fmt"
-)
-
-type byteString string
-
-func (b *byteString) MarshalTo(data []byte) (int, error) {
-	return copy(data, *b), nil
-}
-
-func (b *byteString) Size() int {
-	return len(*b)
-}
-
-func (b *byteString) Marshal() ([]byte, error) {
-	if b == nil {
-		return nil, fmt.Errorf("empty byte string")
-	}
-	return []byte(*b), nil
-}
-
-func (b *byteString) Unmarshal(data []byte) error {
-	*b = byteString(data)
-	return nil
-}
-
-func (b *byteString) Equal(other *byteString) bool {
-	if b != nil && other != nil {
-		return *b == *other
-	}
-	return b == nil && other == nil
-}
-
-func (b *byteString) MarshalJSON() ([]byte, error) {
-	if b == nil {
-		return nil, fmt.Errorf("empty byte string")
-	}
-	return json.Marshal([]byte(*b))
-}
-
-func (b *byteString) UnmarshalJSON(data []byte) error {
-	var buf []byte
-	err := json.Unmarshal(data, &buf)
-	if err != nil {
-		return err
-	}
-	*b = byteString(buf)
-	return nil
-}
diff --git a/v2/pb/dht.aux.go b/v2/pb/dht.aux.go
deleted file mode 100644
index abc589c1..00000000
--- a/v2/pb/dht.aux.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package dht_pb
-
-import (
-	"golang.org/x/exp/slog"
-
-	"github.com/libp2p/go-libp2p/core/peer"
-	ma "github.com/multiformats/go-multiaddr"
-)
-
-// FromAddrInfo constructs a Message_Peer from the given peer.AddrInfo
-func FromAddrInfo(p peer.AddrInfo) Message_Peer {
-	mp := Message_Peer{
-		Id:    byteString(p.ID),
-		Addrs: make([][]byte, len(p.Addrs)),
-	}
-
-	for i, maddr := range p.Addrs {
-		mp.Addrs[i] = maddr.Bytes() // Bytes, not String. Compressed.
-	}
-
-	return mp
-}
-
-// ContainsCloserPeer returns true if the provided peer ID is among the
-// list of closer peers contained in this message.
-func (m *Message) ContainsCloserPeer(pid peer.ID) bool {
-	b := byteString(pid)
-	for _, cp := range m.CloserPeers {
-		if cp.Id.Equal(&b) {
-			return true
-		}
-	}
-	return false
-}
-
-// ProviderAddrInfos returns the peer.AddrInfo's of the provider peers in this
-// message.
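For orientation, here is a minimal, self-contained sketch of how the kadt types introduced above plug into go-kademlia. It is not part of the patch, and the raw-string peer ID is a placeholder; real code would take a peer.ID from a libp2p host or peerstore:

```go
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"

	"github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
)

func main() {
	// Placeholder peer ID; peer.ID is a string type, so a raw cast works
	// for illustration purposes.
	pid := peer.ID("hypothetical-peer-id")

	// Wrapping the peer.ID as a kadt.PeerID makes it satisfy go-kademlia's
	// kad.NodeID interface, e.g. for routing table operations like the
	// d.rt.AddNode(kadt.PeerID(pid)) calls seen in the tests above.
	id := kadt.PeerID(pid)

	// Key is the SHA256 hash of the peer ID bytes, wrapped in a 256-bit
	// Kademlia key.
	fmt.Println("node:", id.String())
	fmt.Println("key: ", id.Key().HexString())
}
```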
-func (m *Message) ProviderAddrInfos() []peer.AddrInfo { - if m == nil { - return nil - } - - addrInfos := make([]peer.AddrInfo, 0, len(m.ProviderPeers)) - for _, p := range m.ProviderPeers { - addrInfos = append(addrInfos, peer.AddrInfo{ - ID: peer.ID(p.Id), - Addrs: p.Addresses(), - }) - } - - return addrInfos -} - -// Addresses returns the Multiaddresses associated with the Message_Peer entry -func (m *Message_Peer) Addresses() []ma.Multiaddr { - if m == nil { - return nil - } - - maddrs := make([]ma.Multiaddr, 0, len(m.Addrs)) - for _, addr := range m.Addrs { - maddr, err := ma.NewMultiaddrBytes(addr) - if err != nil { - slog.Debug("error decoding multiaddr for peer", "peer", peer.ID(m.Id), "err", err) - continue - } - - maddrs = append(maddrs, maddr) - } - - return maddrs -} diff --git a/v2/pb/dht.pb.go b/v2/pb/dht.pb.go deleted file mode 100644 index dd317f5e..00000000 --- a/v2/pb/dht.pb.go +++ /dev/null @@ -1,976 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: dht.proto - -package dht_pb - -import ( - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - pb "github.com/libp2p/go-libp2p-record/pb" -) - -// Reference imports to suppress errors if they are not otherwise used. -var ( - _ = proto.Marshal - _ = fmt.Errorf - _ = math.Inf -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Message_MessageType int32 - -const ( - Message_PUT_VALUE Message_MessageType = 0 - Message_GET_VALUE Message_MessageType = 1 - Message_ADD_PROVIDER Message_MessageType = 2 - Message_GET_PROVIDERS Message_MessageType = 3 - Message_FIND_NODE Message_MessageType = 4 - Message_PING Message_MessageType = 5 -) - -var Message_MessageType_name = map[int32]string{ - 0: "PUT_VALUE", - 1: "GET_VALUE", - 2: "ADD_PROVIDER", - 3: "GET_PROVIDERS", - 4: "FIND_NODE", - 5: "PING", -} - -var Message_MessageType_value = map[string]int32{ - "PUT_VALUE": 0, - "GET_VALUE": 1, - "ADD_PROVIDER": 2, - "GET_PROVIDERS": 3, - "FIND_NODE": 4, - "PING": 5, -} - -func (x Message_MessageType) String() string { - return proto.EnumName(Message_MessageType_name, int32(x)) -} - -func (Message_MessageType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_616a434b24c97ff4, []int{0, 0} -} - -type Message_ConnectionType int32 - -const ( - // sender does not have a connection to peer, and no extra information (default) - Message_NOT_CONNECTED Message_ConnectionType = 0 - // sender has a live connection to peer - Message_CONNECTED Message_ConnectionType = 1 - // sender recently connected to peer - Message_CAN_CONNECT Message_ConnectionType = 2 - // sender recently tried to connect to peer repeatedly but failed to connect - // ("try" here is loose, but this should signal "made strong effort, failed") - Message_CANNOT_CONNECT Message_ConnectionType = 3 -) - -var Message_ConnectionType_name = map[int32]string{ - 0: "NOT_CONNECTED", - 1: "CONNECTED", - 2: "CAN_CONNECT", - 3: "CANNOT_CONNECT", -} - -var Message_ConnectionType_value = map[string]int32{ - "NOT_CONNECTED": 0, - "CONNECTED": 1, - "CAN_CONNECT": 2, - "CANNOT_CONNECT": 3, -} - -func (x Message_ConnectionType) String() string { - return proto.EnumName(Message_ConnectionType_name, 
int32(x)) -} - -func (Message_ConnectionType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_616a434b24c97ff4, []int{0, 1} -} - -type Message struct { - // defines what type of message it is. - Type Message_MessageType `protobuf:"varint,1,opt,name=type,proto3,enum=dht.pb.Message_MessageType" json:"type,omitempty"` - // defines what coral cluster level this query/response belongs to. - // in case we want to implement coral's cluster rings in the future. - ClusterLevelRaw int32 `protobuf:"varint,10,opt,name=clusterLevelRaw,proto3" json:"clusterLevelRaw,omitempty"` - // Used to specify the key associated with this message. - // PUT_VALUE, GET_VALUE, ADD_PROVIDER, GET_PROVIDERS - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - // Used to return a value - // PUT_VALUE, GET_VALUE - Record *pb.Record `protobuf:"bytes,3,opt,name=record,proto3" json:"record,omitempty"` - // Used to return peers closer to a key in a query - // GET_VALUE, GET_PROVIDERS, FIND_NODE - CloserPeers []Message_Peer `protobuf:"bytes,8,rep,name=closerPeers,proto3" json:"closerPeers"` - // Used to return Providers - // GET_VALUE, ADD_PROVIDER, GET_PROVIDERS - ProviderPeers []Message_Peer `protobuf:"bytes,9,rep,name=providerPeers,proto3" json:"providerPeers"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} -func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_616a434b24c97ff4, []int{0} -} - -func (m *Message) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} - -func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Message.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} - -func (m *Message) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message.Merge(m, src) -} - -func (m *Message) XXX_Size() int { - return m.Size() -} - -func (m *Message) XXX_DiscardUnknown() { - xxx_messageInfo_Message.DiscardUnknown(m) -} - -var xxx_messageInfo_Message proto.InternalMessageInfo - -func (m *Message) GetType() Message_MessageType { - if m != nil { - return m.Type - } - return Message_PUT_VALUE -} - -func (m *Message) GetClusterLevelRaw() int32 { - if m != nil { - return m.ClusterLevelRaw - } - return 0 -} - -func (m *Message) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *Message) GetRecord() *pb.Record { - if m != nil { - return m.Record - } - return nil -} - -func (m *Message) GetCloserPeers() []Message_Peer { - if m != nil { - return m.CloserPeers - } - return nil -} - -func (m *Message) GetProviderPeers() []Message_Peer { - if m != nil { - return m.ProviderPeers - } - return nil -} - -type Message_Peer struct { - // ID of a given peer. 
- Id byteString `protobuf:"bytes,1,opt,name=id,proto3,customtype=byteString" json:"id"` - // multiaddrs for a given peer - Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs,proto3" json:"addrs,omitempty"` - // used to signal the sender's connection capabilities to the peer - Connection Message_ConnectionType `protobuf:"varint,3,opt,name=connection,proto3,enum=dht.pb.Message_ConnectionType" json:"connection,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Message_Peer) Reset() { *m = Message_Peer{} } -func (m *Message_Peer) String() string { return proto.CompactTextString(m) } -func (*Message_Peer) ProtoMessage() {} -func (*Message_Peer) Descriptor() ([]byte, []int) { - return fileDescriptor_616a434b24c97ff4, []int{0, 0} -} - -func (m *Message_Peer) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} - -func (m *Message_Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Message_Peer.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} - -func (m *Message_Peer) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message_Peer.Merge(m, src) -} - -func (m *Message_Peer) XXX_Size() int { - return m.Size() -} - -func (m *Message_Peer) XXX_DiscardUnknown() { - xxx_messageInfo_Message_Peer.DiscardUnknown(m) -} - -var xxx_messageInfo_Message_Peer proto.InternalMessageInfo - -func (m *Message_Peer) GetAddrs() [][]byte { - if m != nil { - return m.Addrs - } - return nil -} - -func (m *Message_Peer) GetConnection() Message_ConnectionType { - if m != nil { - return m.Connection - } - return Message_NOT_CONNECTED -} - -func init() { - proto.RegisterEnum("dht.pb.Message_MessageType", Message_MessageType_name, Message_MessageType_value) - proto.RegisterEnum("dht.pb.Message_ConnectionType", Message_ConnectionType_name, Message_ConnectionType_value) - proto.RegisterType((*Message)(nil), "dht.pb.Message") - proto.RegisterType((*Message_Peer)(nil), "dht.pb.Message.Peer") -} - -func init() { proto.RegisterFile("dht.proto", fileDescriptor_616a434b24c97ff4) } - -var fileDescriptor_616a434b24c97ff4 = []byte{ - // 469 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xb1, 0x6f, 0x9b, 0x40, - 0x18, 0xc5, 0x73, 0x80, 0xdd, 0xf8, 0x03, 0x3b, 0xe4, 0x94, 0x01, 0xb9, 0x92, 0x83, 0x3c, 0xd1, - 0xc1, 0x20, 0xd1, 0xb5, 0xaa, 0x6a, 0x03, 0x8d, 0x2c, 0xa5, 0xd8, 0xba, 0x38, 0xe9, 0x68, 0x19, - 0xb8, 0x12, 0x54, 0xd7, 0x87, 0x00, 0xa7, 0xf2, 0xd6, 0x3f, 0x2f, 0x63, 0xe7, 0x0e, 0x51, 0xe5, - 0xa9, 0x7f, 0x46, 0xc5, 0x11, 0x5a, 0xec, 0x25, 0x13, 0xef, 0x7d, 0xf7, 0x7e, 0xe2, 0xdd, 0xa7, - 0x83, 0x4e, 0x74, 0x5f, 0x98, 0x69, 0xc6, 0x0a, 0x86, 0xdb, 0x5c, 0x06, 0x7d, 0x3b, 0x4e, 0x8a, - 0xfb, 0x6d, 0x60, 0x86, 0xec, 0x9b, 0xb5, 0x4e, 0x82, 0xd4, 0x4e, 0xad, 0x98, 0x8d, 0x2a, 0x35, - 0xca, 0x68, 0xc8, 0xb2, 0xc8, 0x4a, 0x03, 0xab, 0x52, 0x15, 0xdb, 0x1f, 0x35, 0x98, 0x98, 0xc5, - 0xcc, 0xe2, 0xe3, 0x60, 0xfb, 0x85, 0x3b, 0x6e, 0xb8, 0xaa, 0xe2, 0xc3, 0x3f, 0x12, 0xbc, 0xfa, - 0x44, 0xf3, 0x7c, 0x15, 0x53, 0x6c, 0x81, 0x54, 0xec, 0x52, 0xaa, 0x21, 0x1d, 0x19, 0x3d, 0xfb, - 0xb5, 0x59, 0xb5, 0x30, 0x9f, 0x8f, 0xeb, 0xef, 0x62, 0x97, 0x52, 0xc2, 0x83, 0xd8, 0x80, 0xb3, - 0x70, 0xbd, 0xcd, 0x0b, 0x9a, 0x5d, 0xd3, 0x07, 0xba, 0x26, 0xab, 0xef, 0x1a, 0xe8, 0xc8, 0x68, - 0x91, 0xe3, 0x31, 0x56, 0x41, 0xfc, 0x4a, 
0x77, 0x9a, 0xa0, 0x23, 0x43, 0x21, 0xa5, 0xc4, 0x6f, - 0xa0, 0x5d, 0xf5, 0xd6, 0x44, 0x1d, 0x19, 0xb2, 0x7d, 0x6e, 0xd6, 0xd7, 0x08, 0x4c, 0xc2, 0x15, - 0x79, 0x0e, 0xe0, 0x77, 0x20, 0x87, 0x6b, 0x96, 0xd3, 0x6c, 0x4e, 0x69, 0x96, 0x6b, 0xa7, 0xba, - 0x68, 0xc8, 0xf6, 0xc5, 0x71, 0xbd, 0xf2, 0x70, 0x22, 0x3d, 0x3e, 0x5d, 0x9e, 0x90, 0x66, 0x1c, - 0x7f, 0x80, 0x6e, 0x9a, 0xb1, 0x87, 0x24, 0xaa, 0xf9, 0xce, 0x8b, 0xfc, 0x21, 0xd0, 0xff, 0x81, - 0x40, 0x2a, 0x15, 0x1e, 0x82, 0x90, 0x44, 0x7c, 0x3d, 0xca, 0x04, 0x97, 0xc9, 0x5f, 0x4f, 0x97, - 0x10, 0xec, 0x0a, 0x7a, 0x53, 0x64, 0xc9, 0x26, 0x26, 0x42, 0x12, 0xe1, 0x0b, 0x68, 0xad, 0xa2, - 0x28, 0xcb, 0x35, 0x41, 0x17, 0x0d, 0x85, 0x54, 0x06, 0xbf, 0x07, 0x08, 0xd9, 0x66, 0x43, 0xc3, - 0x22, 0x61, 0x1b, 0x7e, 0xe3, 0x9e, 0x3d, 0x38, 0x6e, 0xe0, 0xfc, 0x4b, 0xf0, 0x1d, 0x37, 0x88, - 0x61, 0x02, 0x72, 0x63, 0xfd, 0xb8, 0x0b, 0x9d, 0xf9, 0xed, 0x62, 0x79, 0x37, 0xbe, 0xbe, 0xf5, - 0xd4, 0x93, 0xd2, 0x5e, 0x79, 0xb5, 0x45, 0x58, 0x05, 0x65, 0xec, 0xba, 0xcb, 0x39, 0x99, 0xdd, - 0x4d, 0x5d, 0x8f, 0xa8, 0x02, 0x3e, 0x87, 0x6e, 0x19, 0xa8, 0x27, 0x37, 0xaa, 0x58, 0x32, 0x1f, - 0xa7, 0xbe, 0xbb, 0xf4, 0x67, 0xae, 0xa7, 0x4a, 0xf8, 0x14, 0xa4, 0xf9, 0xd4, 0xbf, 0x52, 0x5b, - 0xc3, 0xcf, 0xd0, 0x3b, 0x2c, 0x52, 0xd2, 0xfe, 0x6c, 0xb1, 0x74, 0x66, 0xbe, 0xef, 0x39, 0x0b, - 0xcf, 0xad, 0xfe, 0xf8, 0xdf, 0x22, 0x7c, 0x06, 0xb2, 0x33, 0xf6, 0xeb, 0x84, 0x2a, 0x60, 0x0c, - 0x3d, 0x67, 0xec, 0x37, 0x28, 0x55, 0x9c, 0x28, 0x8f, 0xfb, 0x01, 0xfa, 0xb9, 0x1f, 0xa0, 0xdf, - 0xfb, 0x01, 0x0a, 0xda, 0xfc, 0xfd, 0xbd, 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x1a, 0xa1, - 0xbe, 0xf7, 0x02, 0x00, 0x00, -} - -func (m *Message) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Message) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.ClusterLevelRaw != 0 { - i = encodeVarintDht(dAtA, i, uint64(m.ClusterLevelRaw)) - i-- - dAtA[i] = 0x50 - } - if len(m.ProviderPeers) > 0 { - for iNdEx := len(m.ProviderPeers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ProviderPeers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDht(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - } - if len(m.CloserPeers) > 0 { - for iNdEx := len(m.CloserPeers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.CloserPeers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDht(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - if m.Record != nil { - { - size, err := m.Record.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDht(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintDht(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if m.Type != 0 { - i = encodeVarintDht(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Message_Peer) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Message_Peer) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Message_Peer) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Connection != 0 { - i = encodeVarintDht(dAtA, i, uint64(m.Connection)) - i-- - dAtA[i] = 0x18 - } - if len(m.Addrs) > 0 { - for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Addrs[iNdEx]) - copy(dAtA[i:], m.Addrs[iNdEx]) - i = encodeVarintDht(dAtA, i, uint64(len(m.Addrs[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - { - size := m.Id.Size() - i -= size - if _, err := m.Id.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintDht(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintDht(dAtA []byte, offset int, v uint64) int { - offset -= sovDht(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} - -func (m *Message) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Type != 0 { - n += 1 + sovDht(uint64(m.Type)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovDht(uint64(l)) - } - if m.Record != nil { - l = m.Record.Size() - n += 1 + l + sovDht(uint64(l)) - } - if len(m.CloserPeers) > 0 { - for _, e := range m.CloserPeers { - l = e.Size() - n += 1 + l + sovDht(uint64(l)) - } - } - if len(m.ProviderPeers) > 0 { - for _, e := range m.ProviderPeers { - l = e.Size() - n += 1 + l + sovDht(uint64(l)) - } - } - if m.ClusterLevelRaw != 0 { - n += 1 + sovDht(uint64(m.ClusterLevelRaw)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Message_Peer) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Id.Size() - n += 1 + l + sovDht(uint64(l)) - if len(m.Addrs) > 0 { - for _, b := range m.Addrs { - l = len(b) - n += 1 + l + sovDht(uint64(l)) - } - } - if m.Connection != 0 { - n += 1 + sovDht(uint64(m.Connection)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovDht(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} - -func sozDht(x uint64) (n int) { - return sovDht(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -func (m *Message) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Message: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= Message_MessageType(b&0x7F) << shift - if b < 0x80 { - break - } - } 
- case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthDht - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthDht - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDht - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDht - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Record == nil { - m.Record = &pb.Record{} - } - if err := m.Record.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CloserPeers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDht - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDht - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CloserPeers = append(m.CloserPeers, Message_Peer{}) - if err := m.CloserPeers[len(m.CloserPeers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProviderPeers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDht - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDht - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProviderPeers = append(m.ProviderPeers, Message_Peer{}) - if err := m.ProviderPeers[len(m.ProviderPeers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterLevelRaw", wireType) - } - m.ClusterLevelRaw = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ClusterLevelRaw |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipDht(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDht - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} - -func (m *Message_Peer) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Peer: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Peer: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthDht - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthDht - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthDht - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthDht - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addrs = append(m.Addrs, make([]byte, postIndex-iNdEx)) - copy(m.Addrs[len(m.Addrs)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Connection", wireType) - } - m.Connection = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Connection |= Message_ConnectionType(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipDht(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDht - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} - -func skipDht(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDht - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDht - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDht - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthDht - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupDht - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthDht - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthDht = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowDht = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupDht = fmt.Errorf("proto: unexpected end of group") -) diff --git a/v2/pb/msg.aux.go b/v2/pb/msg.aux.go new file mode 100644 index 00000000..cd9f5588 --- /dev/null +++ b/v2/pb/msg.aux.go @@ -0,0 +1,128 @@ +package pb + +import ( + "bytes" + "crypto/sha256" + + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" + "golang.org/x/exp/slog" + + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" +) + +// this file contains auxiliary methods to augment the protobuf generated types. +// It is used to let these types conform to interfaces or add convenience methods. + +var _ kad.Request[key.Key256, ma.Multiaddr] = (*Message)(nil) + +func (m *Message) Target() key.Key256 { + b := sha256.Sum256(m.Key) + return key.NewKey256(b[:]) +} + +func (m *Message) EmptyResponse() kad.Response[key.Key256, ma.Multiaddr] { + return &Message{ + Type: m.Type, + Key: m.Key, + } +} + +// FromAddrInfo constructs a [Message_Peer] from the given [peer.AddrInfo]. +func FromAddrInfo(p peer.AddrInfo) *Message_Peer { + mp := &Message_Peer{ + Id: []byte(p.ID), + Addrs: make([][]byte, len(p.Addrs)), + } + + for i, maddr := range p.Addrs { + mp.Addrs[i] = maddr.Bytes() // Bytes, not String. Compressed. + } + + return mp +} + +// ContainsCloserPeer returns true if the provided peer ID is among the +// list of closer peers contained in this message. +func (m *Message) ContainsCloserPeer(pid peer.ID) bool { + for _, cp := range m.CloserPeers { + if bytes.Equal(cp.Id, []byte(pid)) { + return true + } + } + return false +} + +// ProviderAddrInfos returns the peer.AddrInfo's of the provider peers in this +// message. 
+func (m *Message) ProviderAddrInfos() []peer.AddrInfo { + if m == nil { + return nil + } + + addrInfos := make([]peer.AddrInfo, 0, len(m.ProviderPeers)) + for _, p := range m.ProviderPeers { + addrInfos = append(addrInfos, peer.AddrInfo{ + ID: peer.ID(p.Id), + Addrs: p.Addresses(), + }) + } + + return addrInfos +} + +// CloserPeersAddrInfos returns the peer.AddrInfo's of the closer peers in this +// message. +func (m *Message) CloserPeersAddrInfos() []peer.AddrInfo { + if m == nil { + return nil + } + + addrInfos := make([]peer.AddrInfo, 0, len(m.CloserPeers)) + for _, p := range m.CloserPeers { + addrInfos = append(addrInfos, peer.AddrInfo{ + ID: peer.ID(p.Id), + Addrs: p.Addresses(), + }) + } + + return addrInfos +} + +func (m *Message) CloserNodes() []kad.NodeInfo[key.Key256, ma.Multiaddr] { + if m == nil { + return nil + } + + infos := make([]kad.NodeInfo[key.Key256, ma.Multiaddr], 0, len(m.CloserPeers)) + for _, p := range m.CloserPeers { + infos = append(infos, &kadt.AddrInfo{Info: peer.AddrInfo{ + ID: peer.ID(p.Id), + Addrs: p.Addresses(), + }}) + } + + return infos +} + +// Addresses returns the Multiaddresses associated with the Message_Peer entry +func (m *Message_Peer) Addresses() []ma.Multiaddr { + if m == nil { + return nil + } + + maddrs := make([]ma.Multiaddr, 0, len(m.Addrs)) + for _, addr := range m.Addrs { + maddr, err := ma.NewMultiaddrBytes(addr) + if err != nil { + slog.Debug("error decoding multiaddr for peer", "peer", peer.ID(m.Id), "err", err) + continue + } + + maddrs = append(maddrs, maddr) + } + + return maddrs +} diff --git a/v2/pb/dht.aux_test.go b/v2/pb/msg.aux_test.go similarity index 93% rename from v2/pb/dht.aux_test.go rename to v2/pb/msg.aux_test.go index f092e8fd..dc3bd016 100644 --- a/v2/pb/dht.aux_test.go +++ b/v2/pb/msg.aux_test.go @@ -1,4 +1,4 @@ -package dht_pb +package pb import ( "testing" diff --git a/v2/pb/msg.pb.go b/v2/pb/msg.pb.go new file mode 100644 index 00000000..0acb59b6 --- /dev/null +++ b/v2/pb/msg.pb.go @@ -0,0 +1,447 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: msg.proto + +package pb + +import ( + reflect "reflect" + sync "sync" + + pb "github.com/libp2p/go-libp2p-record/pb" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// MessageType represents the type of RPC being called. Based on the message +// type different fields of this message will be populated. The response +// of a message with a certain type will have the same type. +type Message_MessageType int32 + +const ( + Message_PUT_VALUE Message_MessageType = 0 + Message_GET_VALUE Message_MessageType = 1 + Message_ADD_PROVIDER Message_MessageType = 2 + Message_GET_PROVIDERS Message_MessageType = 3 + Message_FIND_NODE Message_MessageType = 4 + Message_PING Message_MessageType = 5 +) + +// Enum value maps for Message_MessageType. 
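To see how the regenerated protobuf code and the auxiliary helpers above fit together, here is a small usage sketch. It is not part of the patch; the peer ID and multiaddress are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
	"google.golang.org/protobuf/proto"

	"github.com/libp2p/go-libp2p-kad-dht/v2/pb"
)

func main() {
	// Placeholder provider; real code would take the AddrInfo from the
	// libp2p peerstore.
	maddr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/4001")
	if err != nil {
		log.Fatal(err)
	}
	info := peer.AddrInfo{
		ID:    peer.ID("hypothetical-provider"),
		Addrs: []ma.Multiaddr{maddr},
	}

	msg := &pb.Message{
		Type:          pb.Message_ADD_PROVIDER,
		Key:           []byte("random-key"),
		ProviderPeers: []*pb.Message_Peer{pb.FromAddrInfo(info)},
	}

	// Messages now round-trip through the standard protobuf runtime
	// instead of the gogo-generated Marshal/Unmarshal methods.
	data, err := proto.Marshal(msg)
	if err != nil {
		log.Fatal(err)
	}

	var decoded pb.Message
	if err := proto.Unmarshal(data, &decoded); err != nil {
		log.Fatal(err)
	}

	// ProviderAddrInfos decodes the compressed multiaddr bytes back into
	// peer.AddrInfo values.
	for _, ai := range decoded.ProviderAddrInfos() {
		fmt.Println(ai.ID, ai.Addrs)
	}
}
```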
+var ( + Message_MessageType_name = map[int32]string{ + 0: "PUT_VALUE", + 1: "GET_VALUE", + 2: "ADD_PROVIDER", + 3: "GET_PROVIDERS", + 4: "FIND_NODE", + 5: "PING", + } + Message_MessageType_value = map[string]int32{ + "PUT_VALUE": 0, + "GET_VALUE": 1, + "ADD_PROVIDER": 2, + "GET_PROVIDERS": 3, + "FIND_NODE": 4, + "PING": 5, + } +) + +func (x Message_MessageType) Enum() *Message_MessageType { + p := new(Message_MessageType) + *p = x + return p +} + +func (x Message_MessageType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Message_MessageType) Descriptor() protoreflect.EnumDescriptor { + return file_msg_proto_enumTypes[0].Descriptor() +} + +func (Message_MessageType) Type() protoreflect.EnumType { + return &file_msg_proto_enumTypes[0] +} + +func (x Message_MessageType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Message_MessageType.Descriptor instead. +func (Message_MessageType) EnumDescriptor() ([]byte, []int) { + return file_msg_proto_rawDescGZIP(), []int{0, 0} +} + +type Message_ConnectionType int32 + +const ( + // sender does not have a connection to peer, and no extra information (default) + Message_NOT_CONNECTED Message_ConnectionType = 0 + // sender has a live connection to peer + Message_CONNECTED Message_ConnectionType = 1 + // sender recently connected to peer + Message_CAN_CONNECT Message_ConnectionType = 2 + // sender recently tried to connect to peer repeatedly but failed to connect + // ("try" here is loose, but this should signal "made strong effort, failed") + Message_CANNOT_CONNECT Message_ConnectionType = 3 +) + +// Enum value maps for Message_ConnectionType. +var ( + Message_ConnectionType_name = map[int32]string{ + 0: "NOT_CONNECTED", + 1: "CONNECTED", + 2: "CAN_CONNECT", + 3: "CANNOT_CONNECT", + } + Message_ConnectionType_value = map[string]int32{ + "NOT_CONNECTED": 0, + "CONNECTED": 1, + "CAN_CONNECT": 2, + "CANNOT_CONNECT": 3, + } +) + +func (x Message_ConnectionType) Enum() *Message_ConnectionType { + p := new(Message_ConnectionType) + *p = x + return p +} + +func (x Message_ConnectionType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Message_ConnectionType) Descriptor() protoreflect.EnumDescriptor { + return file_msg_proto_enumTypes[1].Descriptor() +} + +func (Message_ConnectionType) Type() protoreflect.EnumType { + return &file_msg_proto_enumTypes[1] +} + +func (x Message_ConnectionType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Message_ConnectionType.Descriptor instead. +func (Message_ConnectionType) EnumDescriptor() ([]byte, []int) { + return file_msg_proto_rawDescGZIP(), []int{0, 1} +} + +// Message is the top-level envelope for exchanging +// information with the DHT protocol. +type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // defines what type of message it is. + Type Message_MessageType `protobuf:"varint,1,opt,name=type,proto3,enum=dht.pb.Message_MessageType" json:"type,omitempty"` + // defines what coral cluster level this query/response belongs to. + // in case we want to implement coral's cluster rings in the future. + // + // Deprecated: Marked as deprecated in msg.proto. 
+ ClusterLevelRaw int32 `protobuf:"varint,10,opt,name=cluster_level_raw,json=clusterLevelRaw,proto3" json:"cluster_level_raw,omitempty"` + // Used to specify the key associated with this message. + // PUT_VALUE, GET_VALUE, ADD_PROVIDER, GET_PROVIDERS + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + // Used to return a value + // PUT_VALUE, GET_VALUE + Record *pb.Record `protobuf:"bytes,3,opt,name=record,proto3" json:"record,omitempty"` + // Used to return peers closer to a key in a query + // GET_VALUE, GET_PROVIDERS, FIND_NODE + CloserPeers []*Message_Peer `protobuf:"bytes,8,rep,name=closer_peers,json=closerPeers,proto3" json:"closer_peers,omitempty"` + // Used to return Providers + // GET_VALUE, ADD_PROVIDER, GET_PROVIDERS + ProviderPeers []*Message_Peer `protobuf:"bytes,9,rep,name=provider_peers,json=providerPeers,proto3" json:"provider_peers,omitempty"` +} + +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_msg_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_msg_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. +func (*Message) Descriptor() ([]byte, []int) { + return file_msg_proto_rawDescGZIP(), []int{0} +} + +func (x *Message) GetType() Message_MessageType { + if x != nil { + return x.Type + } + return Message_PUT_VALUE +} + +// Deprecated: Marked as deprecated in msg.proto. +func (x *Message) GetClusterLevelRaw() int32 { + if x != nil { + return x.ClusterLevelRaw + } + return 0 +} + +func (x *Message) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *Message) GetRecord() *pb.Record { + if x != nil { + return x.Record + } + return nil +} + +func (x *Message) GetCloserPeers() []*Message_Peer { + if x != nil { + return x.CloserPeers + } + return nil +} + +func (x *Message) GetProviderPeers() []*Message_Peer { + if x != nil { + return x.ProviderPeers + } + return nil +} + +type Message_Peer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // ID of a given peer. 
+ Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // multiaddrs for a given peer + Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs,proto3" json:"addrs,omitempty"` + // used to signal the sender's connection capabilities to the peer + Connection Message_ConnectionType `protobuf:"varint,3,opt,name=connection,proto3,enum=dht.pb.Message_ConnectionType" json:"connection,omitempty"` +} + +func (x *Message_Peer) Reset() { + *x = Message_Peer{} + if protoimpl.UnsafeEnabled { + mi := &file_msg_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message_Peer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message_Peer) ProtoMessage() {} + +func (x *Message_Peer) ProtoReflect() protoreflect.Message { + mi := &file_msg_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message_Peer.ProtoReflect.Descriptor instead. +func (*Message_Peer) Descriptor() ([]byte, []int) { + return file_msg_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Message_Peer) GetId() []byte { + if x != nil { + return x.Id + } + return nil +} + +func (x *Message_Peer) GetAddrs() [][]byte { + if x != nil { + return x.Addrs + } + return nil +} + +func (x *Message_Peer) GetConnection() Message_ConnectionType { + if x != nil { + return x.Connection + } + return Message_NOT_CONNECTED +} + +var File_msg_proto protoreflect.FileDescriptor + +var file_msg_proto_rawDesc = []byte{ + 0x0a, 0x09, 0x6d, 0x73, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x64, 0x68, 0x74, + 0x2e, 0x70, 0x62, 0x1a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x6c, 0x69, 0x62, 0x70, 0x32, 0x70, 0x2f, 0x67, 0x6f, 0x2d, 0x6c, 0x69, 0x62, 0x70, 0x32, 0x70, + 0x2d, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x72, + 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcf, 0x04, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x1b, 0x2e, 0x64, 0x68, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x72, 0x61, 0x77, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x0f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x52, 0x61, 0x77, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, + 0x70, 0x62, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, + 0x64, 0x12, 0x37, 0x0a, 0x0c, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x65, 0x72, + 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x64, 0x68, 0x74, 0x2e, 0x70, 0x62, + 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x0b, 0x63, + 0x6c, 0x6f, 0x73, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x3b, 0x0a, 0x0e, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 
0x65, 0x72, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x64, 0x68, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x73, 0x1a, 0x6c, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, + 0x61, 0x64, 0x64, 0x72, 0x73, 0x12, 0x3e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x64, 0x68, 0x74, 0x2e, + 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x69, 0x0a, 0x0b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x50, 0x55, 0x54, 0x5f, 0x56, 0x41, 0x4c, 0x55, + 0x45, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x47, 0x45, 0x54, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, + 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x44, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, + 0x45, 0x52, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x54, 0x5f, 0x50, 0x52, 0x4f, 0x56, + 0x49, 0x44, 0x45, 0x52, 0x53, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x49, 0x4e, 0x44, 0x5f, + 0x4e, 0x4f, 0x44, 0x45, 0x10, 0x04, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, + 0x22, 0x57, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, + 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, + 0x45, 0x44, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x41, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, + 0x45, 0x43, 0x54, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x5f, + 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x03, 0x42, 0x08, 0x5a, 0x06, 0x2e, 0x2f, 0x3b, + 0x64, 0x68, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_msg_proto_rawDescOnce sync.Once + file_msg_proto_rawDescData = file_msg_proto_rawDesc +) + +func file_msg_proto_rawDescGZIP() []byte { + file_msg_proto_rawDescOnce.Do(func() { + file_msg_proto_rawDescData = protoimpl.X.CompressGZIP(file_msg_proto_rawDescData) + }) + return file_msg_proto_rawDescData +} + +var ( + file_msg_proto_enumTypes = make([]protoimpl.EnumInfo, 2) + file_msg_proto_msgTypes = make([]protoimpl.MessageInfo, 2) + file_msg_proto_goTypes = []interface{}{ + (Message_MessageType)(0), // 0: dht.pb.Message.MessageType + (Message_ConnectionType)(0), // 1: dht.pb.Message.ConnectionType + (*Message)(nil), // 2: dht.pb.Message + (*Message_Peer)(nil), // 3: dht.pb.Message.Peer + (*pb.Record)(nil), // 4: record.pb.Record + } +) + +var file_msg_proto_depIdxs = []int32{ + 0, // 0: dht.pb.Message.type:type_name -> dht.pb.Message.MessageType + 4, // 1: dht.pb.Message.record:type_name -> record.pb.Record + 3, // 2: dht.pb.Message.closer_peers:type_name -> dht.pb.Message.Peer + 3, // 3: dht.pb.Message.provider_peers:type_name -> dht.pb.Message.Peer + 1, // 4: dht.pb.Message.Peer.connection:type_name -> dht.pb.Message.ConnectionType + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for 
method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_msg_proto_init() } +func file_msg_proto_init() { + if File_msg_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_msg_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_msg_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message_Peer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_msg_proto_rawDesc, + NumEnums: 2, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_msg_proto_goTypes, + DependencyIndexes: file_msg_proto_depIdxs, + EnumInfos: file_msg_proto_enumTypes, + MessageInfos: file_msg_proto_msgTypes, + }.Build() + File_msg_proto = out.File + file_msg_proto_rawDesc = nil + file_msg_proto_goTypes = nil + file_msg_proto_depIdxs = nil +} diff --git a/v2/pb/dht.proto b/v2/pb/msg.proto similarity index 65% rename from v2/pb/dht.proto rename to v2/pb/msg.proto index 18bfd741..08d249bb 100644 --- a/v2/pb/dht.proto +++ b/v2/pb/msg.proto @@ -1,17 +1,17 @@ -// In order to re-generate the golang packages for `Message` you will need... -// 1. Protobuf binary (tested with protoc 3.0.0). - https://github.com/gogo/protobuf/releases -// 2. Gogo Protobuf (tested with gogo 0.3). - https://github.com/gogo/protobuf -// 3. To have cloned `libp2p/go-libp2p-{record,kad-dht}` under the same directory. -// Now from `libp2p/go-libp2p-kad-dht/pb` you can run... -// `protoc --gogo_out=. --proto_path=../../go-libp2p-record/pb/ --proto_path=./ dht.proto` - syntax = "proto3"; package dht.pb; +option go_package = "./;dht"; + import "github.com/libp2p/go-libp2p-record/pb/record.proto"; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +// Message is the top-level envelope for exchanging +// information with the DHT protocol. message Message { + + // MessageType represents the type of RPC being called. Based on the message + // type different fields of this message will be populated. The response + // of a message with a certain type will have the same type. enum MessageType { PUT_VALUE = 0; GET_VALUE = 1; @@ -38,7 +38,7 @@ message Message { message Peer { // ID of a given peer. - bytes id = 1 [(gogoproto.customtype) = "byteString", (gogoproto.nullable) = false]; + bytes id = 1; // multiaddrs for a given peer repeated bytes addrs = 2; @@ -52,7 +52,7 @@ message Message { // defines what coral cluster level this query/response belongs to. // in case we want to implement coral's cluster rings in the future. - int32 clusterLevelRaw = 10; + int32 cluster_level_raw = 10 [deprecated = true]; // Used to specify the key associated with this message. 
// PUT_VALUE, GET_VALUE, ADD_PROVIDER, GET_PROVIDERS @@ -64,9 +64,9 @@ message Message { // Used to return peers closer to a key in a query // GET_VALUE, GET_PROVIDERS, FIND_NODE - repeated Peer closerPeers = 8 [(gogoproto.nullable) = false]; + repeated Peer closer_peers = 8; // Used to return Providers // GET_VALUE, ADD_PROVIDER, GET_PROVIDERS - repeated Peer providerPeers = 9 [(gogoproto.nullable) = false]; + repeated Peer provider_peers = 9; } diff --git a/v2/router.go b/v2/router.go index 1879795d..bc22ae50 100644 --- a/v2/router.go +++ b/v2/router.go @@ -11,12 +11,14 @@ import ( "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/network/address" + + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) var _ kademlia.Router[key.Key256, ma.Multiaddr] = (*DHT)(nil) func (d *DHT) SendMessage(ctx context.Context, to kad.NodeInfo[key.Key256, ma.Multiaddr], protoID address.ProtocolID, req kad.Request[key.Key256, ma.Multiaddr]) (kad.Response[key.Key256, ma.Multiaddr], error) { - s, err := d.host.NewStream(ctx, peer.ID(to.ID().(nodeID)), d.cfg.ProtocolID) + s, err := d.host.NewStream(ctx, peer.ID(to.ID().(kadt.PeerID)), d.cfg.ProtocolID) if err != nil { return nil, fmt.Errorf("new stream: %w", err) } diff --git a/v2/stream.go b/v2/stream.go index b5fb1341..a05bc0f0 100644 --- a/v2/stream.go +++ b/v2/stream.go @@ -10,6 +10,8 @@ import ( "sync" "time" + "google.golang.org/protobuf/proto" + "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-msgio" @@ -187,7 +189,7 @@ func (d *DHT) streamUnmarshalMsg(ctx context.Context, slogger *slog.Logger, data defer span.End() var req pb.Message - if err := req.Unmarshal(data); err != nil { + if err := proto.Unmarshal(data, &req); err != nil { slogger.LogAttrs(ctx, slog.LevelDebug, "error unmarshalling message", slog.String("err", err.Error())) _ = stats.RecordWithTags(ctx, diff --git a/v2/stream_test.go b/v2/stream_test.go index c10d48e1..26e932ee 100644 --- a/v2/stream_test.go +++ b/v2/stream_test.go @@ -6,14 +6,17 @@ import ( "runtime" "testing" + "google.golang.org/protobuf/proto" + "github.com/libp2p/go-libp2p" - pb "github.com/libp2p/go-libp2p-kad-dht/v2/pb" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-msgio" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) type testReadWriter struct { @@ -35,12 +38,12 @@ func (trw testReadWriter) ReadMsg() (*pb.Message, error) { } resp := &pb.Message{} - err = resp.Unmarshal(msg) + err = proto.Unmarshal(msg, resp) return resp, err } func (trw testReadWriter) WriteMsg(msg *pb.Message) error { - data, err := msg.Marshal() + data, err := proto.Marshal(msg) if err != nil { return err } From 99dbdc673e129eb6421a3165889ac3b01b31bf5d Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Fri, 1 Sep 2023 15:58:21 +0200 Subject: [PATCH 37/64] Add backend tracing --- v2/backend_trace.go | 49 ++++++++++++++++++++++++++++++++++++++++ v2/dht.go | 55 ++++++++++++++++++++++++++++++++++++++++----- v2/handlers.go | 4 +++- v2/handlers_test.go | 50 ++++++++++++++++++++--------------------- 4 files changed, 127 insertions(+), 31 deletions(-) create mode 100644 v2/backend_trace.go diff --git a/v2/backend_trace.go b/v2/backend_trace.go new file mode 100644 index 00000000..e8ad77e5 --- /dev/null +++ b/v2/backend_trace.go @@ -0,0 +1,49 @@ +package dht + +import 
(
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	otel "go.opentelemetry.io/otel/trace"
+)
+
+type tracedBackend struct {
+	namespace string
+	backend   Backend
+}
+
+var _ Backend = (*tracedBackend)(nil)
+
+func traceWrapBackend(namespace string, backend Backend) Backend {
+	return &tracedBackend{
+		namespace: namespace,
+		backend:   backend,
+	}
+}
+
+func (t tracedBackend) Store(ctx context.Context, key string, value any) (any, error) {
+	ctx, span := tracer.Start(ctx, "Store", otel.WithAttributes(attribute.String("backend", t.namespace), attribute.String("key", key)))
+	defer span.End()
+
+	result, err := t.backend.Store(ctx, key, value)
+	if err != nil {
+		span.RecordError(err)
+		span.SetStatus(codes.Error, err.Error())
+	}
+
+	return result, err
+}
+
+func (t tracedBackend) Fetch(ctx context.Context, key string) (any, error) {
+	ctx, span := tracer.Start(ctx, "Fetch", otel.WithAttributes(attribute.String("backend", t.namespace), attribute.String("key", key)))
+	defer span.End()
+
+	result, err := t.backend.Fetch(ctx, key)
+	if err != nil {
+		span.RecordError(err)
+		span.SetStatus(codes.Error, err.Error())
+	}
+
+	return result, err
+}
diff --git a/v2/dht.go b/v2/dht.go
index 41154e2b..3c0a24fc 100644
--- a/v2/dht.go
+++ b/v2/dht.go
@@ -112,6 +112,14 @@ func New(h host.Host, cfg *Config) (*DHT, error) {
 		}
 	}
 
+	// wrap all backends with tracing
+	for ns, backend := range d.backends {
+		d.backends[ns] = &tracedBackend{
+			namespace: ns,
+			backend:   backend,
+		}
+	}
+
 	// instantiate a new Kademlia DHT coordinator.
 	d.kad, err = kademlia.NewDht[key.Key256, ma.Multiaddr](nid, d, d.rt, nil)
 	if err != nil {
@@ -166,11 +174,9 @@ func (d *DHT) Close() error {
 	// and then get hold of a reference to that datastore by looking in our
 	// backends map and casting one to one of our known providers.
 	if d.cfg.ProtocolID == ProtocolIPFS && d.cfg.Datastore == nil {
-		if b, found := d.backends[namespaceProviders]; found {
-			if pbe, ok := b.(*ProvidersBackend); ok {
-				if err := pbe.datastore.Close(); err != nil {
-					d.log.Warn("failed closing in memory datastore", "err", err.Error())
-				}
+		if pbe, err := typedBackend[*ProvidersBackend](d, namespaceProviders); err == nil {
+			if err := pbe.datastore.Close(); err != nil {
+				d.log.Warn("failed closing in memory datastore", "err", err.Error())
 			}
 		}
 	}
@@ -259,7 +265,46 @@ func (d *DHT) logErr(err error, msg string) {
 	d.log.Warn(msg, "err", err.Error())
 }
 
+// newSHA256Key returns a [key.Key256] that conforms to the [kad.Key] interface by
+// SHA256 hashing the given bytes and wrapping them in a [key.Key256].
 func newSHA256Key(data []byte) key.Key256 {
 	b := sha256.Sum256(data)
 	return key.NewKey256(b[:])
 }
+
+// typedBackend returns the backend at the given namespace. It is cast to the
+// provided type. If the namespace doesn't exist or the type cast fails, this
+// function returns an error. Can't be a method on [DHT] because of the generic
+// type constraint [0].
+//
+// This method is only used in tests and the [DHT.Close] method. It would be
+// great if we didn't need this method.
+//
+// [0]: https://github.com/golang/go/issues/49085
+func typedBackend[T Backend](d *DHT, namespace string) (T, error) {
+	// check if backend was registered
+	be, found := d.backends[namespace]
+	if !found {
+		return *new(T), fmt.Errorf("backend for namespace %s not found", namespace)
+	}
+
+	// try to cast to the desired type
+	cbe, ok := be.(T) // cast backend
+	if !ok {
+		// that didn't work...
check if the desired backend was wrapped + // into a traced backend + tbe, ok := be.(*tracedBackend) + if !ok { + return *new(T), fmt.Errorf("backend at namespace is no traced backend nor %T", *new(T)) + } + + cbe, ok := tbe.backend.(T) + if !ok { + return *new(T), fmt.Errorf("traced backend doesn't contain %T", *new(T)) + } + + return cbe, nil + } + + return cbe, nil +} diff --git a/v2/handlers.go b/v2/handlers.go index e3268bbd..9a37c17b 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -11,6 +11,8 @@ import ( recpb "github.com/libp2p/go-libp2p-record/pb" "github.com/libp2p/go-libp2p/core/peer" "github.com/plprobelab/go-kademlia/key" + "go.opentelemetry.io/otel/attribute" + otel "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" @@ -232,7 +234,7 @@ func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Me // closerPeers returns the closest peers to the given target key this host knows // about. It doesn't return 1) itself 2) the peer that asked for closer peers. func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256) []*pb.Message_Peer { - ctx, span := tracer.Start(ctx, "DHT.closerPeers") + ctx, span := tracer.Start(ctx, "DHT.closerPeers", otel.WithAttributes(attribute.String("remote", remote.String()), attribute.String("target", target.HexString()))) defer span.End() peers := d.rt.NearestNodes(target, d.cfg.BucketSize) diff --git a/v2/handlers_test.go b/v2/handlers_test.go index bbce0c10..5c9b0c89 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -675,10 +675,10 @@ func TestDHT_handlePutValue_overwrites_corrupt_stored_ipns_record(t *testing.T) dsKey := newDatastoreKey(namespaceIPNS, string(remote)) // string(remote) is the key suffix - rbe, ok := d.backends[namespaceIPNS].(*RecordBackend) - require.True(t, ok) + rbe, err := typedBackend[*RecordBackend](d, namespaceIPNS) + require.NoError(t, err) - err := rbe.datastore.Put(context.Background(), dsKey, []byte("corrupt-record")) + err = rbe.datastore.Put(context.Background(), dsKey, []byte("corrupt-record")) require.NoError(t, err) // put the correct record through handler @@ -912,8 +912,8 @@ func TestDHT_handleGetValue_happy_path_ipns_record(t *testing.T) { putReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) - rbe, ok := d.backends[namespaceIPNS].(*RecordBackend) - require.True(t, ok) + rbe, err := typedBackend[*RecordBackend](d, namespaceIPNS) + require.NoError(t, err) data, err := putReq.Record.Marshal() require.NoError(t, err) @@ -969,13 +969,13 @@ func TestDHT_handleGetValue_corrupt_record_in_datastore(t *testing.T) { for _, ns := range []string{namespaceIPNS, namespacePublicKey} { t.Run(ns, func(t *testing.T) { - rbe, ok := d.backends[ns].(*RecordBackend) - require.True(t, ok) + rbe, err := typedBackend[*RecordBackend](d, ns) + require.NoError(t, err) key := []byte(fmt.Sprintf("/%s/record-key", ns)) dsKey := newDatastoreKey(ns, "record-key") - err := rbe.datastore.Put(context.Background(), dsKey, []byte("corrupt-data")) + err = rbe.datastore.Put(context.Background(), dsKey, []byte("corrupt-data")) require.NoError(t, err) req := &pb.Message{ @@ -1009,8 +1009,8 @@ func TestDHT_handleGetValue_ipns_max_age_exceeded_in_datastore(t *testing.T) { putReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) - rbe, ok := d.backends[namespaceIPNS].(*RecordBackend) - require.True(t, ok) + rbe, err := typedBackend[*RecordBackend](d, namespaceIPNS) + require.NoError(t, err) dsKey := 
newDatastoreKey(namespaceIPNS, string(remote)) @@ -1047,8 +1047,8 @@ func TestDHT_handleGetValue_does_not_validate_stored_record(t *testing.T) { fillRoutingTable(t, d) - rbe, ok := d.backends[namespaceIPNS].(*RecordBackend) - require.True(t, ok) + rbe, err := typedBackend[*RecordBackend](d, namespaceIPNS) + require.NoError(t, err) remote, priv := newIdentity(t) @@ -1133,13 +1133,13 @@ func TestDHT_handleGetValue_supports_providers(t *testing.T) { // add to addresses peerstore d.host.Peerstore().AddAddrs(p.ID, p.Addrs, time.Hour) - be, ok := d.backends[namespaceProviders].(*ProvidersBackend) - require.True(t, ok) + be, err := typedBackend[*ProvidersBackend](d, namespaceProviders) + require.NoError(t, err) // write to datastore dsKey := newDatastoreKey(namespaceProviders, string(key), string(p.ID)) rec := expiryRecord{expiry: time.Now()} - err := be.datastore.Put(ctx, dsKey, rec.MarshalBinary()) + err = be.datastore.Put(ctx, dsKey, rec.MarshalBinary()) require.NoError(t, err) req := &pb.Message{ @@ -1230,8 +1230,8 @@ func TestDHT_handleAddProvider_happy_path(t *testing.T) { assert.Equal(t, addrs[0], addrInfo.Addrs[0]) // check if the record was store in the datastore - be, ok := d.backends[namespaceProviders].(*ProvidersBackend) - require.True(t, ok) + be, err := typedBackend[*ProvidersBackend](d, namespaceProviders) + require.NoError(t, err) dsKey := newDatastoreKey(be.namespace, string(key), string(addrInfo.ID)) @@ -1405,8 +1405,8 @@ func TestDHT_handleGetProviders_happy_path(t *testing.T) { key := []byte("random-key") - be, ok := d.backends[namespaceProviders].(*ProvidersBackend) - require.True(t, ok) + be, err := typedBackend[*ProvidersBackend](d, namespaceProviders) + require.NoError(t, err) providers := []peer.AddrInfo{ newAddrInfo(t), @@ -1457,8 +1457,8 @@ func TestDHT_handleGetProviders_do_not_return_expired_records(t *testing.T) { key := []byte("random-key") // check if the record was store in the datastore - be, ok := d.backends[namespaceProviders].(*ProvidersBackend) - require.True(t, ok) + be, err := typedBackend[*ProvidersBackend](d, namespaceProviders) + require.NoError(t, err) provider1 := newAddrInfo(t) provider2 := newAddrInfo(t) @@ -1469,7 +1469,7 @@ func TestDHT_handleGetProviders_do_not_return_expired_records(t *testing.T) { // write valid record dsKey := newDatastoreKey(namespaceProviders, string(key), string(provider1.ID)) rec := expiryRecord{expiry: time.Now()} - err := be.datastore.Put(ctx, dsKey, rec.MarshalBinary()) + err = be.datastore.Put(ctx, dsKey, rec.MarshalBinary()) require.NoError(t, err) // write expired record @@ -1515,8 +1515,8 @@ func TestDHT_handleGetProviders_only_serve_filtered_addresses(t *testing.T) { key := []byte("random-key") - be, ok := d.backends[namespaceProviders].(*ProvidersBackend) - require.True(t, ok) + be, err := typedBackend[*ProvidersBackend](d, namespaceProviders) + require.NoError(t, err) p := newAddrInfo(t) require.True(t, len(p.Addrs) > 0, "need addr info with at least one address") @@ -1527,7 +1527,7 @@ func TestDHT_handleGetProviders_only_serve_filtered_addresses(t *testing.T) { // write to datastore dsKey := newDatastoreKey(namespaceProviders, string(key), string(p.ID)) rec := expiryRecord{expiry: time.Now()} - err := be.datastore.Put(ctx, dsKey, rec.MarshalBinary()) + err = be.datastore.Put(ctx, dsKey, rec.MarshalBinary()) require.NoError(t, err) req := &pb.Message{ From f16d79e7e580be6791b7a266e67e6b4165400301 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Fri, 1 Sep 2023 16:05:40 +0200 Subject: [PATCH 38/64] improve 
kadt package doc --- v2/kadt/kadt.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/v2/kadt/kadt.go b/v2/kadt/kadt.go index b2d586f9..d3b33db5 100644 --- a/v2/kadt/kadt.go +++ b/v2/kadt/kadt.go @@ -1,7 +1,7 @@ // Package kadt contains the kademlia types for interacting with go-kademlia. -// It would be nicer to have these types in the top-level DHT package, however -// we also need these types in, e.g., the dht_pb package to let the Message -// type conform to certain interfaces. +// It would be nicer to have these types in the top-level DHT package; however, +// we also need these types in, e.g., the pb package to let the +// [pb.Message] type conform to certain interfaces. package kadt import ( From 2610f89266590cde13469610008445120175cb38 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Fri, 1 Sep 2023 16:48:33 +0200 Subject: [PATCH 39/64] document tracedBackend --- v2/backend_trace.go | 31 ++++++++++++++++++------------- v2/go.mod | 10 +++++----- v2/go.sum | 12 ++++++------ 3 files changed, 29 insertions(+), 24 deletions(-) diff --git a/v2/backend_trace.go b/v2/backend_trace.go index e8ad77e5..ea9211ab 100644 --- a/v2/backend_trace.go +++ b/v2/backend_trace.go @@ -8,22 +8,20 @@ import ( otel "go.opentelemetry.io/otel/trace" ) +// tracedBackend wraps a [Backend] in calls to open telemetry tracing +// directives. In [New] all backends configured in [Config] or automatically +// configured if none are given will be wrapped with this tracedBackend. type tracedBackend struct { - namespace string - backend Backend + namespace string // the namespace the backend operates in. Used as a tracing attribute. + backend Backend // the [Backend] to be traced } var _ Backend = (*tracedBackend)(nil) -func traceWrapBackend(namespace string, backend Backend) Backend { - return &tracedBackend{ - namespace: namespace, - backend: backend, - } -} - -func (t tracedBackend) Store(ctx context.Context, key string, value any) (any, error) { - ctx, span := tracer.Start(ctx, "Store", otel.WithAttributes(attribute.String("backend", t.namespace), attribute.String("key", key))) +// Store implements the [Backend] interface, forwards the call to the wrapped +// backend and manages the trace span. +func (t *tracedBackend) Store(ctx context.Context, key string, value any) (any, error) { + ctx, span := tracer.Start(ctx, "Store", t.traceAttributes(key)) defer span.End() result, err := t.backend.Store(ctx, key, value) @@ -35,8 +33,10 @@ func (t tracedBackend) Store(ctx context.Context, key string, value any) (any, e return result, err } -func (t tracedBackend) Fetch(ctx context.Context, key string) (any, error) { - ctx, span := tracer.Start(ctx, "Fetch", otel.WithAttributes(attribute.String("backend", t.namespace), attribute.String("key", key))) +// Fetch implements the [Backend] interface, forwards the call to the wrapped +// backend and manages the trace span. +func (t *tracedBackend) Fetch(ctx context.Context, key string) (any, error) { + ctx, span := tracer.Start(ctx, "Fetch", t.traceAttributes(key)) defer span.End() result, err := t.backend.Fetch(ctx, key) @@ -47,3 +47,8 @@ func (t tracedBackend) Fetch(ctx context.Context, key string) (any, error) { return result, err } + +// traceAttributes is a helper to build the trace attributes. 
+func (t *tracedBackend) traceAttributes(key string) otel.SpanStartEventOption { + return otel.WithAttributes(attribute.String("namespace", t.namespace), attribute.String("key", key)) +} diff --git a/v2/go.mod b/v2/go.mod index 32888ee7..9c805bf0 100644 --- a/v2/go.mod +++ b/v2/go.mod @@ -4,7 +4,6 @@ go 1.20 require ( github.com/benbjohnson/clock v1.3.5 - github.com/gogo/protobuf v1.3.2 github.com/hashicorp/golang-lru/v2 v2.0.5 github.com/iand/zikade v0.0.0-20230824143824-d11f2132b4fb github.com/ipfs/boxo v0.12.0 @@ -20,10 +19,11 @@ require ( github.com/plprobelab/go-kademlia v0.0.0-20230823114513-9b9e606066c9 github.com/stretchr/testify v1.8.4 go.opencensus.io v0.24.0 - go.opentelemetry.io/otel v1.16.0 - go.opentelemetry.io/otel/trace v1.16.0 + go.opentelemetry.io/otel v1.17.0 + go.opentelemetry.io/otel/trace v1.17.0 go.uber.org/zap/exp v0.1.0 golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 + google.golang.org/protobuf v1.31.0 ) require ( @@ -42,6 +42,7 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect @@ -99,7 +100,7 @@ require ( github.com/raulk/go-watchdog v1.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/syndtr/goleveldb v1.0.0 // indirect - go.opentelemetry.io/otel/metric v1.16.0 // indirect + go.opentelemetry.io/otel/metric v1.17.0 // indirect go.uber.org/dig v1.17.0 // indirect go.uber.org/fx v1.20.0 // indirect go.uber.org/multierr v1.11.0 // indirect @@ -111,7 +112,6 @@ require ( golang.org/x/sys v0.11.0 // indirect golang.org/x/text v0.12.0 // indirect golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect - google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect ) diff --git a/v2/go.sum b/v2/go.sum index 24ae5122..8a635d8c 100644 --- a/v2/go.sum +++ b/v2/go.sum @@ -377,12 +377,12 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= -go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= -go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= -go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= -go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= -go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= +go.opentelemetry.io/otel v1.17.0 h1:MW+phZ6WZ5/uk2nd93ANk/6yJ+dVrvNWUjGhnnFU5jM= +go.opentelemetry.io/otel v1.17.0/go.mod h1:I2vmBGtFaODIVMBSTPVDlJSzBDNf93k60E6Ft0nyjo0= +go.opentelemetry.io/otel/metric v1.17.0 h1:iG6LGVz5Gh+IuO0jmgvpTB6YVrCGngi8QGm+pMd8Pdc= +go.opentelemetry.io/otel/metric v1.17.0/go.mod h1:h4skoxdZI17AxwITdmdZjjYJQH5nzijUUjm+wtPph5o= +go.opentelemetry.io/otel/trace v1.17.0 h1:/SWhSRHmDPOImIAetP1QAeMnZYiQXrTy4fMMYOdSKWQ= +go.opentelemetry.io/otel/trace v1.17.0/go.mod 
h1:I/4vKTgFclIsXRVucpH25X0mpFSczM7aHeaz0ZBLWjY= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= From 54f20b5c0f8e408d5ca07cbb6a9c12c98798cbbd Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Thu, 7 Sep 2023 18:10:27 +0200 Subject: [PATCH 40/64] Integrate Zikade/go-kademlia in v2 (#880) * WIP * revise protobuf * remove gogo protobuf dependency * WIP * add kadt package * Add routing test * add custom zikade dependency * Import zikade code * Remove generics from zikade imported code * Update to latest go-kademlia * Cleanup naming of events * Minor naming cleanup * Change maintainers for v2 while being developed * remove Zikade dependency * Consolidate type parameters * Change config test structure * use opentelemetry * use convenience attribute methods * let coord package use tele * fix golint warnings * use clock.Clock * add telemetry context tests * Improve telemetry documentation * fix test race * fix garbage collection race * Add AddAddresses method to DHT (#879) * Add AddAddresses method to DHT * Add AddAddresses method to DHT * go mod tidy * Rename Query Skip errors * go fmt coordinator.go * Fix test flakes * Fix lint errors --------- Co-authored-by: Ian Davis <18375+iand@users.noreply.github.com> --- CODEOWNERS | 3 + v2/.gitignore | 1 + v2/backend.go | 26 +- v2/backend_provider.go | 40 +-- v2/backend_provider_test.go | 18 +- v2/backend_record.go | 22 +- v2/backend_trace.go | 23 +- v2/config.go | 37 +- v2/config_test.go | 204 +++++------ v2/coord/behaviour.go | 152 +++++++++ v2/coord/behaviour_test.go | 28 ++ v2/coord/conversion.go | 62 ++++ v2/coord/coordinator.go | 453 +++++++++++++++++++++++++ v2/coord/coordinator_test.go | 360 ++++++++++++++++++++ v2/coord/coretypes.go | 93 +++++ v2/coord/event.go | 152 +++++++++ v2/coord/event_test.go | 25 ++ v2/coord/internal/nettest/layouts.go | 65 ++++ v2/coord/internal/nettest/routing.go | 174 ++++++++++ v2/coord/internal/nettest/topology.go | 140 ++++++++ v2/coord/network.go | 264 ++++++++++++++ v2/coord/network_test.go | 34 ++ v2/coord/query.go | 182 ++++++++++ v2/coord/routing.go | 358 +++++++++++++++++++ v2/coord/routing_test.go | 315 +++++++++++++++++ v2/dht.go | 88 +++-- v2/dht_test.go | 62 ++++ v2/go.mod | 13 +- v2/go.sum | 66 +--- v2/handlers.go | 2 +- v2/handlers_test.go | 35 +- v2/internal/kadtest/context.go | 31 ++ v2/internal/kadtest/tracing.go | 33 ++ v2/kadt/kadt.go | 23 +- v2/metrics/metrics.go | 118 ------- v2/pb/msg.aux.go | 3 +- v2/query_test.go | 74 ++++ v2/router.go | 123 ++++++- v2/{routing_test.go => router_test.go} | 0 v2/routing.go | 46 ++- v2/stream.go | 99 +++--- v2/stream_test.go | 5 +- v2/tele/tele.go | 234 +++++++++++++ v2/tele/tele_test.go | 41 +++ 44 files changed, 3861 insertions(+), 466 deletions(-) create mode 100644 v2/.gitignore create mode 100644 v2/coord/behaviour.go create mode 100644 v2/coord/behaviour_test.go create mode 100644 v2/coord/conversion.go create mode 100644 v2/coord/coordinator.go create mode 100644 v2/coord/coordinator_test.go create mode 100644 v2/coord/coretypes.go create mode 100644 v2/coord/event.go create mode 100644 v2/coord/event_test.go create mode 100644 v2/coord/internal/nettest/layouts.go create mode 100644 v2/coord/internal/nettest/routing.go create mode 100644 v2/coord/internal/nettest/topology.go create mode 100644 v2/coord/network.go create mode 100644 v2/coord/network_test.go create 
mode 100644 v2/coord/query.go create mode 100644 v2/coord/routing.go create mode 100644 v2/coord/routing_test.go create mode 100644 v2/internal/kadtest/context.go create mode 100644 v2/internal/kadtest/tracing.go delete mode 100644 v2/metrics/metrics.go create mode 100644 v2/query_test.go rename v2/{routing_test.go => router_test.go} (100%) create mode 100644 v2/tele/tele.go create mode 100644 v2/tele/tele_test.go diff --git a/CODEOWNERS b/CODEOWNERS index 43a81df8..1ee0652f 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -13,3 +13,6 @@ # records are IPFS specific /records.go @libp2p/kubo-maintainers @guillaumemichel /records_test.go @libp2p/kubo-maintainers @guillaumemichel + + +/v2/ @dennis-tra @iand diff --git a/v2/.gitignore b/v2/.gitignore new file mode 100644 index 00000000..c1addd0d --- /dev/null +++ b/v2/.gitignore @@ -0,0 +1 @@ +github.com \ No newline at end of file diff --git a/v2/backend.go b/v2/backend.go index 48dab7a4..a8c7775a 100644 --- a/v2/backend.go +++ b/v2/backend.go @@ -2,11 +2,11 @@ package dht import ( "context" + "fmt" lru "github.com/hashicorp/golang-lru/v2" "github.com/ipfs/boxo/ipns" ds "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/autobatch" record "github.com/libp2p/go-libp2p-record" "github.com/libp2p/go-libp2p/core/peerstore" ) @@ -60,9 +60,11 @@ type Backend interface { // store and fetch IPNS records from the given datastore. The stored and // returned records must be of type [*recpb.Record]. The cfg parameter can be // nil, in which case the [DefaultRecordBackendConfig] will be used. -func NewBackendIPNS(ds ds.TxnDatastore, kb peerstore.KeyBook, cfg *RecordBackendConfig) *RecordBackend { +func NewBackendIPNS(ds ds.TxnDatastore, kb peerstore.KeyBook, cfg *RecordBackendConfig) (be *RecordBackend, err error) { if cfg == nil { - cfg = DefaultRecordBackendConfig() + if cfg, err = DefaultRecordBackendConfig(); err != nil { + return nil, fmt.Errorf("default ipns backend config: %w", err) + } } return &RecordBackend{ @@ -71,16 +73,18 @@ func NewBackendIPNS(ds ds.TxnDatastore, kb peerstore.KeyBook, cfg *RecordBackend namespace: namespaceIPNS, datastore: ds, validator: ipns.Validator{KeyBook: kb}, - } + }, nil } // NewBackendPublicKey initializes a new backend for the "pk" namespace that can // store and fetch public key records from the given datastore. The stored and // returned records must be of type [*recpb.Record]. The cfg parameter can be // nil, in which case the [DefaultRecordBackendConfig] will be used. -func NewBackendPublicKey(ds ds.TxnDatastore, cfg *RecordBackendConfig) *RecordBackend { +func NewBackendPublicKey(ds ds.TxnDatastore, cfg *RecordBackendConfig) (be *RecordBackend, err error) { if cfg == nil { - cfg = DefaultRecordBackendConfig() + if cfg, err = DefaultRecordBackendConfig(); err != nil { + return nil, fmt.Errorf("default public key backend config: %w", err) + } } return &RecordBackend{ @@ -89,7 +93,7 @@ func NewBackendPublicKey(ds ds.TxnDatastore, cfg *RecordBackendConfig) *RecordBa namespace: namespacePublicKey, datastore: ds, validator: record.PublicKeyValidator{}, - } + }, nil } // NewBackendProvider initializes a new backend for the "providers" namespace @@ -98,9 +102,11 @@ func NewBackendPublicKey(ds ds.TxnDatastore, cfg *RecordBackendConfig) *RecordBa // The values returned from [ProvidersBackend.Fetch] will be of type // [*providerSet] (unexported). The cfg parameter can be nil, in which case the // [DefaultProviderBackendConfig] will be used. 
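+// A minimal usage sketch (pstore, dstore, and dhtCfg are placeholders for an
+// existing peerstore, a thread-safe datastore, and a DHT *Config; passing a
+// nil cfg selects the defaults):
+//
+//	be, err := NewBackendProvider(pstore, dstore, nil)
+//	if err != nil {
+//		return err
+//	}
+//	dhtCfg.Backends[namespaceProviders] = be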
-func NewBackendProvider(pstore peerstore.Peerstore, dstore ds.Batching, cfg *ProvidersBackendConfig) (*ProvidersBackend, error) { +func NewBackendProvider(pstore peerstore.Peerstore, dstore ds.Batching, cfg *ProvidersBackendConfig) (be *ProvidersBackend, err error) { if cfg == nil { - cfg = DefaultProviderBackendConfig() + if cfg, err = DefaultProviderBackendConfig(); err != nil { + return nil, fmt.Errorf("default provider backend config: %w", err) + } } cache, err := lru.New[string, providerSet](cfg.CacheSize) @@ -114,7 +120,7 @@ func NewBackendProvider(pstore peerstore.Peerstore, dstore ds.Batching, cfg *Pro cache: cache, namespace: namespaceProviders, addrBook: pstore, - datastore: autobatch.NewAutoBatching(dstore, cfg.BatchSize), + datastore: dstore, } return p, nil diff --git a/v2/backend_provider.go b/v2/backend_provider.go index be318b59..aee03322 100644 --- a/v2/backend_provider.go +++ b/v2/backend_provider.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "path" - "strconv" "strings" "sync" "time" @@ -14,16 +13,14 @@ import ( "github.com/benbjohnson/clock" lru "github.com/hashicorp/golang-lru/v2" ds "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/autobatch" dsq "github.com/ipfs/go-datastore/query" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" "github.com/multiformats/go-base32" - "go.opencensus.io/stats" - "go.opencensus.io/tag" + "go.opentelemetry.io/otel/metric" "golang.org/x/exp/slog" - "github.com/libp2p/go-libp2p-kad-dht/v2/metrics" + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) // ProvidersBackend implements the [Backend] interface and handles provider @@ -48,8 +45,9 @@ type ProvidersBackend struct { // fetch peer multiaddresses from (we don't save them in the datastore). addrBook peerstore.AddrBook - // datastore is where we save the peer IDs providing a certain multihash - datastore *autobatch.Datastore + // datastore is where we save the peer IDs providing a certain multihash. + // The datastore must be thread-safe. + datastore ds.Datastore // gcSkip is a sync map that marks records as to-be-skipped by the garbage // collection process. TODO: this is a sub-optimal pattern. @@ -83,9 +81,6 @@ type ProvidersBackendConfig struct { // requesting peers' side. AddressTTL time.Duration - // BatchSize specifies how many provider record writes should be batched - BatchSize int - // CacheSize specifies the LRU cache size CacheSize int @@ -95,6 +90,10 @@ type ProvidersBackendConfig struct { // Logger is the logger to use Logger *slog.Logger + // Tele holds a reference to the telemetry struct to capture metrics and + // traces. + Tele *tele.Telemetry + // AddressFilter is a filter function that any addresses that we attempt to // store or fetch from the peerstore's address book need to pass through. // If you're manually configuring this backend, make sure to align the @@ -106,17 +105,22 @@ type ProvidersBackendConfig struct { // configuration. Use this as a starting point and modify it. If a nil // configuration is passed to [NewBackendProvider], this default configuration // here is used. 
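+// For example, to keep the defaults but run garbage collection more often
+// (a sketch; pstore and dstore are placeholders):
+//
+//	cfg, err := DefaultProviderBackendConfig()
+//	if err != nil {
+//		return err
+//	}
+//	cfg.GCInterval = 10 * time.Minute
+//	be, err := NewBackendProvider(pstore, dstore, cfg)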
-func DefaultProviderBackendConfig() *ProvidersBackendConfig { +func DefaultProviderBackendConfig() (*ProvidersBackendConfig, error) { + telemetry, err := tele.NewWithGlobalProviders() + if err != nil { + return nil, fmt.Errorf("new telemetry: %w", err) + } + return &ProvidersBackendConfig{ clk: clock.New(), ProvideValidity: 48 * time.Hour, // empirically measured in: https://github.com/plprobelab/network-measurements/blob/master/results/rfm17-provider-record-liveness.md AddressTTL: 24 * time.Hour, // MAGIC - BatchSize: 256, // MAGIC CacheSize: 256, // MAGIC GCInterval: time.Hour, // MAGIC Logger: slog.Default(), + Tele: telemetry, AddressFilter: AddrFilterIdentity, // verify alignment with [Config.AddressFilter] - } + }, nil } // Store implements the [Backend] interface. In the case of a [ProvidersBackend] @@ -346,13 +350,11 @@ func (p *ProvidersBackend) collectGarbage(ctx context.Context) { // trackCacheQuery updates the prometheus metrics about cache hit/miss performance func (p *ProvidersBackend) trackCacheQuery(ctx context.Context, hit bool) { - _ = stats.RecordWithTags(ctx, - []tag.Mutator{ - tag.Upsert(metrics.KeyCacheHit, strconv.FormatBool(hit)), - tag.Upsert(metrics.KeyRecordType, "provider"), - }, - metrics.LRUCache.M(1), + set := tele.FromContext(ctx, + tele.AttrCacheHit(hit), + tele.AttrRecordType("provider"), ) + p.cfg.Tele.LRUCache.Add(ctx, 1, metric.WithAttributeSet(set)) } // delete is a convenience method to delete the record at the given datastore diff --git a/v2/backend_provider_test.go b/v2/backend_provider_test.go index fab0a6cd..b87cf488 100644 --- a/v2/backend_provider_test.go +++ b/v2/backend_provider_test.go @@ -42,7 +42,9 @@ func newBackendProvider(t testing.TB, cfg *ProvidersBackendConfig) *ProvidersBac func TestProvidersBackend_GarbageCollection(t *testing.T) { mockClock := clock.NewMock() - cfg := DefaultProviderBackendConfig() + cfg, err := DefaultProviderBackendConfig() + require.NoError(t, err) + cfg.clk = mockClock cfg.Logger = devnull @@ -58,28 +60,28 @@ func TestProvidersBackend_GarbageCollection(t *testing.T) { // write to datastore dsKey := newDatastoreKey(namespaceProviders, "random-key", string(p.ID)) rec := expiryRecord{expiry: mockClock.Now()} - err := b.datastore.Put(ctx, dsKey, rec.MarshalBinary()) + err = b.datastore.Put(ctx, dsKey, rec.MarshalBinary()) require.NoError(t, err) // write to peerstore b.addrBook.AddAddrs(p.ID, p.Addrs, time.Hour) - // advance clock half the gc time and check if record is still there + // advance clock half the validity time and check if record is still there mockClock.Add(cfg.ProvideValidity / 2) // sync autobatching datastore to have all put/deletes visible - err = b.datastore.Sync(ctx, ds.NewKey(namespaceProviders)) + err = b.datastore.Sync(ctx, ds.NewKey("")) require.NoError(t, err) // we expect the record to still be there after half the ProvideValidity _, err = b.datastore.Get(ctx, dsKey) require.NoError(t, err) - // advance clock another gc time and check if record was GC'd now + // advance clock another time and check if the record was GC'd now mockClock.Add(cfg.ProvideValidity + cfg.GCInterval) // sync autobatching datastore to have all put/deletes visible - err = b.datastore.Sync(ctx, ds.NewKey(namespaceProviders)) + err = b.datastore.Sync(ctx, ds.NewKey("")) require.NoError(t, err) // we expect the record to be GC'd now @@ -90,7 +92,9 @@ func TestProvidersBackend_GarbageCollection(t *testing.T) { } func TestProvidersBackend_GarbageCollection_lifecycle_thread_safe(t *testing.T) { - cfg := 
DefaultProviderBackendConfig() + cfg, err := DefaultProviderBackendConfig() + require.NoError(t, err) + cfg.Logger = devnull b := newBackendProvider(t, cfg) diff --git a/v2/backend_record.go b/v2/backend_record.go index 072afaf9..9655d2b7 100644 --- a/v2/backend_record.go +++ b/v2/backend_record.go @@ -6,10 +6,13 @@ import ( "fmt" "time" + "github.com/benbjohnson/clock" ds "github.com/ipfs/go-datastore" record "github.com/libp2p/go-libp2p-record" recpb "github.com/libp2p/go-libp2p-record/pb" "golang.org/x/exp/slog" + + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) type RecordBackend struct { @@ -23,15 +26,24 @@ type RecordBackend struct { var _ Backend = (*RecordBackend)(nil) type RecordBackendConfig struct { + clk clock.Clock MaxRecordAge time.Duration Logger *slog.Logger + Tele *tele.Telemetry } -func DefaultRecordBackendConfig() *RecordBackendConfig { +func DefaultRecordBackendConfig() (*RecordBackendConfig, error) { + telemetry, err := tele.NewWithGlobalProviders() + if err != nil { + return nil, fmt.Errorf("new telemetry: %w", err) + } + return &RecordBackendConfig{ + clk: clock.New(), Logger: slog.Default(), + Tele: telemetry, MaxRecordAge: 48 * time.Hour, // empirically measured in: https://github.com/plprobelab/network-measurements/blob/master/results/rfm17-provider-record-liveness.md - } + }, nil } func (r *RecordBackend) Store(ctx context.Context, key string, value any) (any, error) { @@ -59,7 +71,7 @@ func (r *RecordBackend) Store(ctx context.Context, key string, value any) (any, } // avoid storing arbitrary data, so overwrite that field - rec.TimeReceived = time.Now().UTC().Format(time.RFC3339Nano) + rec.TimeReceived = r.cfg.clk.Now().UTC().Format(time.RFC3339Nano) data, err := rec.Marshal() if err != nil { @@ -101,7 +113,7 @@ func (r *RecordBackend) Fetch(ctx context.Context, key string) (any, error) { // validate that we don't serve stale records. receivedAt, err := time.Parse(time.RFC3339Nano, rec.GetTimeReceived()) - if err != nil || time.Since(receivedAt) > r.cfg.MaxRecordAge { + if err != nil || r.cfg.clk.Since(receivedAt) > r.cfg.MaxRecordAge { errStr := "" if err != nil { errStr = err.Error() @@ -128,7 +140,7 @@ func (r *RecordBackend) Fetch(ctx context.Context, key string) (any, error) { // If unmarshalling or validation fails, this function (alongside an error) also // returns true because the existing record should be replaced. func (r *RecordBackend) shouldReplaceExistingRecord(ctx context.Context, txn ds.Read, dsKey ds.Key, value []byte) (bool, error) { - ctx, span := tracer.Start(ctx, "DHT.shouldReplaceExistingRecord") + ctx, span := r.cfg.Tele.Tracer.Start(ctx, "RecordBackend.shouldReplaceExistingRecord") defer span.End() existingBytes, err := txn.Get(ctx, dsKey) diff --git a/v2/backend_trace.go b/v2/backend_trace.go index ea9211ab..72335c35 100644 --- a/v2/backend_trace.go +++ b/v2/backend_trace.go @@ -5,23 +5,32 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - otel "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace" ) // tracedBackend wraps a [Backend] in calls to open telemetry tracing // directives. In [New] all backends configured in [Config] or automatically // configured if none are given will be wrapped with this tracedBackend. type tracedBackend struct { - namespace string // the namespace the backend operates in. Used as a tracing attribute. - backend Backend // the [Backend] to be traced + namespace string // the namespace the backend operates in. Used as a tracing attribute. 
+ backend Backend // the [Backend] to be traced + tracer trace.Tracer // the tracer to be used } var _ Backend = (*tracedBackend)(nil) +func traceWrapBackend(namespace string, backend Backend, tracer trace.Tracer) Backend { + return &tracedBackend{ + namespace: namespace, + backend: backend, + tracer: tracer, + } +} + // Store implements the [Backend] interface, forwards the call to the wrapped // backend and manages the trace span. func (t *tracedBackend) Store(ctx context.Context, key string, value any) (any, error) { - ctx, span := tracer.Start(ctx, "Store", t.traceAttributes(key)) + ctx, span := t.tracer.Start(ctx, "Store", t.traceAttributes(key)) defer span.End() result, err := t.backend.Store(ctx, key, value) @@ -36,7 +45,7 @@ func (t *tracedBackend) Store(ctx context.Context, key string, value any) (any, // Fetch implements the [Backend] interface, forwards the call to the wrapped // backend and manages the trace span. func (t *tracedBackend) Fetch(ctx context.Context, key string) (any, error) { - ctx, span := tracer.Start(ctx, "Fetch", t.traceAttributes(key)) + ctx, span := t.tracer.Start(ctx, "Fetch", t.traceAttributes(key)) defer span.End() result, err := t.backend.Fetch(ctx, key) @@ -49,6 +58,6 @@ func (t *tracedBackend) Fetch(ctx context.Context, key string) (any, error) { } // traceAttributes is a helper to build the trace attributes. -func (t *tracedBackend) traceAttributes(key string) otel.SpanStartEventOption { - return otel.WithAttributes(attribute.String("namespace", t.namespace), attribute.String("key", key)) +func (t *tracedBackend) traceAttributes(key string) trace.SpanStartEventOption { + return trace.WithAttributes(attribute.String("namespace", t.namespace), attribute.String("key", key)) } diff --git a/v2/config.go b/v2/config.go index e1e18347..98136cca 100644 --- a/v2/config.go +++ b/v2/config.go @@ -4,6 +4,7 @@ import ( "fmt" "time" + "github.com/benbjohnson/clock" ds "github.com/ipfs/go-datastore" leveldb "github.com/ipfs/go-ds-leveldb" logging "github.com/ipfs/go-log/v2" @@ -13,8 +14,11 @@ import ( "github.com/plprobelab/go-kademlia/coord" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-kademlia/routing" "github.com/plprobelab/go-kademlia/routing/triert" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap/exp/zapslog" "golang.org/x/exp/slog" ) @@ -35,9 +39,6 @@ const ( ProtocolFilecoin protocol.ID = "/fil/kad/testnetnet/kad/1.0.0" ) -// tracer is an open telemetry tracing instance -var tracer = otel.Tracer("go-libp2p-kad-dht") - type ( // ModeOpt describes in which mode this [DHT] process should operate in. // Possible options are client, server, and any variant that switches @@ -103,6 +104,9 @@ const ( // to build up your own configuration struct. The [DHT] constructor [New] uses the // below method [*Config.Validate] to test for violations of configuration invariants. type Config struct { + // Clock + Clock clock.Clock + // Mode defines if the DHT should operate as a server or client or switch // between both automatically (see ModeOpt). Mode ModeOpt @@ -123,7 +127,7 @@ type Config struct { // [triert.TrieRT] routing table will be used. This field will be nil // in the default configuration because a routing table requires information // about the local node. 
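+	// A custom table can be wired in like so (a sketch; nid is a placeholder
+	// for the local node's kad.NodeID):
+	//
+	//	rt, _ := DefaultRoutingTable(nid) // error handling elided in this sketch
+	//	cfg := DefaultConfig()
+	//	cfg.RoutingTable = rt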
-	RoutingTable kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]]
+	RoutingTable routing.RoutingTableCpl[key.Key256, kad.NodeID[key.Key256]]
 
 	// The Backends field holds a map of key namespaces to their corresponding
 	// backend implementation. For example, if we received an IPNS record, the
@@ -145,6 +149,8 @@ type Config struct {
 	// construct them individually and register them with the above Backends
 	// map. Note that if you configure the DHT to use [ProtocolIPFS] it is
 	// required to register backends for the ipns, pk, and providers namespaces.
+	//
+	// This datastore must be thread-safe.
 	Datastore Datastore
 
 	// Logger can be used to configure a custom structured logger instance.
@@ -160,6 +166,12 @@ type Config struct {
 	// also fetch from the peer store and serve to other peers. It is mainly
 	// used to filter out private addresses.
 	AddressFilter AddressFilter
+
+	// MeterProvider is the OpenTelemetry meter provider the DHT uses to register its metrics.
+	MeterProvider metric.MeterProvider
+
+	// TracerProvider is the OpenTelemetry tracer provider the DHT uses to emit trace spans.
+	TracerProvider trace.TracerProvider
 }
 
 // DefaultConfig returns a configuration struct that can be used as-is to
@@ -168,6 +180,7 @@ type Config struct {
 // fields come from separate top-level methods prefixed with Default.
 func DefaultConfig() *Config {
 	return &Config{
+		Clock:             clock.New(),
 		Mode:              ModeOptAutoClient,
 		Kademlia:          coord.DefaultConfig(),
 		BucketSize:        20, // MAGIC
@@ -178,13 +191,15 @@ func DefaultConfig() *Config {
 		Logger:            slog.New(zapslog.NewHandler(logging.Logger("dht").Desugar().Core())),
 		TimeoutStreamIdle: time.Minute, // MAGIC
 		AddressFilter:     AddrFilterPrivate,
+		MeterProvider:     otel.GetMeterProvider(),
+		TracerProvider:    otel.GetTracerProvider(),
 	}
 }
 
 // DefaultRoutingTable returns a triert.TrieRT routing table. This routing table
 // cannot be initialized in [DefaultConfig] because it requires information
 // about the local peer.
-func DefaultRoutingTable(nodeID kad.NodeID[key.Key256]) (kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]], error) {
+func DefaultRoutingTable(nodeID kad.NodeID[key.Key256]) (routing.RoutingTableCpl[key.Key256, kad.NodeID[key.Key256]], error) {
 	rtCfg := triert.DefaultConfig[key.Key256, kad.NodeID[key.Key256]]()
 	rt, err := triert.New[key.Key256, kad.NodeID[key.Key256]](nodeID, rtCfg)
 	if err != nil {
@@ -202,6 +217,10 @@ func InMemoryDatastore() (Datastore, error) {
 // an error if any configuration issue was detected and nil if this is
 // a valid configuration.
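+// A typical call site looks like this (sketch):
+//
+//	cfg := DefaultConfig()
+//	cfg.Mode = ModeOptServer
+//	if err := cfg.Validate(); err != nil {
+//		return fmt.Errorf("invalid DHT config: %w", err)
+//	}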
func (c *Config) Validate() error { + if c.Clock == nil { + return fmt.Errorf("clock must not be nil") + } + switch c.Mode { case ModeOptClient: case ModeOptServer: @@ -253,6 +272,14 @@ func (c *Config) Validate() error { return fmt.Errorf("address filter must not be nil - use AddrFilterIdentity to disable filtering") } + if c.MeterProvider == nil { + return fmt.Errorf("opentelemetry meter provider must not be nil") + } + + if c.TracerProvider == nil { + return fmt.Errorf("opentelemetry tracer provider must not be nil") + } + return nil } diff --git a/v2/config_test.go b/v2/config_test.go index 670dcc84..892f9f8a 100644 --- a/v2/config_test.go +++ b/v2/config_test.go @@ -3,117 +3,101 @@ package dht import ( "testing" "time" + + "github.com/stretchr/testify/assert" ) func TestConfig_Validate(t *testing.T) { - tests := []struct { - name string - mutate func(*Config) *Config - wantErr bool - }{ - { - name: "happy path", - wantErr: false, - mutate: func(c *Config) *Config { return c }, - }, - { - name: "invalid mode", - wantErr: true, - mutate: func(c *Config) *Config { - c.Mode = "invalid" - return c - }, - }, - { - name: "nil Kademlia configuration", - wantErr: true, - mutate: func(c *Config) *Config { - c.Kademlia = nil - return c - }, - }, - { - name: "invalid Kademlia configuration", - wantErr: true, - mutate: func(c *Config) *Config { - c.Kademlia.Clock = nil - return c - }, - }, - { - name: "empty protocol", - wantErr: true, - mutate: func(c *Config) *Config { - c.ProtocolID = "" - return c - }, - }, - { - name: "nil logger", - wantErr: true, - mutate: func(c *Config) *Config { - c.Logger = nil - return c - }, - }, - { - name: "0 stream idle timeout", - wantErr: true, - mutate: func(c *Config) *Config { - c.TimeoutStreamIdle = time.Duration(0) - return c - }, - }, - { - name: "negative stream idle timeout", - wantErr: true, - mutate: func(c *Config) *Config { - c.TimeoutStreamIdle = time.Duration(-1) - return c - }, - }, - { - // When we're using the IPFS protocol, we always require support - // for ipns, pk, and provider records. - // If the Backends map is empty and the IPFS protocol is configured, - // we automatically populate the DHT backends for these record - // types. 
- name: "incompatible backends with ipfs protocol", - wantErr: true, - mutate: func(c *Config) *Config { - c.ProtocolID = ProtocolIPFS - c.Backends["another"] = &RecordBackend{} - return c - }, - }, - { - name: "additional backends for ipfs protocol", - wantErr: true, - mutate: func(c *Config) *Config { - c.ProtocolID = ProtocolIPFS - c.Backends[namespaceProviders] = &RecordBackend{} - c.Backends[namespaceIPNS] = &RecordBackend{} - c.Backends[namespacePublicKey] = &RecordBackend{} - c.Backends["another"] = &RecordBackend{} - return c - }, - }, - { - name: "nil address filter", - wantErr: true, - mutate: func(c *Config) *Config { - c.AddressFilter = nil - return c - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := DefaultConfig() - c = tt.mutate(c) - if err := c.Validate(); (err != nil) != tt.wantErr { - t.Errorf("Validate() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } + t.Run("happy path", func(t *testing.T) { + cfg := DefaultConfig() + assert.NoError(t, cfg.Validate()) + }) + + t.Run("invalid mode", func(t *testing.T) { + cfg := DefaultConfig() + cfg.Mode = "invalid" + assert.Error(t, cfg.Validate()) + }) + + t.Run("nil Kademlia configuration", func(t *testing.T) { + cfg := DefaultConfig() + cfg.Kademlia = nil + assert.Error(t, cfg.Validate()) + }) + + t.Run("invalid Kademlia configuration", func(t *testing.T) { + cfg := DefaultConfig() + cfg.Kademlia.Clock = nil + assert.Error(t, cfg.Validate()) + }) + + t.Run("empty protocol", func(t *testing.T) { + cfg := DefaultConfig() + cfg.ProtocolID = "" + assert.Error(t, cfg.Validate()) + }) + + t.Run("nil logger", func(t *testing.T) { + cfg := DefaultConfig() + cfg.Logger = nil + assert.Error(t, cfg.Validate()) + }) + + t.Run("0 stream idle timeout", func(t *testing.T) { + cfg := DefaultConfig() + cfg.TimeoutStreamIdle = time.Duration(0) + assert.Error(t, cfg.Validate()) + }) + + t.Run("negative stream idle timeout", func(t *testing.T) { + cfg := DefaultConfig() + cfg.TimeoutStreamIdle = time.Duration(-1) + assert.Error(t, cfg.Validate()) + }) + + t.Run("incompatible backends with ipfs protocol", func(t *testing.T) { + // When we're using the IPFS protocol, we always require support + // for ipns, pk, and provider records. + // If the Backends map is empty and the IPFS protocol is configured, + // we automatically populate the DHT backends for these record + // types. 
+		cfg := DefaultConfig()
+		cfg.ProtocolID = ProtocolIPFS
+		cfg.Backends["another"] = &RecordBackend{}
+		assert.Error(t, cfg.Validate())
+	})
+
+	t.Run("additional backends for ipfs protocol", func(t *testing.T) {
+		cfg := DefaultConfig()
+		cfg.ProtocolID = ProtocolIPFS
+		cfg.Backends[namespaceProviders] = &RecordBackend{}
+		cfg.Backends[namespaceIPNS] = &RecordBackend{}
+		cfg.Backends[namespacePublicKey] = &RecordBackend{}
+		cfg.Backends["another"] = &RecordBackend{}
+		assert.Error(t, cfg.Validate())
+	})
+
+	t.Run("nil address filter", func(t *testing.T) {
+		cfg := DefaultConfig()
+		cfg.AddressFilter = nil
+		assert.Error(t, cfg.Validate())
+	})
+
+	t.Run("nil meter provider", func(t *testing.T) {
+		cfg := DefaultConfig()
+		cfg.MeterProvider = nil
+		assert.Error(t, cfg.Validate())
+	})
+
+	t.Run("nil tracer provider", func(t *testing.T) {
+		cfg := DefaultConfig()
+		cfg.TracerProvider = nil
+		assert.Error(t, cfg.Validate())
+	})
+
+	t.Run("nil clock", func(t *testing.T) {
+		cfg := DefaultConfig()
+		cfg.Clock = nil
+		assert.Error(t, cfg.Validate())
+	})
 }
diff --git a/v2/coord/behaviour.go b/v2/coord/behaviour.go
new file mode 100644
index 00000000..aa69917f
--- /dev/null
+++ b/v2/coord/behaviour.go
@@ -0,0 +1,152 @@
+package coord
+
+import (
+	"context"
+	"sync"
+	"sync/atomic"
+)
+
+// Notify is the interface that a component implements to be notified of
+// [BehaviourEvent]s.
+type Notify[E BehaviourEvent] interface {
+	Notify(ctx context.Context, ev E)
+}
+
+type NotifyCloser[E BehaviourEvent] interface {
+	Notify[E]
+	Close()
+}
+
+type NotifyFunc[E BehaviourEvent] func(ctx context.Context, ev E)
+
+func (f NotifyFunc[E]) Notify(ctx context.Context, ev E) {
+	f(ctx, ev)
+}
+
+type Behaviour[I BehaviourEvent, O BehaviourEvent] interface {
+	// Ready returns a channel that signals when the behaviour is ready to perform work.
+	Ready() <-chan struct{}
+
+	// Notify informs the behaviour of an event. The behaviour may perform the event
+	// immediately and queue the result, causing the behaviour to become ready.
+	// It is safe to call Notify from the Perform method.
+	Notify(ctx context.Context, ev I)
+
+	// Perform gives the behaviour the opportunity to perform work or to return a queued
+	// result as an event.
+	Perform(ctx context.Context) (O, bool)
+}
+
+type SM[E any, S any] interface {
+	Advance(context.Context, E) S
+}
+
+type WorkQueueFunc[E BehaviourEvent] func(context.Context, E) bool
+
+// WorkQueue is a buffered queue of work to be performed.
+// It automatically drains the queue sequentially by calling a
+// WorkQueueFunc for each work item, passing the original context
+// and event.
+type WorkQueue[E BehaviourEvent] struct {
+	pending chan pendingEvent[E]
+	fn      WorkQueueFunc[E]
+	done    atomic.Bool
+	once    sync.Once
+}
+
+func NewWorkQueue[E BehaviourEvent](fn WorkQueueFunc[E]) *WorkQueue[E] {
+	w := &WorkQueue[E]{
+		pending: make(chan pendingEvent[E], 1),
+		fn:      fn,
+	}
+	return w
+}
+
+type pendingEvent[E any] struct {
+	Ctx   context.Context
+	Event E
+}
+
+// Enqueue queues work to be performed. It will block if the
+// queue has reached its maximum capacity for pending work. While
+// blocking it will return a context cancellation error if the work
+// item's context is cancelled.
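+// A usage sketch (MyEvent and handle are placeholders for a concrete
+// BehaviourEvent type and its handler):
+//
+//	q := NewWorkQueue(func(ctx context.Context, ev MyEvent) bool {
+//		handle(ctx, ev)
+//		return false // returning false keeps the queue draining
+//	})
+//	err := q.Enqueue(ctx, ev) // blocks only while the buffer is full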
+func (w *WorkQueue[E]) Enqueue(ctx context.Context, cmd E) error {
+	if w.done.Load() {
+		return nil
+	}
+	w.once.Do(func() {
+		go func() {
+			defer w.done.Store(true)
+			for cc := range w.pending {
+				if cc.Ctx.Err() != nil {
+					return
+				}
+				if done := w.fn(cc.Ctx, cc.Event); done {
+					w.done.Store(true)
+					return
+				}
+			}
+		}()
+	})
+
+	select {
+	case <-ctx.Done(): // this is the context for the work item
+		return ctx.Err()
+	case w.pending <- pendingEvent[E]{
+		Ctx:   ctx,
+		Event: cmd,
+	}:
+		return nil
+
+	}
+}
+
+// A Waiter is a Notifiee whose Notify method forwards the
+// notified event to a channel which a client can wait on.
+type Waiter[E BehaviourEvent] struct {
+	pending chan WaiterEvent[E]
+	done    atomic.Bool
+}
+
+var _ Notify[BehaviourEvent] = (*Waiter[BehaviourEvent])(nil)
+
+func NewWaiter[E BehaviourEvent]() *Waiter[E] {
+	w := &Waiter[E]{
+		pending: make(chan WaiterEvent[E], 1),
+	}
+	return w
+}
+
+type WaiterEvent[E BehaviourEvent] struct {
+	Ctx   context.Context
+	Event E
+}
+
+func (w *Waiter[E]) Notify(ctx context.Context, ev E) {
+	if w.done.Load() {
+		return
+	}
+	select {
+	case <-ctx.Done(): // this is the context for the work item
+		return
+	case w.pending <- WaiterEvent[E]{
+		Ctx:   ctx,
+		Event: ev,
+	}:
+		return
+
+	}
+}
+
+// Close signals that the waiter should not forward any further calls to Notify.
+// It closes the waiter channel so a client selecting on it will receive the close
+// operation.
+func (w *Waiter[E]) Close() {
+	w.done.Store(true)
+	close(w.pending)
+}
+
+func (w *Waiter[E]) Chan() <-chan WaiterEvent[E] {
+	return w.pending
+}
diff --git a/v2/coord/behaviour_test.go b/v2/coord/behaviour_test.go
new file mode 100644
index 00000000..20464c30
--- /dev/null
+++ b/v2/coord/behaviour_test.go
@@ -0,0 +1,28 @@
+package coord
+
+import (
+	"context"
+)
+
+type NullSM[E any, S any] struct{}
+
+func (NullSM[E, S]) Advance(context.Context, E) S {
+	var v S
+	return v
+}
+
+type RecordingSM[E any, S any] struct {
+	State    S
+	Received E
+}
+
+func NewRecordingSM[E any, S any](response S) *RecordingSM[E, S] {
+	return &RecordingSM[E, S]{
+		State: response,
+	}
+}
+
+func (r *RecordingSM[E, S]) Advance(ctx context.Context, e E) S {
+	r.Received = e
+	return r.State
+}
diff --git a/v2/coord/conversion.go b/v2/coord/conversion.go
new file mode 100644
index 00000000..3a6b0ba8
--- /dev/null
+++ b/v2/coord/conversion.go
@@ -0,0 +1,62 @@
+package coord
+
+import (
+	"github.com/libp2p/go-libp2p/core/peer"
+	ma "github.com/multiformats/go-multiaddr"
+	"github.com/plprobelab/go-kademlia/kad"
+
+	"github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
+)
+
+// NodeInfoToAddrInfo converts a kad.NodeInfo to a peer.AddrInfo.
+// This function will panic if info.ID() does not return a kadt.PeerID
+func NodeInfoToAddrInfo(info kad.NodeInfo[KadKey, ma.Multiaddr]) peer.AddrInfo {
+	peerID := info.ID().(kadt.PeerID)
+	return peer.AddrInfo{
+		ID:    peer.ID(peerID),
+		Addrs: info.Addresses(),
+	}
+}
+
+// NodeIDToAddrInfo converts a kad.NodeID to a peer.AddrInfo with no addresses.
+// This function will panic if id's underlying type is not kadt.PeerID
+func NodeIDToAddrInfo(id kad.NodeID[KadKey]) peer.AddrInfo {
+	peerID := id.(kadt.PeerID)
+	return peer.AddrInfo{
+		ID: peer.ID(peerID),
+	}
+}
+
+// AddrInfoToNodeID converts a peer.AddrInfo to a kad.NodeID.
+func AddrInfoToNodeID(ai peer.AddrInfo) kad.NodeID[KadKey] {
+	return kadt.PeerID(ai.ID)
+}
+
+// SliceOfNodeInfoToSliceOfAddrInfo converts a slice of kad.NodeInfo to a slice of peer.AddrInfo.
+// This function will panic if any info.ID() does not return a kadt.PeerID +func SliceOfNodeInfoToSliceOfAddrInfo(infos []kad.NodeInfo[KadKey, ma.Multiaddr]) []peer.AddrInfo { + peers := make([]peer.AddrInfo, len(infos)) + for i := range infos { + peerID := infos[i].ID().(kadt.PeerID) + peers[i] = peer.AddrInfo{ + ID: peer.ID(peerID), + Addrs: infos[i].Addresses(), + } + } + return peers +} + +// SliceOfPeerIDToSliceOfNodeID converts a slice peer.ID to a slice of kad.NodeID +func SliceOfPeerIDToSliceOfNodeID(peers []peer.ID) []kad.NodeID[KadKey] { + nodes := make([]kad.NodeID[KadKey], len(peers)) + for i := range peers { + nodes[i] = kadt.PeerID(peers[i]) + } + return nodes +} + +// NodeIDToPeerID converts a kad.NodeID to a peer.ID. +// This function will panic if id's underlying type is not kadt.PeerID +func NodeIDToPeerID(id kad.NodeID[KadKey]) peer.ID { + return peer.ID(id.(kadt.PeerID)) +} diff --git a/v2/coord/coordinator.go b/v2/coord/coordinator.go new file mode 100644 index 00000000..579701a4 --- /dev/null +++ b/v2/coord/coordinator.go @@ -0,0 +1,453 @@ +package coord + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/benbjohnson/clock" + logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/kaderr" + "github.com/plprobelab/go-kademlia/network/address" + "github.com/plprobelab/go-kademlia/query" + "github.com/plprobelab/go-kademlia/routing" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap/exp/zapslog" + "golang.org/x/exp/slog" + + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" +) + +// A Coordinator coordinates the state machines that comprise a Kademlia DHT +type Coordinator struct { + // self is the peer id of the system the dht is running on + self peer.ID + + // cancel is used to cancel all running goroutines when the coordinator is cleaning up + cancel context.CancelFunc + + // cfg is a copy of the optional configuration supplied to the dht + cfg CoordinatorConfig + + // rt is the routing table used to look up nodes by distance + rt kad.RoutingTable[KadKey, kad.NodeID[KadKey]] + + // rtr is the message router used to send messages + rtr Router + + routingNotifications chan RoutingNotification + + // networkBehaviour is the behaviour responsible for communicating with the network + networkBehaviour *NetworkBehaviour + + // routingBehaviour is the behaviour responsible for maintaining the routing table + routingBehaviour Behaviour[BehaviourEvent, BehaviourEvent] + + // queryBehaviour is the behaviour responsible for running user-submitted queries + queryBehaviour Behaviour[BehaviourEvent, BehaviourEvent] +} + +type CoordinatorConfig struct { + PeerstoreTTL time.Duration // duration for which a peer is kept in the peerstore + + Clock clock.Clock // a clock that may replaced by a mock when testing + + QueryConcurrency int // the maximum number of queries that may be waiting for message responses at any one time + QueryTimeout time.Duration // the time to wait before terminating a query that is not making progress + + RequestConcurrency int // the maximum number of concurrent requests that each query may have in flight + RequestTimeout time.Duration // the timeout queries should use for contacting a single node + + Logger *slog.Logger // a structured logger that should be used when logging. 
+ Tele *tele.Telemetry // a struct holding a reference to various metric counters/histograms and a tracer +} + +// Validate checks the configuration options and returns an error if any have invalid values. +func (cfg *CoordinatorConfig) Validate() error { + if cfg.Clock == nil { + return &kaderr.ConfigurationError{ + Component: "CoordinatorConfig", + Err: fmt.Errorf("clock must not be nil"), + } + } + + if cfg.QueryConcurrency < 1 { + return &kaderr.ConfigurationError{ + Component: "CoordinatorConfig", + Err: fmt.Errorf("query concurrency must be greater than zero"), + } + } + if cfg.QueryTimeout < 1 { + return &kaderr.ConfigurationError{ + Component: "CoordinatorConfig", + Err: fmt.Errorf("query timeout must be greater than zero"), + } + } + + if cfg.RequestConcurrency < 1 { + return &kaderr.ConfigurationError{ + Component: "CoordinatorConfig", + Err: fmt.Errorf("request concurrency must be greater than zero"), + } + } + + if cfg.RequestTimeout < 1 { + return &kaderr.ConfigurationError{ + Component: "CoordinatorConfig", + Err: fmt.Errorf("request timeout must be greater than zero"), + } + } + + if cfg.Logger == nil { + return &kaderr.ConfigurationError{ + Component: "CoordinatorConfig", + Err: fmt.Errorf("logger must not be nil"), + } + } + + if cfg.Tele == nil { + return &kaderr.ConfigurationError{ + Component: "CoordinatorConfig", + Err: fmt.Errorf("telemetry must not be nil"), + } + } + + return nil +} + +func DefaultCoordinatorConfig() (*CoordinatorConfig, error) { + telemetry, err := tele.NewWithGlobalProviders() + if err != nil { + return nil, fmt.Errorf("new telemetry: %w", err) + } + + return &CoordinatorConfig{ + Clock: clock.New(), + PeerstoreTTL: 10 * time.Minute, + QueryConcurrency: 3, + QueryTimeout: 5 * time.Minute, + RequestConcurrency: 3, + RequestTimeout: time.Minute, + Logger: slog.New(zapslog.NewHandler(logging.Logger("coord").Desugar().Core())), + Tele: telemetry, + }, nil +} + +func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[KadKey, kad.NodeID[KadKey]], cfg *CoordinatorConfig) (*Coordinator, error) { + if cfg == nil { + c, err := DefaultCoordinatorConfig() + if err != nil { + return nil, fmt.Errorf("default config: %w", err) + } + cfg = c + } else if err := cfg.Validate(); err != nil { + return nil, err + } + + qpCfg := query.DefaultPoolConfig() + qpCfg.Clock = cfg.Clock + qpCfg.Concurrency = cfg.QueryConcurrency + qpCfg.Timeout = cfg.QueryTimeout + qpCfg.QueryConcurrency = cfg.RequestConcurrency + qpCfg.RequestTimeout = cfg.RequestTimeout + + qp, err := query.NewPool[KadKey, ma.Multiaddr](kadt.PeerID(self), qpCfg) + if err != nil { + return nil, fmt.Errorf("query pool: %w", err) + } + queryBehaviour := NewPooledQueryBehaviour(qp, cfg.Logger, cfg.Tele.Tracer) + + bootstrapCfg := routing.DefaultBootstrapConfig[KadKey, ma.Multiaddr]() + bootstrapCfg.Clock = cfg.Clock + bootstrapCfg.Timeout = cfg.QueryTimeout + bootstrapCfg.RequestConcurrency = cfg.RequestConcurrency + bootstrapCfg.RequestTimeout = cfg.RequestTimeout + + bootstrap, err := routing.NewBootstrap[KadKey, ma.Multiaddr](kadt.PeerID(self), bootstrapCfg) + if err != nil { + return nil, fmt.Errorf("bootstrap: %w", err) + } + + includeCfg := routing.DefaultIncludeConfig() + includeCfg.Clock = cfg.Clock + includeCfg.Timeout = cfg.QueryTimeout + + // TODO: expose config + // includeCfg.QueueCapacity = cfg.IncludeQueueCapacity + // includeCfg.Concurrency = cfg.IncludeConcurrency + // includeCfg.Timeout = cfg.IncludeTimeout + + include, err := routing.NewInclude[KadKey, ma.Multiaddr](rt, 
includeCfg) + if err != nil { + return nil, fmt.Errorf("include: %w", err) + } + + probeCfg := routing.DefaultProbeConfig() + probeCfg.Clock = cfg.Clock + probeCfg.Timeout = cfg.QueryTimeout + + // TODO: expose config + // probeCfg.Concurrency = cfg.ProbeConcurrency + probe, err := routing.NewProbe[KadKey, ma.Multiaddr](rt, probeCfg) + if err != nil { + return nil, fmt.Errorf("probe: %w", err) + } + + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, cfg.Logger, cfg.Tele.Tracer) + + networkBehaviour := NewNetworkBehaviour(rtr, cfg.Logger, cfg.Tele.Tracer) + + ctx, cancel := context.WithCancel(context.Background()) + + d := &Coordinator{ + self: self, + cfg: *cfg, + rtr: rtr, + rt: rt, + cancel: cancel, + + networkBehaviour: networkBehaviour, + routingBehaviour: routingBehaviour, + queryBehaviour: queryBehaviour, + + routingNotifications: make(chan RoutingNotification, 20), // buffered mainly to allow tests to read the channel after running an operation + } + go d.eventLoop(ctx) + + return d, nil +} + +// Close cleans up all resources associated with this Coordinator. +func (c *Coordinator) Close() error { + c.cancel() + return nil +} + +func (c *Coordinator) ID() peer.ID { + return c.self +} + +func (c *Coordinator) Addresses() []ma.Multiaddr { + // TODO: return configured listen addresses + info, err := c.rtr.GetNodeInfo(context.TODO(), c.self) + if err != nil { + return nil + } + return info.Addrs +} + +// RoutingNotifications returns a channel that may be read to be notified of routing updates +func (c *Coordinator) RoutingNotifications() <-chan RoutingNotification { + return c.routingNotifications +} + +func (c *Coordinator) eventLoop(ctx context.Context) { + ctx, span := c.cfg.Tele.Tracer.Start(ctx, "Coordinator.eventLoop") + defer span.End() + for { + var ev BehaviourEvent + var ok bool + select { + case <-ctx.Done(): + // coordinator is closing + return + case <-c.networkBehaviour.Ready(): + ev, ok = c.networkBehaviour.Perform(ctx) + case <-c.routingBehaviour.Ready(): + ev, ok = c.routingBehaviour.Perform(ctx) + case <-c.queryBehaviour.Ready(): + ev, ok = c.queryBehaviour.Perform(ctx) + } + + if ok { + c.dispatchEvent(ctx, ev) + } + } +} + +func (c *Coordinator) dispatchEvent(ctx context.Context, ev BehaviourEvent) { + ctx, span := c.cfg.Tele.Tracer.Start(ctx, "Coordinator.dispatchEvent", trace.WithAttributes(attribute.String("event_type", fmt.Sprintf("%T", ev)))) + defer span.End() + + switch ev := ev.(type) { + case NetworkCommand: + c.networkBehaviour.Notify(ctx, ev) + case QueryCommand: + c.queryBehaviour.Notify(ctx, ev) + case RoutingCommand: + c.routingBehaviour.Notify(ctx, ev) + case RoutingNotification: + select { + case <-ctx.Done(): + case c.routingNotifications <- ev: + default: + } + default: + panic(fmt.Sprintf("unexpected event: %T", ev)) + } +} + +// GetNode retrieves the node associated with the given node id from the DHT's local routing table. +// If the node isn't found in the table, it returns ErrNodeNotFound. +func (c *Coordinator) GetNode(ctx context.Context, id peer.ID) (Node, error) { + if _, exists := c.rt.GetNode(kadt.PeerID(id).Key()); !exists { + return nil, ErrNodeNotFound + } + + nh, err := c.networkBehaviour.getNodeHandler(ctx, id) + if err != nil { + return nil, err + } + return nh, nil +} + +// GetClosestNodes requests the n closest nodes to the key from the node's local routing table. 
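+// For example, a caller could seed a query with the 20 nearest known nodes
+// (target is a placeholder KadKey):
+//
+//	seeds, err := c.GetClosestNodes(ctx, target, 20)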
+func (c *Coordinator) GetClosestNodes(ctx context.Context, k KadKey, n int) ([]Node, error) { + closest := c.rt.NearestNodes(k, n) + nodes := make([]Node, 0, len(closest)) + for _, id := range closest { + nh, err := c.networkBehaviour.getNodeHandler(ctx, NodeIDToPeerID(id)) + if err != nil { + return nil, err + } + nodes = append(nodes, nh) + } + return nodes, nil +} + +// GetValue requests that the node return any value associated with the supplied key. +// If the node does not have a value for the key it returns ErrValueNotFound. +func (c *Coordinator) GetValue(ctx context.Context, k KadKey) (Value, error) { + panic("not implemented") +} + +// PutValue requests that the node stores a value to be associated with the supplied key. +// If the node cannot or chooses not to store the value for the key it returns ErrValueNotAccepted. +func (c *Coordinator) PutValue(ctx context.Context, r Value, q int) error { + panic("not implemented") +} + +// Query traverses the DHT calling fn for each node visited. +func (c *Coordinator) Query(ctx context.Context, target KadKey, fn QueryFunc) (QueryStats, error) { + ctx, span := c.cfg.Tele.Tracer.Start(ctx, "Coordinator.Query") + defer span.End() + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + seeds, err := c.GetClosestNodes(ctx, target, 20) + if err != nil { + return QueryStats{}, err + } + + seedIDs := make([]peer.ID, 0, len(seeds)) + for _, s := range seeds { + seedIDs = append(seedIDs, s.ID()) + } + + waiter := NewWaiter[BehaviourEvent]() + queryID := query.QueryID("foo") // TODO: choose query ID + + cmd := &EventStartQuery{ + QueryID: queryID, + Target: target, + ProtocolID: address.ProtocolID("TODO"), + Message: &fakeMessage{key: target}, + KnownClosestNodes: seedIDs, + Notify: waiter, + } + + // queue the start of the query + c.queryBehaviour.Notify(ctx, cmd) + + var lastStats QueryStats + for { + select { + case <-ctx.Done(): + return lastStats, ctx.Err() + case wev := <-waiter.Chan(): + ctx, ev := wev.Ctx, wev.Event + switch ev := ev.(type) { + case *EventQueryProgressed: + lastStats = QueryStats{ + Start: ev.Stats.Start, + Requests: ev.Stats.Requests, + Success: ev.Stats.Success, + Failure: ev.Stats.Failure, + } + nh, err := c.networkBehaviour.getNodeHandler(ctx, ev.NodeID) + if err != nil { + // ignore unknown node + break + } + + err = fn(ctx, nh, lastStats) + if errors.Is(err, ErrSkipRemaining) { + // done + c.queryBehaviour.Notify(ctx, &EventStopQuery{QueryID: queryID}) + return lastStats, nil + } + if errors.Is(err, ErrSkipNode) { + // TODO: don't add closer nodes from this node + break + } + if err != nil { + // user defined error that terminates the query + c.queryBehaviour.Notify(ctx, &EventStopQuery{QueryID: queryID}) + return lastStats, err + } + + case *EventQueryFinished: + // query is done + lastStats.Exhausted = true + return lastStats, nil + + default: + panic(fmt.Sprintf("unexpected event: %T", ev)) + } + } + } +} + +// AddNodes suggests new DHT nodes and their associated addresses to be added to the routing table. +// If the routing table is updated as a result of this operation an EventRoutingUpdated notification +// is emitted on the routing notification channel. 
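+//
+// Example (editor's illustration, not part of the original patch): assuming a
+// Coordinator c and a list of candidate addresses ais, a caller can watch for
+// the resulting update:
+//
+//	if err := c.AddNodes(ctx, ais, time.Hour); err != nil {
+//		// handle error
+//	}
+//	ev := <-c.RoutingNotifications() // expect an *EventRoutingUpdated
+//	_ = ev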
+func (c *Coordinator) AddNodes(ctx context.Context, ais []peer.AddrInfo, ttl time.Duration) error { + ctx, span := c.cfg.Tele.Tracer.Start(ctx, "Coordinator.AddNodes") + defer span.End() + for _, ai := range ais { + if ai.ID == c.self { + // skip self + continue + } + + // TODO: apply address filter + + c.routingBehaviour.Notify(ctx, &EventAddAddrInfo{ + NodeInfo: ai, + TTL: ttl, + }) + + } + + return nil +} + +// Bootstrap instructs the dht to begin bootstrapping the routing table. +func (c *Coordinator) Bootstrap(ctx context.Context, seeds []peer.ID) error { + ctx, span := c.cfg.Tele.Tracer.Start(ctx, "Coordinator.Bootstrap") + defer span.End() + c.routingBehaviour.Notify(ctx, &EventStartBootstrap{ + // Bootstrap state machine uses the message + Message: &fakeMessage{key: kadt.PeerID(c.self).Key()}, + SeedNodes: seeds, + }) + + return nil +} diff --git a/v2/coord/coordinator_test.go b/v2/coord/coordinator_test.go new file mode 100644 index 00000000..235828cc --- /dev/null +++ b/v2/coord/coordinator_test.go @@ -0,0 +1,360 @@ +package coord + +import ( + "context" + "fmt" + "log" + "reflect" + "sync" + "testing" + "time" + + "github.com/benbjohnson/clock" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel" + + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" +) + +const peerstoreTTL = 10 * time.Minute + +type notificationWatcher struct { + mu sync.Mutex + buffered []RoutingNotification + signal chan struct{} +} + +func (w *notificationWatcher) Watch(t *testing.T, ctx context.Context, ch <-chan RoutingNotification) { + t.Helper() + w.signal = make(chan struct{}, 1) + go func() { + for { + select { + case <-ctx.Done(): + return + case ev := <-ch: + w.mu.Lock() + t.Logf("buffered routing notification: %T\n", ev) + w.buffered = append(w.buffered, ev) + select { + case w.signal <- struct{}{}: + default: + } + w.mu.Unlock() + + } + } + }() +} + +func (w *notificationWatcher) Expect(ctx context.Context, expected RoutingNotification) (RoutingNotification, error) { + for { + // look in buffered events + w.mu.Lock() + for i, ev := range w.buffered { + if reflect.TypeOf(ev) == reflect.TypeOf(expected) { + // remove first from buffer and return it + w.buffered = w.buffered[:i+copy(w.buffered[i:], w.buffered[i+1:])] + w.mu.Unlock() + return ev, nil + } + } + w.mu.Unlock() + + // wait to be signaled that there is a new event + select { + case <-ctx.Done(): + return nil, fmt.Errorf("test deadline exceeded while waiting for event %T", expected) + case <-w.signal: + } + } +} + +// TracingTelemetry may be used to create a Telemetry that traces a test +func TracingTelemetry(t *testing.T) *tele.Telemetry { + telemetry, err := tele.New(otel.GetMeterProvider(), kadtest.JaegerTracerProvider(t)) + if err != nil { + t.Fatalf("unexpected error creating telemetry: %v", err) + } + + return telemetry +} + +func TestConfigValidate(t *testing.T) { + t.Run("default is valid", func(t *testing.T) { + cfg, err := DefaultCoordinatorConfig() + require.NoError(t, err) + + require.NoError(t, cfg.Validate()) + }) + + t.Run("clock is not nil", func(t *testing.T) { + cfg, err := DefaultCoordinatorConfig() + require.NoError(t, err) + + cfg.Clock = nil + require.Error(t, cfg.Validate()) + }) + + t.Run("query concurrency positive", func(t *testing.T) { + cfg, err := DefaultCoordinatorConfig() + 
require.NoError(t, err)
+
+ cfg.QueryConcurrency = 0
+ require.Error(t, cfg.Validate())
+ cfg.QueryConcurrency = -1
+ require.Error(t, cfg.Validate())
+ })
+
+ t.Run("query timeout positive", func(t *testing.T) {
+ cfg, err := DefaultCoordinatorConfig()
+ require.NoError(t, err)
+
+ cfg.QueryTimeout = 0
+ require.Error(t, cfg.Validate())
+ cfg.QueryTimeout = -1
+ require.Error(t, cfg.Validate())
+ })
+
+ t.Run("request concurrency positive", func(t *testing.T) {
+ cfg, err := DefaultCoordinatorConfig()
+ require.NoError(t, err)
+
+ cfg.RequestConcurrency = 0
+ require.Error(t, cfg.Validate())
+ cfg.RequestConcurrency = -1
+ require.Error(t, cfg.Validate())
+ })
+
+ t.Run("request timeout positive", func(t *testing.T) {
+ cfg, err := DefaultCoordinatorConfig()
+ require.NoError(t, err)
+
+ cfg.RequestTimeout = 0
+ require.Error(t, cfg.Validate())
+ cfg.RequestTimeout = -1
+ require.Error(t, cfg.Validate())
+ })
+
+ t.Run("logger not nil", func(t *testing.T) {
+ cfg, err := DefaultCoordinatorConfig()
+ require.NoError(t, err)
+
+ cfg.Logger = nil
+ require.Error(t, cfg.Validate())
+ })
+
+ t.Run("telemetry not nil", func(t *testing.T) {
+ cfg, err := DefaultCoordinatorConfig()
+ require.NoError(t, err)
+
+ cfg.Tele = nil
+ require.Error(t, cfg.Validate())
+ })
+}
+
+func TestExhaustiveQuery(t *testing.T) {
+ ctx, cancel := kadtest.CtxShort(t)
+ defer cancel()
+
+ clk := clock.NewMock()
+ _, nodes, err := nettest.LinearTopology(4, clk)
+ require.NoError(t, err)
+ ccfg, err := DefaultCoordinatorConfig()
+ require.NoError(t, err)
+
+ ccfg.Clock = clk
+ ccfg.PeerstoreTTL = peerstoreTTL
+
+ // A (ids[0]) is looking for D (ids[3])
+ // A will first ask B, B will reply with C's address (and A's address)
+ // A will then ask C, C will reply with D's address (and B's address)
+ self := nodes[0].NodeInfo.ID
+ c, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg)
+ require.NoError(t, err)
+
+ target := kadt.PeerID(nodes[3].NodeInfo.ID).Key()
+
+ visited := make(map[string]int)
+
+ // Record the nodes as they are visited
+ qfn := func(ctx context.Context, node Node, stats QueryStats) error {
+ visited[node.ID().String()]++
+ return nil
+ }
+
+ // Run a query to find the value
+ _, err = c.Query(ctx, target, qfn)
+ require.NoError(t, err)
+
+ require.Equal(t, 3, len(visited))
+ require.Contains(t, visited, nodes[1].NodeInfo.ID.String())
+ require.Contains(t, visited, nodes[2].NodeInfo.ID.String())
+ require.Contains(t, visited, nodes[3].NodeInfo.ID.String())
+}
+
+func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) {
+ ctx, cancel := kadtest.CtxShort(t)
+ defer cancel()
+
+ clk := clock.NewMock()
+ _, nodes, err := nettest.LinearTopology(4, clk)
+ require.NoError(t, err)
+
+ ccfg, err := DefaultCoordinatorConfig()
+ require.NoError(t, err)
+
+ ccfg.Clock = clk
+ ccfg.PeerstoreTTL = peerstoreTTL
+
+ // A (ids[0]) is looking for D (ids[3])
+ // A will first ask B, B will reply with C's address (and A's address)
+ // A will then ask C, C will reply with D's address (and B's address)
+ self := nodes[0].NodeInfo.ID
+ c, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg)
+ if err != nil {
+ log.Fatalf("unexpected error creating coordinator: %v", err)
+ }
+
+ w := new(notificationWatcher)
+ w.Watch(t, ctx, c.RoutingNotifications())
+
+ qfn := func(ctx context.Context, node Node, stats QueryStats) error {
+ return nil
+ }
+
+ // Run a query to find the value
+ target := kadt.PeerID(nodes[3].NodeInfo.ID).Key()
+ _, err = c.Query(ctx, target, qfn)
+ 
require.NoError(t, err) + + // the query run by the dht should have received a response from nodes[1] with closer nodes + // nodes[0] and nodes[2] which should trigger a routing table update since nodes[2] was + // not in the dht's routing table. + // the query then continues and should have received a response from nodes[2] with closer nodes + // nodes[1] and nodes[3] which should trigger a routing table update since nodes[3] was + // not in the dht's routing table. + + // no EventRoutingUpdated is sent for the self node + + // However the order in which these events are emitted may vary depending on timing. + + ev1, err := w.Expect(ctx, &EventRoutingUpdated{}) + require.NoError(t, err) + tev1 := ev1.(*EventRoutingUpdated) + + ev2, err := w.Expect(ctx, &EventRoutingUpdated{}) + require.NoError(t, err) + tev2 := ev2.(*EventRoutingUpdated) + + if tev1.NodeInfo.ID == nodes[2].NodeInfo.ID { + require.Equal(t, nodes[3].NodeInfo.ID, tev2.NodeInfo.ID) + } else if tev2.NodeInfo.ID == nodes[2].NodeInfo.ID { + require.Equal(t, nodes[3].NodeInfo.ID, tev1.NodeInfo.ID) + } else { + require.Failf(t, "did not see routing updated event for %s", nodes[2].NodeInfo.ID.String()) + } +} + +func TestBootstrap(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) + + ccfg, err := DefaultCoordinatorConfig() + require.NoError(t, err) + + ccfg.Clock = clk + ccfg.PeerstoreTTL = peerstoreTTL + + self := nodes[0].NodeInfo.ID + d, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg) + require.NoError(t, err) + + w := new(notificationWatcher) + w.Watch(t, ctx, d.RoutingNotifications()) + + seeds := []peer.ID{nodes[1].NodeInfo.ID} + err = d.Bootstrap(ctx, seeds) + require.NoError(t, err) + + // the query run by the dht should have completed + ev, err := w.Expect(ctx, &EventBootstrapFinished{}) + require.NoError(t, err) + + require.IsType(t, &EventBootstrapFinished{}, ev) + tevf := ev.(*EventBootstrapFinished) + require.Equal(t, 3, tevf.Stats.Requests) + require.Equal(t, 3, tevf.Stats.Success) + require.Equal(t, 0, tevf.Stats.Failure) + + _, err = w.Expect(ctx, &EventRoutingUpdated{}) + require.NoError(t, err) + + _, err = w.Expect(ctx, &EventRoutingUpdated{}) + require.NoError(t, err) + + // coordinator will have node1 in its routing table + _, err = d.GetNode(ctx, nodes[1].NodeInfo.ID) + require.NoError(t, err) + + // coordinator should now have node2 in its routing table + _, err = d.GetNode(ctx, nodes[2].NodeInfo.ID) + require.NoError(t, err) + + // coordinator should now have node3 in its routing table + _, err = d.GetNode(ctx, nodes[3].NodeInfo.ID) + require.NoError(t, err) +} + +func TestIncludeNode(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) + + ccfg, err := DefaultCoordinatorConfig() + require.NoError(t, err) + + ccfg.Clock = clk + ccfg.PeerstoreTTL = peerstoreTTL + + candidate := nodes[len(nodes)-1].NodeInfo // not in nodes[0] routing table + + self := nodes[0].NodeInfo.ID + d, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg) + if err != nil { + log.Fatalf("unexpected error creating dht: %v", err) + } + + // the routing table should not contain the node yet + _, err = d.GetNode(ctx, candidate.ID) + require.ErrorIs(t, err, ErrNodeNotFound) + + w := new(notificationWatcher) + w.Watch(t, ctx, d.RoutingNotifications()) + + // 
inject a new node into the dht's includeEvents queue
+ err = d.AddNodes(ctx, []peer.AddrInfo{candidate}, time.Minute)
+ require.NoError(t, err)
+
+ // the include state machine runs in the background and eventually should add the node to the routing table
+ ev, err := w.Expect(ctx, &EventRoutingUpdated{})
+ require.NoError(t, err)
+
+ tev := ev.(*EventRoutingUpdated)
+ require.Equal(t, candidate.ID, tev.NodeInfo.ID)
+
+ // the routing table should now contain the node
+ _, err = d.GetNode(ctx, candidate.ID)
+ require.NoError(t, err)
+}
diff --git a/v2/coord/coretypes.go b/v2/coord/coretypes.go
new file mode 100644
index 00000000..fe72d90f
--- /dev/null
+++ b/v2/coord/coretypes.go
@@ -0,0 +1,93 @@
+package coord
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ "github.com/libp2p/go-libp2p-kad-dht/v2/pb"
+ "github.com/libp2p/go-libp2p/core/peer"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/plprobelab/go-kademlia/key"
+ "github.com/plprobelab/go-kademlia/network/address"
+)
+
+// KadKey is a type alias for the type of key that's used with this DHT
+// implementation.
+type KadKey = key.Key256
+
+// Value is a value that may be stored in the DHT.
+type Value interface {
+ Key() KadKey
+ MarshalBinary() ([]byte, error)
+}
+
+// Node represents the local or a remote node participating in the DHT.
+type Node interface {
+ // ID returns the peer ID identifying this node.
+ ID() peer.ID
+
+ // Addresses returns the network addresses associated with the given node.
+ Addresses() []ma.Multiaddr
+
+ // GetClosestNodes requests the n closest nodes to the key from the node's
+ // local routing table. The node may return fewer nodes than requested.
+ GetClosestNodes(ctx context.Context, key KadKey, n int) ([]Node, error)
+
+ // GetValue requests that the node return any value associated with the
+ // supplied key. If the node does not have a value for the key it returns
+ // ErrValueNotFound.
+ GetValue(ctx context.Context, key KadKey) (Value, error)
+
+ // PutValue requests that the node stores a value to be associated with the supplied key.
+ // If the node cannot or chooses not to store the value for the key it returns ErrValueNotAccepted.
+ PutValue(ctx context.Context, r Value, q int) error
+}
+
+var (
+ ErrNodeNotFound = errors.New("node not found")
+ ErrValueNotFound = errors.New("value not found")
+ ErrValueNotAccepted = errors.New("value not accepted")
+)
+
+// QueryFunc is the type of the function called by Query to visit each node.
+//
+// The error result returned by the function controls how Query proceeds. If the function returns the special value
+// ErrSkipNode, Query skips fetching closer nodes from the current node. If the function returns the special value
+// ErrSkipRemaining, Query skips visiting all remaining nodes. Otherwise, if the function returns a non-nil error,
+// Query stops entirely and returns that error.
+//
+// The stats argument contains statistics on the progress of the query so far.
+type QueryFunc func(ctx context.Context, node Node, stats QueryStats) error
+
+type QueryStats struct {
+ Start time.Time // Start is the time the query began executing.
+ End time.Time // End is the time the query stopped executing.
+ Requests int // Requests is a count of the number of requests made by the query.
+ Success int // Success is a count of the number of nodes the query successfully contacted.
+ Failure int // Failure is a count of the number of nodes the query received an error response from.
+ Exhausted bool // Exhausted is true if the query ended after visiting every node it could.
+}
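+
+// Example (editor's illustration, not used elsewhere in this patch): a
+// QueryFunc that records visited nodes and stops the traversal once five
+// nodes have been contacted successfully might look like this:
+//
+//	qfn := func(ctx context.Context, node Node, stats QueryStats) error {
+//		fmt.Println("visited", node.ID())
+//		if stats.Success >= 5 {
+//			return ErrSkipRemaining
+//		}
+//		return nil
+//	}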
+
+var (
+ // ErrSkipNode is used as a return value from a QueryFunc to indicate that the node is to be skipped.
+ ErrSkipNode = errors.New("skip node")
+
+ // ErrSkipRemaining is used as a return value from a QueryFunc to indicate that all remaining nodes are to be skipped.
+ ErrSkipRemaining = errors.New("skip remaining nodes")
+)
+
+// Router is a work in progress.
+// TODO figure out the role of protocol identifiers
+type Router interface {
+ // SendMessage attempts to send a request to another node. The Router will absorb the addresses into its
+ // internal nodestore. This method blocks until a response is received or an error is encountered.
+ SendMessage(ctx context.Context, to peer.AddrInfo, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error)
+
+ AddNodeInfo(ctx context.Context, info peer.AddrInfo, ttl time.Duration) error
+ GetNodeInfo(ctx context.Context, id peer.ID) (peer.AddrInfo, error)
+
+ // GetClosestNodes attempts to send a request to another node asking it for nodes that it considers to be
+ // closest to the target key.
+ GetClosestNodes(ctx context.Context, to peer.AddrInfo, target KadKey) ([]peer.AddrInfo, error)
+}
diff --git a/v2/coord/event.go b/v2/coord/event.go
new file mode 100644
index 00000000..4d5790f7
--- /dev/null
+++ b/v2/coord/event.go
@@ -0,0 +1,152 @@
+package coord
+
+import (
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/plprobelab/go-kademlia/kad"
+ "github.com/plprobelab/go-kademlia/network/address"
+ "github.com/plprobelab/go-kademlia/query"
+)
+
+type BehaviourEvent interface {
+ behaviourEvent()
+}
+
+// RoutingCommand is a type of [BehaviourEvent] that instructs a [RoutingBehaviour] to perform an action.
+type RoutingCommand interface {
+ BehaviourEvent
+ routingCommand()
+}
+
+// NetworkCommand is a type of [BehaviourEvent] that instructs a [NetworkBehaviour] to perform an action.
+type NetworkCommand interface {
+ BehaviourEvent
+ networkCommand()
+}
+
+// QueryCommand is a type of [BehaviourEvent] that instructs a [QueryBehaviour] to perform an action. 
+type QueryCommand interface { + BehaviourEvent + queryCommand() +} + +type NodeHandlerRequest interface { + BehaviourEvent + nodeHandlerRequest() +} + +type NodeHandlerResponse interface { + BehaviourEvent + nodeHandlerResponse() +} + +type RoutingNotification interface { + BehaviourEvent + routingNotification() +} + +type EventStartBootstrap struct { + ProtocolID address.ProtocolID + Message kad.Request[KadKey, ma.Multiaddr] + SeedNodes []peer.ID // TODO: peer.AddrInfo +} + +func (*EventStartBootstrap) behaviourEvent() {} +func (*EventStartBootstrap) routingCommand() {} + +type EventOutboundGetCloserNodes struct { + QueryID query.QueryID + To peer.AddrInfo + Target KadKey + Notify Notify[BehaviourEvent] +} + +func (*EventOutboundGetCloserNodes) behaviourEvent() {} +func (*EventOutboundGetCloserNodes) nodeHandlerRequest() {} +func (*EventOutboundGetCloserNodes) networkCommand() {} + +type EventStartQuery struct { + QueryID query.QueryID + Target KadKey + ProtocolID address.ProtocolID + Message kad.Request[KadKey, ma.Multiaddr] + KnownClosestNodes []peer.ID + Notify NotifyCloser[BehaviourEvent] +} + +func (*EventStartQuery) behaviourEvent() {} +func (*EventStartQuery) queryCommand() {} + +type EventStopQuery struct { + QueryID query.QueryID +} + +func (*EventStopQuery) behaviourEvent() {} +func (*EventStopQuery) queryCommand() {} + +type EventAddAddrInfo struct { + NodeInfo peer.AddrInfo + TTL time.Duration +} + +func (*EventAddAddrInfo) behaviourEvent() {} +func (*EventAddAddrInfo) routingCommand() {} + +type EventGetCloserNodesSuccess struct { + QueryID query.QueryID + To peer.AddrInfo + Target KadKey + CloserNodes []peer.AddrInfo +} + +func (*EventGetCloserNodesSuccess) behaviourEvent() {} +func (*EventGetCloserNodesSuccess) nodeHandlerResponse() {} + +type EventGetCloserNodesFailure struct { + QueryID query.QueryID + To peer.AddrInfo + Target KadKey + Err error +} + +func (*EventGetCloserNodesFailure) behaviourEvent() {} +func (*EventGetCloserNodesFailure) nodeHandlerResponse() {} + +// EventQueryProgressed is emitted by the coordinator when a query has received a +// response from a node. +type EventQueryProgressed struct { + QueryID query.QueryID + NodeID peer.ID + Response kad.Response[KadKey, ma.Multiaddr] + Stats query.QueryStats +} + +func (*EventQueryProgressed) behaviourEvent() {} + +// EventQueryFinished is emitted by the coordinator when a query has finished, either through +// running to completion or by being canceled. +type EventQueryFinished struct { + QueryID query.QueryID + Stats query.QueryStats +} + +func (*EventQueryFinished) behaviourEvent() {} + +// EventRoutingUpdated is emitted by the coordinator when a new node has been verified and added to the routing table. +type EventRoutingUpdated struct { + NodeInfo peer.AddrInfo +} + +func (*EventRoutingUpdated) behaviourEvent() {} +func (*EventRoutingUpdated) routingNotification() {} + +// EventBootstrapFinished is emitted by the coordinator when a bootstrap has finished, either through +// running to completion or by being canceled. 
+type EventBootstrapFinished struct { + Stats query.QueryStats +} + +func (*EventBootstrapFinished) behaviourEvent() {} +func (*EventBootstrapFinished) routingNotification() {} diff --git a/v2/coord/event_test.go b/v2/coord/event_test.go new file mode 100644 index 00000000..b6afdd4a --- /dev/null +++ b/v2/coord/event_test.go @@ -0,0 +1,25 @@ +package coord + +var _ NetworkCommand = (*EventOutboundGetCloserNodes)(nil) + +var ( + _ RoutingCommand = (*EventAddAddrInfo)(nil) + _ RoutingCommand = (*EventStartBootstrap)(nil) +) + +var ( + _ QueryCommand = (*EventStartQuery)(nil) + _ QueryCommand = (*EventStopQuery)(nil) +) + +var ( + _ RoutingNotification = (*EventRoutingUpdated)(nil) + _ RoutingNotification = (*EventBootstrapFinished)(nil) +) + +var _ NodeHandlerRequest = (*EventOutboundGetCloserNodes)(nil) + +var ( + _ NodeHandlerResponse = (*EventGetCloserNodesSuccess)(nil) + _ NodeHandlerResponse = (*EventGetCloserNodesFailure)(nil) +) diff --git a/v2/coord/internal/nettest/layouts.go b/v2/coord/internal/nettest/layouts.go new file mode 100644 index 00000000..f5236dc1 --- /dev/null +++ b/v2/coord/internal/nettest/layouts.go @@ -0,0 +1,65 @@ +package nettest + +import ( + "context" + "fmt" + + "github.com/benbjohnson/clock" + ma "github.com/multiformats/go-multiaddr" + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-kademlia/routing/simplert" + + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" +) + +// LinearTopology creates a network topology consisting of n nodes peered in a linear chain. +// The nodes are configured with routing tables that contain immediate neighbours. +// It returns the topology and the nodes ordered such that nodes[x] has nodes[x-1] and nodes[x+1] in its routing table +// The topology is not a ring: nodes[0] only has nodes[1] in its table and nodes[n-1] only has nodes[n-2] in its table. +// nodes[1] has nodes[0] and nodes[2] in its routing table. +// If n > 2 then the first and last nodes will not have one another in their routing tables. 
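+//
+// Example (editor's illustration): tests in this package typically build a
+// small network like
+//
+//	clk := clock.NewMock()
+//	top, nodes, err := LinearTopology(4, clk)
+//
+// and then drive a Coordinator using nodes[0].Router and nodes[0].RoutingTable.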
+func LinearTopology(n int, clk clock.Clock) (*Topology, []*Node, error) { + nodes := make([]*Node, n) + + top := NewTopology(clk) + for i := range nodes { + + a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i)) + if err != nil { + return nil, nil, err + } + + ai, err := NewAddrInfo([]ma.Multiaddr{a}) + if err != nil { + return nil, nil, err + } + + nodes[i] = &Node{ + NodeInfo: ai, + Router: NewRouter(ai.ID, top), + RoutingTable: simplert.New[key.Key256, kad.NodeID[key.Key256]](kadt.PeerID(ai.ID), 20), + } + } + + // Define the network topology, with default network links between every node + for i := 0; i < len(nodes); i++ { + for j := i + 1; j < len(nodes); j++ { + top.ConnectNodes(nodes[i], nodes[j]) + } + } + + // Connect nodes in a chain + for i := 0; i < len(nodes); i++ { + if i > 0 { + nodes[i].Router.AddNodeInfo(context.Background(), nodes[i-1].NodeInfo, 0) + nodes[i].RoutingTable.AddNode(kadt.PeerID(nodes[i-1].NodeInfo.ID)) + } + if i < len(nodes)-1 { + nodes[i].Router.AddNodeInfo(context.Background(), nodes[i+1].NodeInfo, 0) + nodes[i].RoutingTable.AddNode(kadt.PeerID(nodes[i+1].NodeInfo.ID)) + } + } + + return top, nodes, nil +} diff --git a/v2/coord/internal/nettest/routing.go b/v2/coord/internal/nettest/routing.go new file mode 100644 index 00000000..e0217052 --- /dev/null +++ b/v2/coord/internal/nettest/routing.go @@ -0,0 +1,174 @@ +package nettest + +import ( + "context" + "fmt" + "math/rand" + "sync" + "time" + + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-kademlia/network/address" + "github.com/plprobelab/go-kademlia/network/endpoint" + + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" +) + +var rng = rand.New(rand.NewSource(6283185)) + +func NewAddrInfo(addrs []ma.Multiaddr) (peer.AddrInfo, error) { + _, pub, err := crypto.GenerateEd25519Key(rng) + if err != nil { + return peer.AddrInfo{}, err + } + pid, err := peer.IDFromPublicKey(pub) + if err != nil { + return peer.AddrInfo{}, err + } + + return peer.AddrInfo{ + ID: pid, + Addrs: addrs, + }, nil +} + +// Link represents the route between two nodes. It allows latency and transport failures to be simulated. +type Link interface { + ConnLatency() time.Duration // the simulated time taken to return an error or successful outcome + DialLatency() time.Duration // the simulated time taken to connect to a node + DialErr() error // an error that should be returned on dial, nil if the dial is successful +} + +// DefaultLink is the default link used if none is specified. +// It has zero latency and always succeeds. 
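+//
+// A fixed-latency, always-reachable link can be simulated with a custom
+// implementation (editor's sketch, not part of the original patch):
+//
+//	type slowLink struct{ d time.Duration }
+//
+//	func (l *slowLink) DialErr() error             { return nil }
+//	func (l *slowLink) ConnLatency() time.Duration { return l.d }
+//	func (l *slowLink) DialLatency() time.Duration { return l.d }
+//
+// which can be wired between two nodes with Topology.ConnectNodesWithRoute.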
+type DefaultLink struct{} + +func (l *DefaultLink) DialErr() error { return nil } +func (l *DefaultLink) ConnLatency() time.Duration { return 0 } +func (l *DefaultLink) DialLatency() time.Duration { return 0 } + +type Router struct { + self peer.ID + top *Topology + mu sync.Mutex // guards nodes + nodes map[peer.ID]*nodeStatus +} + +type nodeStatus struct { + NodeInfo peer.AddrInfo + Connectedness endpoint.Connectedness +} + +func NewRouter(self peer.ID, top *Topology) *Router { + return &Router{ + self: self, + top: top, + nodes: make(map[peer.ID]*nodeStatus), + } +} + +func (r *Router) NodeID() kad.NodeID[key.Key256] { + return kadt.PeerID(r.self) +} + +func (r *Router) SendMessage(ctx context.Context, to peer.AddrInfo, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error) { + if err := r.AddNodeInfo(ctx, to, 0); err != nil { + return nil, fmt.Errorf("add node info: %w", err) + } + + if err := r.Dial(ctx, to); err != nil { + return nil, fmt.Errorf("dial: %w", err) + } + + return r.top.RouteMessage(ctx, r.self, to.ID, protoID, req) +} + +func (r *Router) HandleMessage(ctx context.Context, n peer.ID, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error) { + closer := make([]*pb.Message_Peer, 0) + + r.mu.Lock() + for _, n := range r.nodes { + // only include self if it was the target of the request + if n.NodeInfo.ID == r.self && !key.Equal(kadt.PeerID(n.NodeInfo.ID).Key(), req.Target()) { + continue + } + closer = append(closer, pb.FromAddrInfo(n.NodeInfo)) + } + r.mu.Unlock() + + // initialize the response message + resp := &pb.Message{ + Type: req.GetType(), + Key: req.GetKey(), + } + resp.CloserPeers = closer + return resp, nil +} + +func (r *Router) Dial(ctx context.Context, to peer.AddrInfo) error { + r.mu.Lock() + status, ok := r.nodes[to.ID] + r.mu.Unlock() + + if ok { + switch status.Connectedness { + case endpoint.Connected: + return nil + case endpoint.CanConnect: + if _, err := r.top.Dial(ctx, r.self, to.ID); err != nil { + return err + } + + status.Connectedness = endpoint.Connected + r.mu.Lock() + r.nodes[to.ID] = status + r.mu.Unlock() + return nil + } + } + return endpoint.ErrUnknownPeer +} + +func (r *Router) AddNodeInfo(ctx context.Context, info peer.AddrInfo, ttl time.Duration) error { + r.mu.Lock() + defer r.mu.Unlock() + + if _, ok := r.nodes[info.ID]; !ok { + r.nodes[info.ID] = &nodeStatus{ + NodeInfo: info, + Connectedness: endpoint.CanConnect, + } + } + return nil +} + +func (r *Router) GetNodeInfo(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { + r.mu.Lock() + defer r.mu.Unlock() + + status, ok := r.nodes[id] + if !ok { + return peer.AddrInfo{}, fmt.Errorf("unknown node") + } + return status.NodeInfo, nil +} + +func (r *Router) GetClosestNodes(ctx context.Context, to peer.AddrInfo, target key.Key256) ([]peer.AddrInfo, error) { + protoID := address.ProtocolID("/test/1.0.0") + + req := &pb.Message{ + Type: pb.Message_FIND_NODE, + Key: []byte("random-key"), + } + + resp, err := r.SendMessage(ctx, to, protoID, req) + if err != nil { + return nil, err + } + return resp.CloserPeersAddrInfos(), nil +} diff --git a/v2/coord/internal/nettest/topology.go b/v2/coord/internal/nettest/topology.go new file mode 100644 index 00000000..c7aae8d5 --- /dev/null +++ b/v2/coord/internal/nettest/topology.go @@ -0,0 +1,140 @@ +package nettest + +import ( + "context" + "fmt" + + "github.com/benbjohnson/clock" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" + 
"github.com/plprobelab/go-kademlia/network/address" + "github.com/plprobelab/go-kademlia/routing" + + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" +) + +type Node struct { + NodeInfo peer.AddrInfo + Router *Router + RoutingTable routing.RoutingTableCpl[key.Key256, kad.NodeID[key.Key256]] +} + +type Topology struct { + clk clock.Clock + links map[string]Link + nodes []*Node + nodeIndex map[peer.ID]*Node + routers map[peer.ID]*Router +} + +func NewTopology(clk clock.Clock) *Topology { + return &Topology{ + clk: clk, + links: make(map[string]Link), + nodeIndex: make(map[peer.ID]*Node), + routers: make(map[peer.ID]*Router), + } +} + +func (t *Topology) Nodes() []*Node { + return t.nodes +} + +func (t *Topology) ConnectNodes(a *Node, b *Node) { + t.ConnectNodesWithRoute(a, b, &DefaultLink{}) +} + +func (t *Topology) ConnectNodesWithRoute(a *Node, b *Node, l Link) { + akey := a.NodeInfo.ID + if _, exists := t.nodeIndex[akey]; !exists { + t.nodeIndex[akey] = a + t.nodes = append(t.nodes, a) + t.routers[akey] = a.Router + } + + bkey := b.NodeInfo.ID + if _, exists := t.nodeIndex[bkey]; !exists { + t.nodeIndex[bkey] = b + t.nodes = append(t.nodes, b) + t.routers[bkey] = b.Router + } + + atob := fmt.Sprintf("%s->%s", akey, bkey) + t.links[atob] = l + + // symmetrical routing assumed + btoa := fmt.Sprintf("%s->%s", bkey, akey) + t.links[btoa] = l +} + +func (t *Topology) findRoute(ctx context.Context, from peer.ID, to peer.ID) (Link, error) { + key := fmt.Sprintf("%s->%s", from, to) + + route, ok := t.links[key] + if !ok { + return nil, fmt.Errorf("no route to node") + } + + return route, nil +} + +func (t *Topology) Dial(ctx context.Context, from peer.ID, to peer.ID) (peer.AddrInfo, error) { + if from == to { + node, ok := t.nodeIndex[to] + if !ok { + return peer.AddrInfo{}, fmt.Errorf("unknown node") + } + + return node.NodeInfo, nil + } + + route, err := t.findRoute(ctx, from, to) + if err != nil { + return peer.AddrInfo{}, fmt.Errorf("find route: %w", err) + } + + latency := route.DialLatency() + if latency > 0 { + t.clk.Sleep(latency) + } + + if err := route.DialErr(); err != nil { + return peer.AddrInfo{}, err + } + + node, ok := t.nodeIndex[to] + if !ok { + return peer.AddrInfo{}, fmt.Errorf("unknown node") + } + + return node.NodeInfo, nil +} + +func (t *Topology) RouteMessage(ctx context.Context, from peer.ID, to peer.ID, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error) { + if from == to { + node, ok := t.nodeIndex[to] + if !ok { + return nil, fmt.Errorf("unknown node") + } + + return node.Router.HandleMessage(ctx, from, protoID, req) + } + + route, err := t.findRoute(ctx, from, to) + if err != nil { + return nil, fmt.Errorf("find route: %w", err) + } + + latency := route.ConnLatency() + if latency > 0 { + t.clk.Sleep(latency) + } + + node, ok := t.nodeIndex[to] + if !ok { + return nil, fmt.Errorf("no route to node") + } + + return node.Router.HandleMessage(ctx, from, protoID, req) +} diff --git a/v2/coord/network.go b/v2/coord/network.go new file mode 100644 index 00000000..eeb05402 --- /dev/null +++ b/v2/coord/network.go @@ -0,0 +1,264 @@ +package coord + +import ( + "context" + "fmt" + "sync" + + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-kademlia/query" + "go.opentelemetry.io/otel/trace" + "golang.org/x/exp/slog" + + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" +) + +type NetworkBehaviour struct { + // rtr is the message 
router used to send messages
+ rtr Router
+
+ nodeHandlersMu sync.Mutex
+ nodeHandlers map[peer.ID]*NodeHandler // TODO: garbage collect node handlers
+
+ pendingMu sync.Mutex
+ pending []BehaviourEvent
+ ready chan struct{}
+
+ logger *slog.Logger
+ tracer trace.Tracer
+}
+
+func NewNetworkBehaviour(rtr Router, logger *slog.Logger, tracer trace.Tracer) *NetworkBehaviour {
+ b := &NetworkBehaviour{
+ rtr: rtr,
+ nodeHandlers: make(map[peer.ID]*NodeHandler),
+ ready: make(chan struct{}, 1),
+ logger: logger,
+ tracer: tracer,
+ }
+
+ return b
+}
+
+func (b *NetworkBehaviour) Notify(ctx context.Context, ev BehaviourEvent) {
+ ctx, span := b.tracer.Start(ctx, "NetworkBehaviour.Notify")
+ defer span.End()
+
+ b.pendingMu.Lock()
+ defer b.pendingMu.Unlock()
+
+ switch ev := ev.(type) {
+ case *EventOutboundGetCloserNodes:
+ b.nodeHandlersMu.Lock()
+ nh, ok := b.nodeHandlers[ev.To.ID]
+ if !ok {
+ nh = NewNodeHandler(ev.To, b.rtr, b.logger, b.tracer)
+ b.nodeHandlers[ev.To.ID] = nh
+ }
+ b.nodeHandlersMu.Unlock()
+ nh.Notify(ctx, ev)
+ default:
+ panic(fmt.Sprintf("unexpected dht event: %T", ev))
+ }
+
+ if len(b.pending) > 0 {
+ select {
+ case b.ready <- struct{}{}:
+ default:
+ }
+ }
+}
+
+func (b *NetworkBehaviour) Ready() <-chan struct{} {
+ return b.ready
+}
+
+func (b *NetworkBehaviour) Perform(ctx context.Context) (BehaviourEvent, bool) {
+ _, span := b.tracer.Start(ctx, "NetworkBehaviour.Perform")
+ defer span.End()
+ // No inbound work can be done until Perform is complete
+ b.pendingMu.Lock()
+ defer b.pendingMu.Unlock()
+
+ // drain queued events.
+ if len(b.pending) > 0 {
+ var ev BehaviourEvent
+ ev, b.pending = b.pending[0], b.pending[1:]
+
+ if len(b.pending) > 0 {
+ select {
+ case b.ready <- struct{}{}:
+ default:
+ }
+ }
+ return ev, true
+ }
+
+ return nil, false
+}
+
+func (b *NetworkBehaviour) getNodeHandler(ctx context.Context, id peer.ID) (*NodeHandler, error) {
+ b.nodeHandlersMu.Lock()
+ // unlock via defer so the early error return below cannot leak the mutex
+ defer b.nodeHandlersMu.Unlock()
+ nh, ok := b.nodeHandlers[id]
+ if !ok || len(nh.Addresses()) == 0 {
+ info, err := b.rtr.GetNodeInfo(ctx, id)
+ if err != nil {
+ return nil, err
+ }
+ nh = NewNodeHandler(info, b.rtr, b.logger, b.tracer)
+ b.nodeHandlers[id] = nh
+ }
+ return nh, nil
+}
+
+type NodeHandler struct {
+ self peer.AddrInfo
+ rtr Router
+ queue *WorkQueue[NodeHandlerRequest]
+ logger *slog.Logger
+ tracer trace.Tracer
+}
+
+func NewNodeHandler(self peer.AddrInfo, rtr Router, logger *slog.Logger, tracer trace.Tracer) *NodeHandler {
+ h := &NodeHandler{
+ self: self,
+ rtr: rtr,
+ logger: logger,
+ tracer: tracer,
+ }
+
+ h.queue = NewWorkQueue(h.send)
+
+ return h
+}
+
+func (h *NodeHandler) Notify(ctx context.Context, ev NodeHandlerRequest) {
+ ctx, span := h.tracer.Start(ctx, "NodeHandler.Notify")
+ defer span.End()
+ h.queue.Enqueue(ctx, ev)
+}
+
+func (h *NodeHandler) send(ctx context.Context, ev NodeHandlerRequest) bool {
+ switch cmd := ev.(type) {
+ case *EventOutboundGetCloserNodes:
+ if cmd.Notify == nil {
+ break
+ }
+ nodes, err := h.rtr.GetClosestNodes(ctx, h.self, cmd.Target)
+ if err != nil {
+ cmd.Notify.Notify(ctx, &EventGetCloserNodesFailure{
+ QueryID: cmd.QueryID,
+ To: h.self,
+ Target: cmd.Target,
+ Err: fmt.Errorf("NodeHandler: %w", err),
+ })
+ return false
+ }
+
+ cmd.Notify.Notify(ctx, &EventGetCloserNodesSuccess{
+ QueryID: cmd.QueryID,
+ To: h.self,
+ Target: cmd.Target,
+ CloserNodes: nodes,
+ })
+ default:
+ panic(fmt.Sprintf("unexpected command type: %T", cmd))
+ }
+
+ return false
+}
+
+func (h *NodeHandler) ID() peer.ID {
+ return h.self.ID
+}
+ 
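+// Note (editor's addition): a NodeHandler funnels all outbound requests to a
+// single peer through its WorkQueue, so they are processed one at a time.
+// Callers either fire-and-forget via Notify or block for a result, e.g.
+// (sketch, assuming a handler h and a key k):
+//
+//	nodes, err := h.GetClosestNodes(ctx, k, 20)
+//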
+func (h *NodeHandler) Addresses() []ma.Multiaddr { + return h.self.Addrs +} + +// GetClosestNodes requests the n closest nodes to the key from the node's local routing table. +// The node may return fewer nodes than requested. +func (h *NodeHandler) GetClosestNodes(ctx context.Context, k KadKey, n int) ([]Node, error) { + ctx, span := h.tracer.Start(ctx, "NodeHandler.GetClosestNodes") + defer span.End() + w := NewWaiter[BehaviourEvent]() + + ev := &EventOutboundGetCloserNodes{ + QueryID: query.QueryID(key.HexString(k)), + To: h.self, + Target: k, + Notify: w, + } + + h.queue.Enqueue(ctx, ev) + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case we := <-w.Chan(): + + switch res := we.Event.(type) { + case *EventGetCloserNodesSuccess: + nodes := make([]Node, 0, len(res.CloserNodes)) + for _, info := range res.CloserNodes { + // TODO use a global registry of node handlers + nodes = append(nodes, NewNodeHandler(info, h.rtr, h.logger, h.tracer)) + n-- + if n == 0 { + break + } + } + return nodes, nil + + case *EventGetCloserNodesFailure: + return nil, res.Err + default: + panic(fmt.Sprintf("unexpected node handler event: %T", ev)) + } + } +} + +// GetValue requests that the node return any value associated with the supplied key. +// If the node does not have a value for the key it returns ErrValueNotFound. +func (h *NodeHandler) GetValue(ctx context.Context, key KadKey) (Value, error) { + panic("not implemented") +} + +// PutValue requests that the node stores a value to be associated with the supplied key. +// If the node cannot or chooses not to store the value for the key it returns ErrValueNotAccepted. +func (h *NodeHandler) PutValue(ctx context.Context, r Value, q int) error { + panic("not implemented") +} + +func CloserNodesResponse(k KadKey, nodes []peer.AddrInfo) kad.Response[KadKey, ma.Multiaddr] { + infos := make([]kad.NodeInfo[KadKey, ma.Multiaddr], len(nodes)) + for i := range nodes { + infos[i] = kadt.AddrInfo{Info: nodes[i]} + } + + return &fakeMessage{ + key: k, + infos: infos, + } +} + +type fakeMessage struct { + key KadKey + infos []kad.NodeInfo[KadKey, ma.Multiaddr] +} + +func (r fakeMessage) Target() KadKey { + return r.key +} + +func (r fakeMessage) CloserNodes() []kad.NodeInfo[KadKey, ma.Multiaddr] { + return r.infos +} + +func (r fakeMessage) EmptyResponse() kad.Response[KadKey, ma.Multiaddr] { + return &fakeMessage{} +} diff --git a/v2/coord/network_test.go b/v2/coord/network_test.go new file mode 100644 index 00000000..ad0f3146 --- /dev/null +++ b/v2/coord/network_test.go @@ -0,0 +1,34 @@ +package coord + +import ( + "testing" + + "github.com/benbjohnson/clock" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/trace" + "golang.org/x/exp/slog" + + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" +) + +// TODO: this is just a basic is-it-working test that needs to be improved +func TestGetClosestNodes(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) + + h := NewNodeHandler(nodes[1].NodeInfo, nodes[1].Router, slog.Default(), trace.NewNoopTracerProvider().Tracer("")) + + // node 1 has node 2 in its routing table so it will return it along with node 0 + found, err := h.GetClosestNodes(ctx, kadt.PeerID(nodes[2].NodeInfo.ID).Key(), 2) + require.NoError(t, err) + for _, f := range found { + t.Logf("found 
node %v", f.ID()) + } + require.Equal(t, 2, len(found)) +} diff --git a/v2/coord/query.go b/v2/coord/query.go new file mode 100644 index 00000000..8ef2bdfc --- /dev/null +++ b/v2/coord/query.go @@ -0,0 +1,182 @@ +package coord + +import ( + "context" + "fmt" + "sync" + + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + ma "github.com/multiformats/go-multiaddr" + "github.com/plprobelab/go-kademlia/query" + "go.opentelemetry.io/otel/trace" + "golang.org/x/exp/slog" +) + +type PooledQueryBehaviour struct { + pool *query.Pool[KadKey, ma.Multiaddr] + waiters map[query.QueryID]NotifyCloser[BehaviourEvent] + + pendingMu sync.Mutex + pending []BehaviourEvent + ready chan struct{} + + logger *slog.Logger + tracer trace.Tracer +} + +func NewPooledQueryBehaviour(pool *query.Pool[KadKey, ma.Multiaddr], logger *slog.Logger, tracer trace.Tracer) *PooledQueryBehaviour { + h := &PooledQueryBehaviour{ + pool: pool, + waiters: make(map[query.QueryID]NotifyCloser[BehaviourEvent]), + ready: make(chan struct{}, 1), + logger: logger, + tracer: tracer, + } + return h +} + +func (p *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { + ctx, span := p.tracer.Start(ctx, "PooledQueryBehaviour.Notify") + defer span.End() + + p.pendingMu.Lock() + defer p.pendingMu.Unlock() + + var cmd query.PoolEvent + switch ev := ev.(type) { + case *EventStartQuery: + cmd = &query.EventPoolAddQuery[KadKey, ma.Multiaddr]{ + QueryID: ev.QueryID, + Target: ev.Target, + ProtocolID: ev.ProtocolID, + Message: ev.Message, + KnownClosestNodes: SliceOfPeerIDToSliceOfNodeID(ev.KnownClosestNodes), + } + if ev.Notify != nil { + p.waiters[ev.QueryID] = ev.Notify + } + + case *EventStopQuery: + cmd = &query.EventPoolStopQuery{ + QueryID: ev.QueryID, + } + + case *EventGetCloserNodesSuccess: + for _, info := range ev.CloserNodes { + // TODO: do this after advancing pool + p.pending = append(p.pending, &EventAddAddrInfo{ + NodeInfo: info, + }) + } + waiter, ok := p.waiters[ev.QueryID] + if ok { + waiter.Notify(ctx, &EventQueryProgressed{ + NodeID: ev.To.ID, + QueryID: ev.QueryID, + Response: CloserNodesResponse(ev.Target, ev.CloserNodes), + // Stats: stats, + }) + } + cmd = &query.EventPoolMessageResponse[KadKey, ma.Multiaddr]{ + NodeID: kadt.PeerID(ev.To.ID), + QueryID: ev.QueryID, + Response: CloserNodesResponse(ev.Target, ev.CloserNodes), + } + case *EventGetCloserNodesFailure: + cmd = &query.EventPoolMessageFailure[KadKey]{ + NodeID: kadt.PeerID(ev.To.ID), + QueryID: ev.QueryID, + Error: ev.Err, + } + default: + panic(fmt.Sprintf("unexpected dht event: %T", ev)) + } + + // attempt to advance the query pool + ev, ok := p.advancePool(ctx, cmd) + if ok { + p.pending = append(p.pending, ev) + } + if len(p.pending) > 0 { + select { + case p.ready <- struct{}{}: + default: + } + } +} + +func (p *PooledQueryBehaviour) Ready() <-chan struct{} { + return p.ready +} + +func (p *PooledQueryBehaviour) Perform(ctx context.Context) (BehaviourEvent, bool) { + ctx, span := p.tracer.Start(ctx, "PooledQueryBehaviour.Perform") + defer span.End() + + // No inbound work can be done until Perform is complete + p.pendingMu.Lock() + defer p.pendingMu.Unlock() + + for { + // drain queued events first. 
+ if len(p.pending) > 0 {
+ var ev BehaviourEvent
+ ev, p.pending = p.pending[0], p.pending[1:]
+
+ if len(p.pending) > 0 {
+ select {
+ case p.ready <- struct{}{}:
+ default:
+ }
+ }
+ return ev, true
+ }
+
+ // attempt to advance the query pool
+ ev, ok := p.advancePool(ctx, &query.EventPoolPoll{})
+ if ok {
+ return ev, true
+ }
+
+ if len(p.pending) == 0 {
+ return nil, false
+ }
+ }
+}
+
+func (p *PooledQueryBehaviour) advancePool(ctx context.Context, ev query.PoolEvent) (BehaviourEvent, bool) {
+ ctx, span := p.tracer.Start(ctx, "PooledQueryBehaviour.advancePool")
+ defer span.End()
+
+ pstate := p.pool.Advance(ctx, ev)
+ switch st := pstate.(type) {
+ case *query.StatePoolQueryMessage[KadKey, ma.Multiaddr]:
+ return &EventOutboundGetCloserNodes{
+ QueryID: st.QueryID,
+ To: NodeIDToAddrInfo(st.NodeID),
+ Target: st.Message.Target(),
+ Notify: p,
+ }, true
+ case *query.StatePoolWaitingAtCapacity:
+ // nothing to do except wait for message response or timeout
+ case *query.StatePoolWaitingWithCapacity:
+ // nothing to do except wait for message response or timeout
+ case *query.StatePoolQueryFinished:
+ waiter, ok := p.waiters[st.QueryID]
+ if ok {
+ waiter.Notify(ctx, &EventQueryFinished{
+ QueryID: st.QueryID,
+ Stats: st.Stats,
+ })
+ waiter.Close()
+ }
+ case *query.StatePoolQueryTimeout:
+ // TODO
+ case *query.StatePoolIdle:
+ // nothing to do
+ default:
+ panic(fmt.Sprintf("unexpected pool state: %T", st))
+ }
+
+ return nil, false
+}
diff --git a/v2/coord/routing.go b/v2/coord/routing.go
new file mode 100644
index 00000000..488ac689
--- /dev/null
+++ b/v2/coord/routing.go
@@ -0,0 +1,358 @@
+package coord
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/plprobelab/go-kademlia/routing"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+ "golang.org/x/exp/slog"
+
+ "github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
+)
+
+// A RoutingBehaviour provides the behaviours for bootstrapping and maintaining a DHT's routing table. 
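+// It multiplexes events between three child state machines: bootstrap,
+// include and probe.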
+type RoutingBehaviour struct { + // self is the peer id of the system the dht is running on + self peer.ID + // bootstrap is the bootstrap state machine, responsible for bootstrapping the routing table + bootstrap SM[routing.BootstrapEvent, routing.BootstrapState] + + // include is the inclusion state machine, responsible for vetting nodes before including them in the routing table + include SM[routing.IncludeEvent, routing.IncludeState] + + // probe is the node probing state machine, responsible for periodically checking connectivity of nodes in the routing table + probe SM[routing.ProbeEvent, routing.ProbeState] + + pendingMu sync.Mutex + pending []BehaviourEvent + ready chan struct{} + + logger *slog.Logger + tracer trace.Tracer +} + +func NewRoutingBehaviour(self peer.ID, bootstrap SM[routing.BootstrapEvent, routing.BootstrapState], include SM[routing.IncludeEvent, routing.IncludeState], probe SM[routing.ProbeEvent, routing.ProbeState], logger *slog.Logger, tracer trace.Tracer) *RoutingBehaviour { + r := &RoutingBehaviour{ + self: self, + bootstrap: bootstrap, + include: include, + probe: probe, + ready: make(chan struct{}, 1), + logger: logger, + tracer: tracer, + } + return r +} + +func (r *RoutingBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { + ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.Notify") + defer span.End() + + r.pendingMu.Lock() + defer r.pendingMu.Unlock() + r.notify(ctx, ev) +} + +// notify must only be called while r.pendingMu is held +func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { + ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.notify") + defer span.End() + switch ev := ev.(type) { + case *EventStartBootstrap: + span.SetAttributes(attribute.String("event", "EventStartBootstrap")) + cmd := &routing.EventBootstrapStart[KadKey, ma.Multiaddr]{ + ProtocolID: ev.ProtocolID, + Message: ev.Message, + KnownClosestNodes: SliceOfPeerIDToSliceOfNodeID(ev.SeedNodes), + } + // attempt to advance the bootstrap + next, ok := r.advanceBootstrap(ctx, cmd) + if ok { + r.pending = append(r.pending, next) + } + + case *EventAddAddrInfo: + span.SetAttributes(attribute.String("event", "EventAddAddrInfo")) + // Ignore self + if ev.NodeInfo.ID == r.self { + break + } + // TODO: apply ttl + cmd := &routing.EventIncludeAddCandidate[KadKey, ma.Multiaddr]{ + NodeInfo: kadt.AddrInfo{Info: ev.NodeInfo}, + } + // attempt to advance the include + next, ok := r.advanceInclude(ctx, cmd) + if ok { + r.pending = append(r.pending, next) + } + + case *EventRoutingUpdated: + span.SetAttributes(attribute.String("event", "EventRoutingUpdated")) + cmd := &routing.EventProbeAdd[KadKey]{ + NodeID: AddrInfoToNodeID(ev.NodeInfo), + } + // attempt to advance the probe state machine + next, ok := r.advanceProbe(ctx, cmd) + if ok { + r.pending = append(r.pending, next) + } + + case *EventGetCloserNodesSuccess: + span.SetAttributes(attribute.String("event", "EventGetCloserNodesSuccess"), attribute.String("queryid", string(ev.QueryID)), attribute.String("nodeid", ev.To.String())) + switch ev.QueryID { + case "bootstrap": + for _, info := range ev.CloserNodes { + // TODO: do this after advancing bootstrap + r.pending = append(r.pending, &EventAddAddrInfo{ + NodeInfo: info, + }) + } + cmd := &routing.EventBootstrapMessageResponse[KadKey, ma.Multiaddr]{ + NodeID: kadt.PeerID(ev.To.ID), + Response: CloserNodesResponse(ev.Target, ev.CloserNodes), + } + // attempt to advance the bootstrap + next, ok := r.advanceBootstrap(ctx, cmd) + if ok { + r.pending = append(r.pending, 
next) + } + + case "include": + cmd := &routing.EventIncludeMessageResponse[KadKey, ma.Multiaddr]{ + NodeInfo: kadt.AddrInfo{Info: ev.To}, + Response: CloserNodesResponse(ev.Target, ev.CloserNodes), + } + // attempt to advance the include + next, ok := r.advanceInclude(ctx, cmd) + if ok { + r.pending = append(r.pending, next) + } + + case "probe": + cmd := &routing.EventProbeMessageResponse[KadKey, ma.Multiaddr]{ + NodeInfo: kadt.AddrInfo{Info: ev.To}, + Response: CloserNodesResponse(ev.Target, ev.CloserNodes), + } + // attempt to advance the probe state machine + next, ok := r.advanceProbe(ctx, cmd) + if ok { + r.pending = append(r.pending, next) + } + + default: + panic(fmt.Sprintf("unexpected query id: %s", ev.QueryID)) + } + case *EventGetCloserNodesFailure: + span.SetAttributes(attribute.String("event", "EventGetCloserNodesFailure"), attribute.String("queryid", string(ev.QueryID)), attribute.String("nodeid", ev.To.String())) + span.RecordError(ev.Err) + switch ev.QueryID { + case "bootstrap": + cmd := &routing.EventBootstrapMessageFailure[KadKey]{ + NodeID: kadt.PeerID(ev.To.ID), + Error: ev.Err, + } + // attempt to advance the bootstrap + next, ok := r.advanceBootstrap(ctx, cmd) + if ok { + r.pending = append(r.pending, next) + } + case "include": + cmd := &routing.EventIncludeMessageFailure[KadKey, ma.Multiaddr]{ + NodeInfo: kadt.AddrInfo{Info: ev.To}, + Error: ev.Err, + } + // attempt to advance the include state machine + next, ok := r.advanceInclude(ctx, cmd) + if ok { + r.pending = append(r.pending, next) + } + case "probe": + cmd := &routing.EventProbeMessageFailure[KadKey, ma.Multiaddr]{ + NodeInfo: kadt.AddrInfo{Info: ev.To}, + Error: ev.Err, + } + // attempt to advance the probe state machine + next, ok := r.advanceProbe(ctx, cmd) + if ok { + r.pending = append(r.pending, next) + } + + default: + panic(fmt.Sprintf("unexpected query id: %s", ev.QueryID)) + } + default: + panic(fmt.Sprintf("unexpected dht event: %T", ev)) + } + + if len(r.pending) > 0 { + select { + case r.ready <- struct{}{}: + default: + } + } +} + +func (r *RoutingBehaviour) Ready() <-chan struct{} { + return r.ready +} + +func (r *RoutingBehaviour) Perform(ctx context.Context) (BehaviourEvent, bool) { + ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.Perform") + defer span.End() + + // No inbound work can be done until Perform is complete + r.pendingMu.Lock() + defer r.pendingMu.Unlock() + + for { + // drain queued events first. 
+ if len(r.pending) > 0 { + var ev BehaviourEvent + ev, r.pending = r.pending[0], r.pending[1:] + + if len(r.pending) > 0 { + select { + case r.ready <- struct{}{}: + default: + } + } + return ev, true + } + + // poll the child state machines in priority order to give each an opportunity to perform work + + ev, ok := r.advanceBootstrap(ctx, &routing.EventBootstrapPoll{}) + if ok { + return ev, true + } + + ev, ok = r.advanceInclude(ctx, &routing.EventIncludePoll{}) + if ok { + return ev, true + } + + ev, ok = r.advanceProbe(ctx, &routing.EventProbePoll{}) + if ok { + return ev, true + } + + // finally check if any pending events were accumulated in the meantime + if len(r.pending) == 0 { + return nil, false + } + } +} + +func (r *RoutingBehaviour) advanceBootstrap(ctx context.Context, ev routing.BootstrapEvent) (BehaviourEvent, bool) { + ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.advanceBootstrap") + defer span.End() + bstate := r.bootstrap.Advance(ctx, ev) + switch st := bstate.(type) { + + case *routing.StateBootstrapMessage[KadKey, ma.Multiaddr]: + return &EventOutboundGetCloserNodes{ + QueryID: "bootstrap", + To: NodeIDToAddrInfo(st.NodeID), + Target: st.Message.Target(), + Notify: r, + }, true + + case *routing.StateBootstrapWaiting: + // bootstrap waiting for a message response, nothing to do + case *routing.StateBootstrapFinished: + return &EventBootstrapFinished{ + Stats: st.Stats, + }, true + case *routing.StateBootstrapIdle: + // bootstrap not running, nothing to do + default: + panic(fmt.Sprintf("unexpected bootstrap state: %T", st)) + } + + return nil, false +} + +func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.IncludeEvent) (BehaviourEvent, bool) { + ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.advanceInclude") + defer span.End() + + istate := r.include.Advance(ctx, ev) + switch st := istate.(type) { + case *routing.StateIncludeFindNodeMessage[KadKey, ma.Multiaddr]: + span.SetAttributes(attribute.String("out_event", "EventOutboundGetCloserNodes")) + // include wants to send a find node message to a node + return &EventOutboundGetCloserNodes{ + QueryID: "include", + To: NodeInfoToAddrInfo(st.NodeInfo), + Target: st.NodeInfo.ID().Key(), + Notify: r, + }, true + + case *routing.StateIncludeRoutingUpdated[KadKey, ma.Multiaddr]: + // a node has been included in the routing table + + // notify other routing state machines that there is a new node in the routing table + r.notify(ctx, &EventRoutingUpdated{ + NodeInfo: NodeInfoToAddrInfo(st.NodeInfo), + }) + + // return the event to notify outwards too + span.SetAttributes(attribute.String("out_event", "EventRoutingUpdated")) + return &EventRoutingUpdated{ + NodeInfo: NodeInfoToAddrInfo(st.NodeInfo), + }, true + case *routing.StateIncludeWaitingAtCapacity: + // nothing to do except wait for message response or timeout + case *routing.StateIncludeWaitingWithCapacity: + // nothing to do except wait for message response or timeout + case *routing.StateIncludeWaitingFull: + // nothing to do except wait for message response or timeout + case *routing.StateIncludeIdle: + // nothing to do except wait for new nodes to be added to queue + default: + panic(fmt.Sprintf("unexpected include state: %T", st)) + } + + return nil, false +} + +func (r *RoutingBehaviour) advanceProbe(ctx context.Context, ev routing.ProbeEvent) (BehaviourEvent, bool) { + ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.advanceProbe") + defer span.End() + st := r.probe.Advance(ctx, ev) + switch st := st.(type) { + case 
*routing.StateProbeConnectivityCheck[KadKey]:
+ // probe wants to send a find node message to a node
+ return &EventOutboundGetCloserNodes{
+ QueryID: "probe",
+ To: NodeIDToAddrInfo(st.NodeID),
+ Target: st.NodeID.Key(),
+ Notify: r,
+ }, true
+ case *routing.StateProbeNodeFailure[KadKey]:
+ // a node has failed a connectivity check and has been removed from the routing table and the probe list
+ // add the node to the inclusion list for a second chance
+ r.notify(ctx, &EventAddAddrInfo{
+ NodeInfo: NodeIDToAddrInfo(st.NodeID),
+ })
+ case *routing.StateProbeWaitingAtCapacity:
+ // the probe state machine is waiting for responses for checks and the maximum number of concurrent checks has been reached.
+ // nothing to do except wait for message response or timeout
+ case *routing.StateProbeWaitingWithCapacity:
+ // the probe state machine is waiting for responses for checks but has capacity to perform more
+ // nothing to do except wait for message response or timeout
+ case *routing.StateProbeIdle:
+ // the probe state machine is not running any checks.
+ // nothing to do except wait for message response or timeout
+ default:
+ panic(fmt.Sprintf("unexpected probe state: %T", st))
+ }
+
+ return nil, false
+}
diff --git a/v2/coord/routing_test.go b/v2/coord/routing_test.go
new file mode 100644
index 00000000..e1342c7c
--- /dev/null
+++ b/v2/coord/routing_test.go
@@ -0,0 +1,315 @@
+package coord
+
+import (
+ "errors"
+ "testing"
+ "time"
+
+ "go.opentelemetry.io/otel"
+
+ "github.com/benbjohnson/clock"
+ "github.com/libp2p/go-libp2p/core/peer"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/plprobelab/go-kademlia/network/address"
+ "github.com/plprobelab/go-kademlia/query"
+ "github.com/plprobelab/go-kademlia/routing"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/exp/slog"
+
+ "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest"
+ "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest"
+ "github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
+ "github.com/libp2p/go-libp2p-kad-dht/v2/pb"
+)
+
+func TestRoutingStartBootstrapSendsEvent(t *testing.T) {
+ ctx, cancel := kadtest.CtxShort(t)
+ defer cancel()
+
+ clk := clock.NewMock()
+ _, nodes, err := nettest.LinearTopology(4, clk)
+ require.NoError(t, err)
+
+ self := nodes[0].NodeInfo.ID
+
+ // records the event passed to bootstrap
+ bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{})
+ include := new(NullSM[routing.IncludeEvent, routing.IncludeState])
+ probe := new(NullSM[routing.ProbeEvent, routing.ProbeState])
+
+ routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test"))
+
+ req := &pb.Message{
+ Type: pb.Message_FIND_NODE,
+ Key: []byte(self),
+ }
+
+ ev := &EventStartBootstrap{
+ ProtocolID: address.ProtocolID("test"),
+ Message: req,
+ SeedNodes: []peer.ID{nodes[1].NodeInfo.ID},
+ }
+
+ routingBehaviour.Notify(ctx, ev)
+
+ // the event that should be passed to the bootstrap state machine
+ expected := &routing.EventBootstrapStart[KadKey, ma.Multiaddr]{
+ ProtocolID: ev.ProtocolID,
+ Message: ev.Message,
+ KnownClosestNodes: SliceOfPeerIDToSliceOfNodeID(ev.SeedNodes),
+ }
+ require.Equal(t, expected, bootstrap.Received)
+}
+
+func TestRoutingBootstrapGetClosestNodesSuccess(t *testing.T) {
+ ctx, cancel := kadtest.CtxShort(t)
+ defer cancel()
+
+ clk := clock.NewMock()
+ _, nodes, err := nettest.LinearTopology(4, clk)
+ require.NoError(t, err)
+
+ self := nodes[0].NodeInfo.ID
+
+ // records the event passed to 
bootstrap + bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) + include := new(NullSM[routing.IncludeEvent, routing.IncludeState]) + probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) + + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) + + ev := &EventGetCloserNodesSuccess{ + QueryID: query.QueryID("bootstrap"), + To: nodes[1].NodeInfo, + Target: kadt.PeerID(nodes[0].NodeInfo.ID).Key(), + CloserNodes: []peer.AddrInfo{nodes[2].NodeInfo}, + } + + routingBehaviour.Notify(ctx, ev) + + // bootstrap should receive message response event + require.IsType(t, &routing.EventBootstrapMessageResponse[KadKey, ma.Multiaddr]{}, bootstrap.Received) + + rev := bootstrap.Received.(*routing.EventBootstrapMessageResponse[KadKey, ma.Multiaddr]) + require.Equal(t, nodes[1].NodeInfo.ID, NodeIDToPeerID(rev.NodeID)) + require.Equal(t, ev.CloserNodes, SliceOfNodeInfoToSliceOfAddrInfo(rev.Response.CloserNodes())) +} + +func TestRoutingBootstrapGetClosestNodesFailure(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) + + self := nodes[0].NodeInfo.ID + + // records the event passed to bootstrap + bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) + include := new(NullSM[routing.IncludeEvent, routing.IncludeState]) + probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) + + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) + + failure := errors.New("failed") + ev := &EventGetCloserNodesFailure{ + QueryID: query.QueryID("bootstrap"), + To: nodes[1].NodeInfo, + Target: kadt.PeerID(nodes[0].NodeInfo.ID).Key(), + Err: failure, + } + + routingBehaviour.Notify(ctx, ev) + + // bootstrap should receive message response event + require.IsType(t, &routing.EventBootstrapMessageFailure[KadKey]{}, bootstrap.Received) + + rev := bootstrap.Received.(*routing.EventBootstrapMessageFailure[KadKey]) + require.Equal(t, nodes[1].NodeInfo.ID, NodeIDToPeerID(rev.NodeID)) + require.Equal(t, failure, rev.Error) +} + +func TestRoutingAddNodeInfoSendsEvent(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) + + self := nodes[0].NodeInfo.ID + + // records the event passed to include + include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) + + bootstrap := new(NullSM[routing.BootstrapEvent, routing.BootstrapState]) + probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) + + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) + + ev := &EventAddAddrInfo{ + NodeInfo: nodes[2].NodeInfo, + } + + routingBehaviour.Notify(ctx, ev) + + // the event that should be passed to the include state machine + expected := &routing.EventIncludeAddCandidate[KadKey, ma.Multiaddr]{ + NodeInfo: kadt.AddrInfo{Info: ev.NodeInfo}, + } + require.Equal(t, expected, include.Received) +} + +func TestRoutingIncludeGetClosestNodesSuccess(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) + + self := nodes[0].NodeInfo.ID + + // records the event passed to include + 
include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) + + bootstrap := new(NullSM[routing.BootstrapEvent, routing.BootstrapState]) + probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) + + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) + + ev := &EventGetCloserNodesSuccess{ + QueryID: query.QueryID("include"), + To: nodes[1].NodeInfo, + Target: kadt.PeerID(nodes[0].NodeInfo.ID).Key(), + CloserNodes: []peer.AddrInfo{nodes[2].NodeInfo}, + } + + routingBehaviour.Notify(ctx, ev) + + // include should receive message response event + require.IsType(t, &routing.EventIncludeMessageResponse[KadKey, ma.Multiaddr]{}, include.Received) + + rev := include.Received.(*routing.EventIncludeMessageResponse[KadKey, ma.Multiaddr]) + require.Equal(t, nodes[1].NodeInfo, NodeInfoToAddrInfo(rev.NodeInfo)) + require.Equal(t, ev.CloserNodes, SliceOfNodeInfoToSliceOfAddrInfo(rev.Response.CloserNodes())) +} + +func TestRoutingIncludeGetClosestNodesFailure(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) + + self := nodes[0].NodeInfo.ID + + // records the event passed to include + include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) + + bootstrap := new(NullSM[routing.BootstrapEvent, routing.BootstrapState]) + probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) + + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) + + failure := errors.New("failed") + ev := &EventGetCloserNodesFailure{ + QueryID: query.QueryID("include"), + To: nodes[1].NodeInfo, + Target: kadt.PeerID(nodes[0].NodeInfo.ID).Key(), + Err: failure, + } + + routingBehaviour.Notify(ctx, ev) + + // include should receive message response event + require.IsType(t, &routing.EventIncludeMessageFailure[KadKey, ma.Multiaddr]{}, include.Received) + + rev := include.Received.(*routing.EventIncludeMessageFailure[KadKey, ma.Multiaddr]) + require.Equal(t, nodes[1].NodeInfo, NodeInfoToAddrInfo(rev.NodeInfo)) + require.Equal(t, failure, rev.Error) +} + +func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) + + self := nodes[0].NodeInfo.ID + rt := nodes[0].RoutingTable + + includeCfg := routing.DefaultIncludeConfig() + includeCfg.Clock = clk + include, err := routing.NewInclude[KadKey, ma.Multiaddr](rt, includeCfg) + require.NoError(t, err) + + probeCfg := routing.DefaultProbeConfig() + probeCfg.Clock = clk + probeCfg.CheckInterval = 5 * time.Minute + probe, err := routing.NewProbe[KadKey, ma.Multiaddr](rt, probeCfg) + require.NoError(t, err) + + // ensure bootstrap is always idle + bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) + + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) + + // a new node to be included + candidate := nodes[len(nodes)-1].NodeInfo + + // the routing table should not contain the node yet + _, intable := rt.GetNode(kadt.PeerID(candidate.ID).Key()) + require.False(t, intable) + + // notify that there is a new node to be included + routingBehaviour.Notify(ctx, &EventAddAddrInfo{ + NodeInfo: candidate, + }) + + // collect 
the result of the notify + dev, ok := routingBehaviour.Perform(ctx) + require.True(t, ok) + + // include should be asking to send a message to the node + require.IsType(t, &EventOutboundGetCloserNodes{}, dev) + + oev := dev.(*EventOutboundGetCloserNodes) + + // advance time a little + clk.Add(time.Second) + + // notify a successful response back (best to use the notify included in the event even though it will be the behaviour's Notify method) + oev.Notify.Notify(ctx, &EventGetCloserNodesSuccess{ + QueryID: oev.QueryID, + To: oev.To, + Target: oev.Target, + CloserNodes: []peer.AddrInfo{nodes[1].NodeInfo}, // must include one for include check to pass + }) + + // the routing table should now contain the node + _, intable = rt.GetNode(kadt.PeerID(candidate.ID).Key()) + require.True(t, intable) + + // routing update event should be emitted from the include state machine + dev, ok = routingBehaviour.Perform(ctx) + require.True(t, ok) + require.IsType(t, &EventRoutingUpdated{}, dev) + + // advance time past the probe check interval + clk.Add(probeCfg.CheckInterval) + + // routing update event should be emitted from the include state machine + dev, ok = routingBehaviour.Perform(ctx) + require.True(t, ok) + require.IsType(t, &EventOutboundGetCloserNodes{}, dev) + + // confirm that the message is for the correct node + oev = dev.(*EventOutboundGetCloserNodes) + require.Equal(t, query.QueryID("probe"), oev.QueryID) + require.Equal(t, candidate.ID, oev.To.ID) +} diff --git a/v2/dht.go b/v2/dht.go index 3c0a24fc..31787ae5 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -1,22 +1,26 @@ package dht import ( + "context" "crypto/sha256" "fmt" "io" "sync" + "time" - "github.com/iand/zikade/kademlia" "github.com/ipfs/go-datastore/trace" "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" - ma "github.com/multiformats/go-multiaddr" + "github.com/libp2p/go-libp2p/core/peer" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-kademlia/routing" "golang.org/x/exp/slog" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) // DHT is an implementation of Kademlia with S/Kademlia modifications. @@ -35,11 +39,11 @@ type DHT struct { mode mode // kad is a reference to the go-kademlia coordinator - kad *kademlia.Dht[key.Key256, ma.Multiaddr] + kad *coord.Coordinator // rt holds a reference to the routing table implementation. This can be // configured via the Config struct. - rt kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]] + rt routing.RoutingTableCpl[key.Key256, kad.NodeID[key.Key256]] // backends backends map[string]Backend @@ -52,6 +56,9 @@ type DHT struct { // these events in networkEventsSubscription and consumes them // asynchronously in consumeNetworkEvents. 
sub event.Subscription
+
+	// tele holds a reference to a telemetry struct
+	tele *tele.Telemetry
 }
 
 // New constructs a new [DHT] for the given underlying host and with the given
@@ -59,8 +66,9 @@
 func New(h host.Host, cfg *Config) (*DHT, error) {
 	var err error
 
-	// check if the configuration is valid
-	if err = cfg.Validate(); err != nil {
+	if cfg == nil {
+		cfg = DefaultConfig()
+	} else if err = cfg.Validate(); err != nil {
 		return nil, fmt.Errorf("validate DHT config: %w", err)
 	}
 
@@ -79,6 +87,12 @@ func New(h host.Host, cfg *Config) (*DHT, error) {
 		return nil, fmt.Errorf("new trie routing table: %w", err)
 	}
 
+	// initialize a new telemetry struct
+	d.tele, err = tele.New(cfg.MeterProvider, cfg.TracerProvider)
+	if err != nil {
+		return nil, fmt.Errorf("init telemetry: %w", err)
+	}
+
 	if len(cfg.Backends) != 0 {
 		d.backends = cfg.Backends
 	} else if cfg.ProtocolID == ProtocolIPFS {
@@ -91,37 +105,60 @@ func New(h host.Host, cfg *Config) (*DHT, error) {
 		}
 
 		// wrap datastore in open telemetry tracing
-		dstore = trace.New(dstore, tracer)
+		dstore = trace.New(dstore, d.tele.Tracer)
 
-		pbeCfg := DefaultProviderBackendConfig()
+		pbeCfg, err := DefaultProviderBackendConfig()
+		if err != nil {
+			return nil, fmt.Errorf("default provider config: %w", err)
+		}
 		pbeCfg.Logger = cfg.Logger
 		pbeCfg.AddressFilter = cfg.AddressFilter
+		pbeCfg.Tele = d.tele
+		pbeCfg.clk = d.cfg.Clock
 
 		pbe, err := NewBackendProvider(h.Peerstore(), dstore, pbeCfg)
 		if err != nil {
 			return nil, fmt.Errorf("new provider backend: %w", err)
 		}
 
-		rbeCfg := DefaultRecordBackendConfig()
+		rbeCfg, err := DefaultRecordBackendConfig()
+		if err != nil {
+			return nil, fmt.Errorf("default record config: %w", err)
+		}
 		rbeCfg.Logger = cfg.Logger
+		rbeCfg.Tele = d.tele
+		rbeCfg.clk = d.cfg.Clock
+
+		ipnsBe, err := NewBackendIPNS(dstore, h.Peerstore(), rbeCfg)
+		if err != nil {
+			return nil, fmt.Errorf("new ipns backend: %w", err)
+		}
+
+		pkBe, err := NewBackendPublicKey(dstore, rbeCfg)
+		if err != nil {
+			return nil, fmt.Errorf("new public key backend: %w", err)
+		}
 
 		d.backends = map[string]Backend{
-			"ipns":      NewBackendIPNS(dstore, h.Peerstore(), rbeCfg),
-			"pk":        NewBackendPublicKey(dstore, rbeCfg),
+			"ipns":      ipnsBe,
+			"pk":        pkBe,
 			"providers": pbe,
 		}
 	}
 
 	// wrap all backends with tracing
-	for ns, backend := range d.backends {
-		d.backends[ns] = &tracedBackend{
-			namespace: ns,
-			backend:   backend,
-		}
+	for ns, be := range d.backends {
+		d.backends[ns] = traceWrapBackend(ns, be, d.tele.Tracer)
 	}
 
 	// instantiate a new Kademlia DHT coordinator.
-	d.kad, err = kademlia.NewDht[key.Key256, ma.Multiaddr](nid, d, d.rt, nil)
+	coordCfg, err := coord.DefaultCoordinatorConfig()
+	if err != nil {
+		return nil, fmt.Errorf("new coordinator config: %w", err)
+	}
+	coordCfg.Tele = d.tele
+
+	d.kad, err = coord.NewCoordinator(d.host.ID(), &Router{host: h}, d.rt, coordCfg)
 	if err != nil {
 		return nil, fmt.Errorf("new coordinator: %w", err)
 	}
@@ -155,6 +192,10 @@ func (d *DHT) Close() error {
 		d.log.With("err", err).Debug("failed closing event bus subscription")
 	}
 
+	if err := d.kad.Close(); err != nil {
+		d.log.With("err", err).Debug("failed closing coordinator")
+	}
+
 	for ns, b := range d.backends {
 		closer, ok := b.(io.Closer)
 		if !ok {
@@ -265,11 +306,20 @@ func (d *DHT) logErr(err error, msg string) {
 	d.log.Warn(msg, "err", err.Error())
 }
 
+// AddAddresses suggests peers and their associated addresses to be added to the routing table.
+// Addresses will be added to the peerstore with the supplied time to live.
+func (d *DHT) AddAddresses(ctx context.Context, ais []peer.AddrInfo, ttl time.Duration) error {
+	ctx, span := d.tele.Tracer.Start(ctx, "DHT.AddAddresses")
+	defer span.End()
+
+	return d.kad.AddNodes(ctx, ais, ttl)
+}
+
 // newSHA256Key returns a [key.Key256] that conforms to the [kad.Key] interface by
 // SHA256 hashing the given bytes and wrapping them in a [key.Key256].
 func newSHA256Key(data []byte) key.Key256 {
-	b := sha256.Sum256(data)
-	return key.NewKey256(b[:])
+	h := sha256.Sum256(data)
+	return key.NewKey256(h[:])
 }
 
 // typedBackend returns the backend at the given namespace. It is casted to the
diff --git a/v2/dht_test.go b/v2/dht_test.go
index 832c8ed1..b42b77b5 100644
--- a/v2/dht_test.go
+++ b/v2/dht_test.go
@@ -1,10 +1,19 @@
 package dht
 
 import (
+	"context"
+	"fmt"
+	"reflect"
 	"testing"
+	"time"
 
 	"github.com/libp2p/go-libp2p"
+	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/libp2p/go-libp2p-kad-dht/v2/coord"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest"
 )
 
 func TestNew(t *testing.T) {
@@ -65,3 +74,56 @@ func TestNew(t *testing.T) {
 	})
 }
+
+// expectEventType selects on the event channel until an event of the expected type is sent.
+func expectEventType(t *testing.T, ctx context.Context, events <-chan coord.RoutingNotification, expected coord.RoutingNotification) (coord.RoutingNotification, error) {
+	t.Helper()
+	for {
+		select {
+		case ev := <-events:
+			t.Logf("saw event: %T\n", ev)
+			if reflect.TypeOf(ev) == reflect.TypeOf(expected) {
+				return ev, nil
+			}
+		case <-ctx.Done():
+			return nil, fmt.Errorf("test deadline exceeded while waiting for event %T", expected)
+		}
+	}
+}
+
+func TestAddAddresses(t *testing.T) {
+	ctx, cancel := kadtest.CtxShort(t)
+	defer cancel()
+
+	localCfg := DefaultConfig()
+
+	local := newClientDht(t, localCfg)
+
+	remote := newServerDht(t, nil)
+
+	// Populate entries in remote's routing table so it passes a connectivity check
+	fillRoutingTable(t, remote, 1)
+
+	// local routing table should not contain the node
+	_, err := local.kad.GetNode(ctx, remote.host.ID())
+	require.ErrorIs(t, err, coord.ErrNodeNotFound)
+
+	remoteAddrInfo := peer.AddrInfo{
+		ID:    remote.host.ID(),
+		Addrs: remote.host.Addrs(),
+	}
+	require.NotEmpty(t, remoteAddrInfo.ID)
+	require.NotEmpty(t, remoteAddrInfo.Addrs)
+
+	// Add the remote's addresses to the local DHT
+	err = local.AddAddresses(ctx, []peer.AddrInfo{remoteAddrInfo}, time.Minute)
+	require.NoError(t, err)
+
+	// the include state machine runs in the background and should eventually add the node to the routing table
+	_, err = expectEventType(t, ctx, local.kad.RoutingNotifications(), &coord.EventRoutingUpdated{})
+	require.NoError(t, err)
+
+	// the routing table should now contain the node
+	_, err = local.kad.GetNode(ctx, remote.host.ID())
+	require.NoError(t, err)
+}
diff --git a/v2/go.mod b/v2/go.mod
index 9c805bf0..1473c15b 100644
--- a/v2/go.mod
+++ b/v2/go.mod
@@ -5,10 +5,9 @@ go 1.20
 require (
 	github.com/benbjohnson/clock v1.3.5
 	github.com/hashicorp/golang-lru/v2 v2.0.5
-	github.com/iand/zikade v0.0.0-20230824143824-d11f2132b4fb
 	github.com/ipfs/boxo v0.12.0
 	github.com/ipfs/go-cid v0.4.1
-	github.com/ipfs/go-datastore v0.6.0
+	github.com/ipfs/go-datastore v0.6.1-0.20230901172804-1caa2449ed7c
 	github.com/ipfs/go-ds-leveldb v0.5.0
 	github.com/ipfs/go-log/v2 v2.5.1
 	github.com/libp2p/go-libp2p v0.30.0
@@ -16,10 +15,13 @@
 	github.com/libp2p/go-msgio v0.3.0
 	github.com/multiformats/go-base32 v0.1.0
github.com/multiformats/go-multiaddr v0.11.0 - github.com/plprobelab/go-kademlia v0.0.0-20230823114513-9b9e606066c9 + github.com/plprobelab/go-kademlia v0.0.0-20230901130940-286ab4ceca60 github.com/stretchr/testify v1.8.4 - go.opencensus.io v0.24.0 go.opentelemetry.io/otel v1.17.0 + go.opentelemetry.io/otel/exporters/jaeger v1.16.0 + go.opentelemetry.io/otel/metric v1.17.0 + go.opentelemetry.io/otel/sdk v1.17.0 + go.opentelemetry.io/otel/sdk/metric v0.40.0 go.opentelemetry.io/otel/trace v1.17.0 go.uber.org/zap/exp v0.1.0 golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 @@ -100,7 +102,6 @@ require ( github.com/raulk/go-watchdog v1.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/syndtr/goleveldb v1.0.0 // indirect - go.opentelemetry.io/otel/metric v1.17.0 // indirect go.uber.org/dig v1.17.0 // indirect go.uber.org/fx v1.20.0 // indirect go.uber.org/multierr v1.11.0 // indirect @@ -115,5 +116,3 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect ) - -replace github.com/ipfs/go-datastore v0.6.0 => github.com/dennis-tra/go-datastore v0.0.0-20230823171431-12ac85aaa42a diff --git a/v2/go.sum b/v2/go.sum index 8a635d8c..bc586fb5 100644 --- a/v2/go.sum +++ b/v2/go.sum @@ -18,12 +18,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= @@ -41,8 +39,6 @@ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6Uh github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= -github.com/dennis-tra/go-datastore v0.0.0-20230823171431-12ac85aaa42a h1:YnrW4Kcy7kTIJRfL3Xg7+fIMS17izs0WWH2GdYwYhNs= -github.com/dennis-tra/go-datastore v0.0.0-20230823171431-12ac85aaa42a/go.mod h1:3Et7HSjOA8tPu9OjYuDZxLAgBLfvlNMD4r8BIuri9eo= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= @@ -50,10 +46,6 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn 
github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= @@ -83,8 +75,6 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -92,14 +82,6 @@ github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -107,12 +89,7 @@ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pO github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree 
v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= @@ -125,7 +102,6 @@ github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBB github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= @@ -141,13 +117,13 @@ github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyf github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= -github.com/iand/zikade v0.0.0-20230824143824-d11f2132b4fb h1:L0sxl/vHUf/wdEX6+QJGC0cQsnn2AglFL0qbJvv8+64= -github.com/iand/zikade v0.0.0-20230824143824-d11f2132b4fb/go.mod h1:9BszmzAjw3qghO/oCaTvIhQUHb3h+F0EAHecClvcUnA= github.com/ipfs/boxo v0.12.0 h1:AXHg/1ONZdRQHQLgG5JHsSC3XoE4DjCAMgK+asZvUcQ= github.com/ipfs/boxo v0.12.0/go.mod h1:xAnfiU6PtxWCnRqu7dcXQ10bB5/kvI1kXRotuGqGBhg= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.6.1-0.20230901172804-1caa2449ed7c h1:iSyhKydtSJiEkmf5O3KizuySDB0zgyWPth76NACTMVI= +github.com/ipfs/go-datastore v0.6.1-0.20230901172804-1caa2449ed7c/go.mod h1:3Et7HSjOA8tPu9OjYuDZxLAgBLfvlNMD4r8BIuri9eo= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= @@ -283,8 +259,8 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhM github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
-github.com/plprobelab/go-kademlia v0.0.0-20230823114513-9b9e606066c9 h1:qqrJgUNOCAozZDkL0gH57FUi+aXj/d/SdldaLAZUFUU= -github.com/plprobelab/go-kademlia v0.0.0-20230823114513-9b9e606066c9/go.mod h1:OMu6Kyh5AetV3uLRVSZlp6WcwrZUn3nyRFaRuJxVWJQ= +github.com/plprobelab/go-kademlia v0.0.0-20230901130940-286ab4ceca60 h1:fgo8NhFeL+p7atahZNtvo1BfWClUNRvAjzC2ikEwvsY= +github.com/plprobelab/go-kademlia v0.0.0-20230901130940-286ab4ceca60/go.mod h1:OMu6Kyh5AetV3uLRVSZlp6WcwrZUn3nyRFaRuJxVWJQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= @@ -293,7 +269,6 @@ github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= @@ -350,16 +325,12 @@ github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= @@ -375,12 +346,16 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= 
-go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/otel v1.17.0 h1:MW+phZ6WZ5/uk2nd93ANk/6yJ+dVrvNWUjGhnnFU5jM= go.opentelemetry.io/otel v1.17.0/go.mod h1:I2vmBGtFaODIVMBSTPVDlJSzBDNf93k60E6Ft0nyjo0= +go.opentelemetry.io/otel/exporters/jaeger v1.16.0 h1:YhxxmXZ011C0aDZKoNw+juVWAmEfv/0W2XBOv9aHTaA= +go.opentelemetry.io/otel/exporters/jaeger v1.16.0/go.mod h1:grYbBo/5afWlPpdPZYhyn78Bk04hnvxn2+hvxQhKIQM= go.opentelemetry.io/otel/metric v1.17.0 h1:iG6LGVz5Gh+IuO0jmgvpTB6YVrCGngi8QGm+pMd8Pdc= go.opentelemetry.io/otel/metric v1.17.0/go.mod h1:h4skoxdZI17AxwITdmdZjjYJQH5nzijUUjm+wtPph5o= +go.opentelemetry.io/otel/sdk v1.17.0 h1:FLN2X66Ke/k5Sg3V623Q7h7nt3cHXaW1FOvKKrW0IpE= +go.opentelemetry.io/otel/sdk v1.17.0/go.mod h1:U87sE0f5vQB7hwUoW98pW5Rz4ZDuCFBZFNUBlSgmDFQ= +go.opentelemetry.io/otel/sdk/metric v0.40.0 h1:qOM29YaGcxipWjL5FzpyZDpCYrDREvX0mVlmXdOjCHU= +go.opentelemetry.io/otel/sdk/metric v0.40.0/go.mod h1:dWxHtdzdJvg+ciJUKLTKwrMe5P6Dv3FyDbh8UkfgkVs= go.opentelemetry.io/otel/trace v1.17.0 h1:/SWhSRHmDPOImIAetP1QAeMnZYiQXrTy4fMMYOdSKWQ= go.opentelemetry.io/otel/trace v1.17.0/go.mod h1:I/4vKTgFclIsXRVucpH25X0mpFSczM7aHeaz0ZBLWjY= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -421,7 +396,6 @@ golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMe golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -444,7 +418,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= @@ -502,7 +475,6 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -530,25 +502,10 @@ google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= @@ -575,7 +532,6 @@ grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJd honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools 
v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= diff --git a/v2/handlers.go b/v2/handlers.go index 9a37c17b..5339c3fa 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -234,7 +234,7 @@ func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Me // closerPeers returns the closest peers to the given target key this host knows // about. It doesn't return 1) itself 2) the peer that asked for closer peers. func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256) []*pb.Message_Peer { - ctx, span := tracer.Start(ctx, "DHT.closerPeers", otel.WithAttributes(attribute.String("remote", remote.String()), attribute.String("target", target.HexString()))) + _, span := d.tele.Tracer.Start(ctx, "DHT.closerPeers", otel.WithAttributes(attribute.String("remote", remote.String()), attribute.String("target", target.HexString()))) defer span.End() peers := d.rt.NearestNodes(target, d.cfg.BucketSize) diff --git a/v2/handlers_test.go b/v2/handlers_test.go index 5c9b0c89..115b8bc1 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -75,11 +75,11 @@ func newIdentity(t testing.TB) (peer.ID, crypto.PrivKey) { return id, priv } -func fillRoutingTable(t testing.TB, d *DHT) { +// fillRoutingTable populates d's routing table and peerstore with n random peers and addresses +func fillRoutingTable(t testing.TB, d *DHT, n int) { t.Helper() - // 250 is a common number of peers to have in the routing table - for i := 0; i < 250; i++ { + for i := 0; i < n; i++ { // generate peer ID pid := newPeerID(t) @@ -104,7 +104,7 @@ func TestMessage_noKey(t *testing.T) { pb.Message_ADD_PROVIDER, pb.Message_GET_PROVIDERS, } { - t.Run(fmt.Sprintf("%s", typ), func(t *testing.T) { + t.Run(typ.String(), func(t *testing.T) { msg := &pb.Message{Type: typ} // no key _, err := d.handleMsg(context.Background(), peer.ID(""), msg) if err == nil { @@ -803,8 +803,11 @@ func TestDHT_handlePutValue_moved_from_v1_atomic_operation(t *testing.T) { ds, err := InMemoryDatastore() require.NoError(t, err) + cfg, err := DefaultRecordBackendConfig() + require.NoError(t, err) + recBackend := &RecordBackend{ - cfg: DefaultRecordBackendConfig(), + cfg: cfg, log: devnull, namespace: "test", datastore: ds, @@ -862,7 +865,7 @@ func TestDHT_handlePutValue_moved_from_v1_atomic_operation(t *testing.T) { func BenchmarkDHT_handleGetValue(b *testing.B) { d := newTestDHT(b) - fillRoutingTable(b, d) + fillRoutingTable(b, d, 250) rbe, ok := d.backends[namespaceIPNS].(*RecordBackend) require.True(b, ok) @@ -906,7 +909,7 @@ func BenchmarkDHT_handleGetValue(b *testing.B) { func TestDHT_handleGetValue_happy_path_ipns_record(t *testing.T) { d := newTestDHT(t) - fillRoutingTable(t, d) + fillRoutingTable(t, d, 250) remote, priv := newIdentity(t) @@ -941,7 +944,7 @@ func TestDHT_handleGetValue_happy_path_ipns_record(t *testing.T) { func TestDHT_handleGetValue_record_not_found(t *testing.T) { d := newTestDHT(t) - fillRoutingTable(t, d) + fillRoutingTable(t, d, 250) for _, ns := range []string{namespaceIPNS, namespacePublicKey} { t.Run(ns, func(t *testing.T) { @@ -965,7 +968,7 @@ func TestDHT_handleGetValue_record_not_found(t *testing.T) { func TestDHT_handleGetValue_corrupt_record_in_datastore(t *testing.T) { d := newTestDHT(t) - fillRoutingTable(t, d) + fillRoutingTable(t, d, 250) for _, ns := range []string{namespaceIPNS, namespacePublicKey} { 
t.Run(ns, func(t *testing.T) { @@ -1003,7 +1006,7 @@ func TestDHT_handleGetValue_corrupt_record_in_datastore(t *testing.T) { func TestDHT_handleGetValue_ipns_max_age_exceeded_in_datastore(t *testing.T) { d := newTestDHT(t) - fillRoutingTable(t, d) + fillRoutingTable(t, d, 250) remote, priv := newIdentity(t) @@ -1045,7 +1048,7 @@ func TestDHT_handleGetValue_ipns_max_age_exceeded_in_datastore(t *testing.T) { func TestDHT_handleGetValue_does_not_validate_stored_record(t *testing.T) { d := newTestDHT(t) - fillRoutingTable(t, d) + fillRoutingTable(t, d, 250) rbe, err := typedBackend[*RecordBackend](d, namespaceIPNS) require.NoError(t, err) @@ -1128,7 +1131,7 @@ func TestDHT_handleGetValue_supports_providers(t *testing.T) { p := newAddrInfo(t) key := []byte("random-key") - fillRoutingTable(t, d) + fillRoutingTable(t, d, 250) // add to addresses peerstore d.host.Peerstore().AddAddrs(p.ID, p.Addrs, time.Hour) @@ -1355,7 +1358,7 @@ func BenchmarkDHT_handleGetProviders(b *testing.B) { ctx := context.Background() d := newTestDHT(b) - fillRoutingTable(b, d) + fillRoutingTable(b, d, 250) be, ok := d.backends[namespaceIPNS].(*RecordBackend) require.True(b, ok) @@ -1401,7 +1404,7 @@ func TestDHT_handleGetProviders_happy_path(t *testing.T) { ctx := context.Background() d := newTestDHT(t) - fillRoutingTable(t, d) + fillRoutingTable(t, d, 250) key := []byte("random-key") @@ -1452,7 +1455,7 @@ func TestDHT_handleGetProviders_do_not_return_expired_records(t *testing.T) { ctx := context.Background() d := newTestDHT(t) - fillRoutingTable(t, d) + fillRoutingTable(t, d, 250) key := []byte("random-key") @@ -1511,7 +1514,7 @@ func TestDHT_handleGetProviders_only_serve_filtered_addresses(t *testing.T) { d := newTestDHTWithConfig(t, cfg) - fillRoutingTable(t, d) + fillRoutingTable(t, d, 250) key := []byte("random-key") diff --git a/v2/internal/kadtest/context.go b/v2/internal/kadtest/context.go new file mode 100644 index 00000000..1ef31f40 --- /dev/null +++ b/v2/internal/kadtest/context.go @@ -0,0 +1,31 @@ +package kadtest + +import ( + "context" + "testing" + "time" +) + +// CtxShort returns a Context and a CancelFunc. The context will be +// cancelled after 10 seconds or just before the test binary deadline (as +// specified by the -timeout flag when running the test), whichever is +// sooner. The CancelFunc may be called to cancel the context earlier than +// the deadline. 
+func CtxShort(t *testing.T) (context.Context, context.CancelFunc) { + t.Helper() + + timeout := 10 * time.Second + goal := time.Now().Add(timeout) + + deadline, ok := t.Deadline() + if !ok { + deadline = goal + } else { + deadline = deadline.Add(-time.Second) + if deadline.After(goal) { + deadline = goal + } + } + + return context.WithDeadline(context.Background(), deadline) +} diff --git a/v2/internal/kadtest/tracing.go b/v2/internal/kadtest/tracing.go new file mode 100644 index 00000000..dc7c82c8 --- /dev/null +++ b/v2/internal/kadtest/tracing.go @@ -0,0 +1,33 @@ +package kadtest + +import ( + "context" + "fmt" + "testing" + + "go.opentelemetry.io/otel/exporters/jaeger" + "go.opentelemetry.io/otel/sdk/trace" +) + +// JaegerTracerProvider creates a tracer provider that exports traces to a Jaeger instance running +// on localhost on port 14268 +func JaegerTracerProvider(t *testing.T) *trace.TracerProvider { + t.Helper() + + traceHost := "127.0.0.1" + tracePort := 14268 + + endpoint := fmt.Sprintf("http://%s:%d/api/traces", traceHost, tracePort) + exp, err := jaeger.New(jaeger.WithCollectorEndpoint(jaeger.WithEndpoint(endpoint))) + if err != nil { + t.Fatalf("failed to create jaeger exporter: %v", err) + } + + tp := trace.NewTracerProvider(trace.WithBatcher(exp)) + + t.Cleanup(func() { + tp.Shutdown(context.Background()) + }) + + return tp +} diff --git a/v2/kadt/kadt.go b/v2/kadt/kadt.go index d3b33db5..d71ecc39 100644 --- a/v2/kadt/kadt.go +++ b/v2/kadt/kadt.go @@ -13,23 +13,23 @@ import ( "github.com/plprobelab/go-kademlia/key" ) -// PeerID is a type alias for peer.ID that implements the kad.NodeID interface. -// This means we can use PeerID for any operation that interfaces with -// go-kademlia. +// PeerID is a type alias for [peer.ID] that implements the [kad.NodeID] +// interface. This means we can use PeerID for any operation that interfaces +// with go-kademlia. type PeerID peer.ID // assertion that PeerID implements the kad.NodeID interface var _ kad.NodeID[key.Key256] = PeerID("") -// Key returns the Kademlia key of PeerID. The amino DHT operates on SHA256 -// hashes of, in this case, peer.IDs. This means this Key method takes -// the peer.ID, hashes it and constructs a 256-bit key. +// Key returns the Kademlia [key.Key256] of PeerID. The amino DHT operates on +// SHA256 hashes of, in this case, peer.IDs. This means this Key method takes +// the [peer.ID], hashes it and constructs a 256-bit key. func (p PeerID) Key() key.Key256 { h := sha256.Sum256([]byte(p)) return key.NewKey256(h[:]) } -// String calls String on the underlying peer.ID and returns a string like +// String calls String on the underlying [peer.ID] and returns a string like // QmFoo or 12D3KooBar. func (p PeerID) String() string { return peer.ID(p).String() @@ -45,14 +45,19 @@ type AddrInfo struct { Info peer.AddrInfo } -// assertion that AddrInfo implements the kad.NodeInfo interface +// assertion that AddrInfo implements the [kad.NodeInfo] interface var _ kad.NodeInfo[key.Key256, ma.Multiaddr] = (*AddrInfo)(nil) -// ID returns the kad.NodeID of this peer's information struct. +// ID returns the [kad.NodeID] of this peer's information struct. func (ai AddrInfo) ID() kad.NodeID[key.Key256] { return PeerID(ai.Info.ID) } +// PeerID returns the peer ID of this peer's information struct as a PeerID. +func (ai AddrInfo) PeerID() PeerID { + return PeerID(ai.Info.ID) +} + // Addresses returns all Multiaddresses of this peer. 
func (ai AddrInfo) Addresses() []ma.Multiaddr { addrs := make([]ma.Multiaddr, len(ai.Info.Addrs)) diff --git a/v2/metrics/metrics.go b/v2/metrics/metrics.go deleted file mode 100644 index 3cee215d..00000000 --- a/v2/metrics/metrics.go +++ /dev/null @@ -1,118 +0,0 @@ -package metrics - -import ( - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -var ( - defaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) - defaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) -) - -// Keys -var ( - KeyCacheHit, _ = tag.NewKey("hit") - KeyRecordType, _ = tag.NewKey("record_type") // currently only used for the provider backend LRU cache - KeyMessageType, _ = tag.NewKey("message_type") - KeyPeerID, _ = tag.NewKey("peer_id") - // KeyInstanceID identifies a dht instance by the pointer address. - // Useful for differentiating between different dhts that have the same peer id. - KeyInstanceID, _ = tag.NewKey("instance_id") -) - -// Measures -var ( - ReceivedMessages = stats.Int64("libp2p.io/dht/kad/received_messages", "Total number of messages received per RPC", stats.UnitDimensionless) - ReceivedMessageErrors = stats.Int64("libp2p.io/dht/kad/received_message_errors", "Total number of errors for messages received per RPC", stats.UnitDimensionless) - ReceivedBytes = stats.Int64("libp2p.io/dht/kad/received_bytes", "Total received bytes per RPC", stats.UnitBytes) - InboundRequestLatency = stats.Float64("libp2p.io/dht/kad/inbound_request_latency", "Latency per RPC", stats.UnitMilliseconds) - OutboundRequestLatency = stats.Float64("libp2p.io/dht/kad/outbound_request_latency", "Latency per RPC", stats.UnitMilliseconds) - SentMessages = stats.Int64("libp2p.io/dht/kad/sent_messages", "Total number of messages sent per RPC", stats.UnitDimensionless) - SentMessageErrors = stats.Int64("libp2p.io/dht/kad/sent_message_errors", "Total number of errors for messages sent per RPC", stats.UnitDimensionless) - SentRequests = stats.Int64("libp2p.io/dht/kad/sent_requests", "Total number of requests sent per RPC", stats.UnitDimensionless) - SentRequestErrors = stats.Int64("libp2p.io/dht/kad/sent_request_errors", "Total number of errors for requests sent per RPC", stats.UnitDimensionless) - SentBytes = stats.Int64("libp2p.io/dht/kad/sent_bytes", "Total sent bytes per RPC", stats.UnitBytes) - LRUCache = stats.Int64("libp2p.io/dht/kad/lru_cache", "Cache hit or miss counter", stats.UnitDimensionless) - NetworkSize = stats.Int64("libp2p.io/dht/kad/network_size", "Network size estimation", stats.UnitDimensionless) -) - -// Views -var ( - ReceivedMessagesView = &view.View{ - Measure: ReceivedMessages, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: view.Count(), - } - ReceivedMessageErrorsView = &view.View{ - Measure: ReceivedMessageErrors, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: view.Count(), - } - ReceivedBytesView = &view.View{ - Measure: ReceivedBytes, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: defaultBytesDistribution, - } - InboundRequestLatencyView = &view.View{ - Measure: InboundRequestLatency, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: 
defaultMillisecondsDistribution, - } - OutboundRequestLatencyView = &view.View{ - Measure: OutboundRequestLatency, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: defaultMillisecondsDistribution, - } - SentMessagesView = &view.View{ - Measure: SentMessages, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: view.Count(), - } - SentMessageErrorsView = &view.View{ - Measure: SentMessageErrors, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: view.Count(), - } - SentRequestsView = &view.View{ - Measure: SentRequests, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: view.Count(), - } - SentRequestErrorsView = &view.View{ - Measure: SentRequestErrors, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: view.Count(), - } - SentBytesView = &view.View{ - Measure: SentBytes, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: defaultBytesDistribution, - } - LRUCacheView = &view.View{ - Measure: LRUCache, - TagKeys: []tag.Key{KeyPeerID, KeyInstanceID}, - Aggregation: view.Count(), - } - NetworkSizeView = &view.View{ - Measure: NetworkSize, - TagKeys: []tag.Key{KeyPeerID, KeyInstanceID}, - Aggregation: view.Count(), - } -) - -// DefaultViews with all views in it. -var DefaultViews = []*view.View{ - ReceivedMessagesView, - ReceivedMessageErrorsView, - ReceivedBytesView, - InboundRequestLatencyView, - OutboundRequestLatencyView, - SentMessagesView, - SentMessageErrorsView, - SentRequestsView, - SentRequestErrorsView, - SentBytesView, - NetworkSizeView, -} diff --git a/v2/pb/msg.aux.go b/v2/pb/msg.aux.go index cd9f5588..14b7f6d0 100644 --- a/v2/pb/msg.aux.go +++ b/v2/pb/msg.aux.go @@ -4,13 +4,12 @@ import ( "bytes" "crypto/sha256" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "golang.org/x/exp/slog" - - "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) // this file contains auxiliary methods to augment the protobuf generated types. 
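
Editorial aside for reviewers of the metrics removal above: the deleted package was the last OpenCensus dependency, and its measures and views are superseded by OpenTelemetry instruments created through the new tele package introduced in this series. The sketch below shows, under stated assumptions, what one of the removed measures looks like under the OTel metric API. The instrument name and the message_type attribute mirror the deleted definitions; newReceivedMessagesCounter and recordReceived are illustrative helpers, not symbols added by this patch.

package sketch

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

// newReceivedMessagesCounter creates a rough OTel counterpart of the removed
// OpenCensus ReceivedMessages measure/view pair.
func newReceivedMessagesCounter(m metric.Meter) (metric.Int64Counter, error) {
	return m.Int64Counter(
		"received_messages",
		metric.WithDescription("Total number of messages received per RPC"),
		metric.WithUnit("1"),
	)
}

// recordReceived increments the counter; what used to be an OpenCensus tag
// is now passed as an explicit attribute on the measurement.
func recordReceived(ctx context.Context, c metric.Int64Counter, msgType string) {
	c.Add(ctx, 1, metric.WithAttributes(attribute.String("message_type", msgType)))
}
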
diff --git a/v2/query_test.go b/v2/query_test.go new file mode 100644 index 00000000..29fa004a --- /dev/null +++ b/v2/query_test.go @@ -0,0 +1,74 @@ +package dht + +import ( + "testing" + + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/core/host" + "github.com/stretchr/testify/require" +) + +func newServerHost(t testing.TB) host.Host { + listenAddr := libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0") + + h, err := libp2p.New(listenAddr) + require.NoError(t, err) + + t.Cleanup(func() { + if err = h.Close(); err != nil { + t.Logf("unexpected error when closing host: %s", err) + } + }) + + return h +} + +func newClientHost(t testing.TB) host.Host { + h, err := libp2p.New(libp2p.NoListenAddrs) + require.NoError(t, err) + + t.Cleanup(func() { + if err = h.Close(); err != nil { + t.Logf("unexpected error when closing host: %s", err) + } + }) + + return h +} + +func newServerDht(t testing.TB, cfg *Config) *DHT { + h := newServerHost(t) + + if cfg == nil { + cfg = DefaultConfig() + } + cfg.Mode = ModeOptServer + + d, err := New(h, cfg) + require.NoError(t, err) + + t.Cleanup(func() { + if err = d.Close(); err != nil { + t.Logf("unexpected error when closing dht: %s", err) + } + }) + return d +} + +func newClientDht(t testing.TB, cfg *Config) *DHT { + h := newClientHost(t) + + if cfg == nil { + cfg = DefaultConfig() + } + cfg.Mode = ModeOptClient + d, err := New(h, cfg) + require.NoError(t, err) + + t.Cleanup(func() { + if err = d.Close(); err != nil { + t.Logf("unexpected error when closing dht: %s", err) + } + }) + return d +} diff --git a/v2/router.go b/v2/router.go index bc22ae50..8c40471d 100644 --- a/v2/router.go +++ b/v2/router.go @@ -5,39 +5,126 @@ import ( "fmt" "time" - "github.com/iand/zikade/kademlia" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" + "github.com/libp2p/go-libp2p/core/protocol" + "github.com/libp2p/go-msgio" + "github.com/libp2p/go-msgio/pbio" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/network/address" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" - "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) -var _ kademlia.Router[key.Key256, ma.Multiaddr] = (*DHT)(nil) +type Router struct { + host host.Host +} + +var _ coord.Router = (*Router)(nil) + +func WriteMsg(s network.Stream, msg protoreflect.ProtoMessage) error { + w := pbio.NewDelimitedWriter(s) + return w.WriteMsg(msg) +} + +func ReadMsg(s network.Stream, msg proto.Message) error { + r := pbio.NewDelimitedReader(s, network.MessageSizeMax) + return r.ReadMsg(msg) +} + +type ProtoKadMessage interface { + proto.Message +} + +type ProtoKadRequestMessage[K kad.Key[K], A kad.Address[A]] interface { + ProtoKadMessage + kad.Request[K, A] +} + +type ProtoKadResponseMessage[K kad.Key[K], A kad.Address[A]] interface { + ProtoKadMessage + kad.Response[K, A] +} + +func (r *Router) SendMessage(ctx context.Context, to peer.AddrInfo, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error) { + if err := r.AddNodeInfo(ctx, to, time.Hour); err != nil { + return nil, fmt.Errorf("add node info: %w", err) + } + + // TODO: what to do with addresses in peer.AddrInfo? 
+	if len(r.host.Peerstore().Addrs(to.ID)) == 0 {
+		return nil, fmt.Errorf("no addresses for peer %s in peerstore", to.ID)
+	}
+
+	var cancel context.CancelFunc
+	ctx, cancel = context.WithCancel(ctx)
+	defer cancel()
+
+	var err error
 
-func (d *DHT) SendMessage(ctx context.Context, to kad.NodeInfo[key.Key256, ma.Multiaddr], protoID address.ProtocolID, req kad.Request[key.Key256, ma.Multiaddr]) (kad.Response[key.Key256, ma.Multiaddr], error) {
-	s, err := d.host.NewStream(ctx, peer.ID(to.ID().(kadt.PeerID)), d.cfg.ProtocolID)
+	var s network.Stream
+	s, err = r.host.NewStream(ctx, to.ID, protocol.ID(protoID))
 	if err != nil {
-		return nil, fmt.Errorf("new stream: %w", err)
+		return nil, fmt.Errorf("stream creation: %w", err)
 	}
-	defer d.logErr(s.Close(), "failed to close stream")
+	defer s.Close()
 
-	return nil, nil
+	w := pbio.NewDelimitedWriter(s)
+	reader := msgio.NewVarintReaderSize(s, network.MessageSizeMax)
+
+	err = w.WriteMsg(req)
+	if err != nil {
+		return nil, fmt.Errorf("write message: %w", err)
+	}
+
+	data, err := reader.ReadMsg()
+	if err != nil {
+		return nil, fmt.Errorf("read message: %w", err)
+	}
+	protoResp := pb.Message{}
+	if err = proto.Unmarshal(data, &protoResp); err != nil {
+		return nil, err
+	}
+
+	for _, info := range protoResp.CloserPeersAddrInfos() {
+		_ = r.AddNodeInfo(ctx, info, time.Hour)
+	}
+
+	return &protoResp, err
 }
 
-func (d *DHT) AddNodeInfo(ctx context.Context, info kad.NodeInfo[key.Key256, ma.Multiaddr], ttl time.Duration) error {
-	// TODO implement me
-	panic("implement me")
+func (r *Router) AddNodeInfo(ctx context.Context, ai peer.AddrInfo, ttl time.Duration) error {
+	// Don't add addresses for self or our connected peers. We have better ones.
+	if ai.ID == r.host.ID() || r.host.Network().Connectedness(ai.ID) == network.Connected {
+		return nil
+	}
+
+	r.host.Peerstore().AddAddrs(ai.ID, ai.Addrs, ttl)
+	return nil
 }
 
-func (d *DHT) GetNodeInfo(ctx context.Context, id kad.NodeID[key.Key256]) (kad.NodeInfo[key.Key256, ma.Multiaddr], error) {
-	// TODO implement me
-	panic("implement me")
+func (r *Router) GetNodeInfo(ctx context.Context, id peer.ID) (peer.AddrInfo, error) {
+	return r.host.Peerstore().PeerInfo(id), nil
 }
 
-func (d *DHT) GetClosestNodes(ctx context.Context, to kad.NodeInfo[key.Key256, ma.Multiaddr], target key.Key256) ([]kad.NodeInfo[key.Key256, ma.Multiaddr], error) {
-	// TODO implement me
-	panic("implement me")
+func (r *Router) GetClosestNodes(ctx context.Context, to peer.AddrInfo, target key.Key256) ([]peer.AddrInfo, error) {
+	resp, err := r.SendMessage(ctx, to, address.ProtocolID(ProtocolIPFS), FindKeyRequest(target))
+	if err != nil {
+		return nil, err
+	}
+
+	return resp.CloserPeersAddrInfos(), nil
+}
+
+func FindKeyRequest(k key.Key256) *pb.Message {
+	marshalledKey, _ := k.MarshalBinary()
+	return &pb.Message{
+		Type: pb.Message_FIND_NODE,
+		Key:  marshalledKey,
+	}
}
diff --git a/v2/routing_test.go b/v2/router_test.go
similarity index 100%
rename from v2/routing_test.go
rename to v2/router_test.go
diff --git a/v2/routing.go b/v2/routing.go
index 6107a30d..cc72f849 100644
--- a/v2/routing.go
+++ b/v2/routing.go
@@ -6,8 +6,11 @@ import (
 	"fmt"
 	"time"
 
+	"github.com/libp2p/go-libp2p-kad-dht/v2/coord"
+
 	"github.com/ipfs/go-cid"
 	ds "github.com/ipfs/go-datastore"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
 	record "github.com/libp2p/go-libp2p-record"
 	recpb "github.com/libp2p/go-libp2p-record/pb"
 	"github.com/libp2p/go-libp2p/core/network"
@@ -15,15 +18,16 @@ import (
 	"github.com/libp2p/go-libp2p/core/routing"
 	"go.opentelemetry.io/otel/attribute"
 	otel 
"go.opentelemetry.io/otel/trace" + "golang.org/x/exp/slog" ) var _ routing.Routing = (*DHT)(nil) func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { - ctx, span := tracer.Start(ctx, "DHT.FindPeer") + ctx, span := d.tele.Tracer.Start(ctx, "DHT.FindPeer") defer span.End() - // First check locally. If are or were recently connected to the peer, + // First check locally. If we are or were recently connected to the peer, // return the addresses from our peerstore unless the information doesn't // contain any. switch d.host.Network().Connectedness(id) { @@ -36,13 +40,35 @@ func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { // we're } - // TODO reach out to Zikade + target := kadt.PeerID(id) - panic("implement me") + var foundNode coord.Node + fn := func(ctx context.Context, node coord.Node, stats coord.QueryStats) error { + slog.Info("visiting node", "id", node.ID()) + if node.ID() == id { + foundNode = node + return coord.ErrSkipRemaining + } + return nil + } + + _, err := d.kad.Query(ctx, target.Key(), fn) + if err != nil { + return peer.AddrInfo{}, fmt.Errorf("failed to run query: %w", err) + } + + if foundNode == nil { + return peer.AddrInfo{}, fmt.Errorf("peer record not found") + } + + return peer.AddrInfo{ + ID: foundNode.ID(), + Addrs: foundNode.Addresses(), + }, nil } func (d *DHT) Provide(ctx context.Context, c cid.Cid, brdcst bool) error { - ctx, span := tracer.Start(ctx, "DHT.Provide", otel.WithAttributes(attribute.String("cid", c.String()))) + ctx, span := d.tele.Tracer.Start(ctx, "DHT.Provide", otel.WithAttributes(attribute.String("cid", c.String()))) defer span.End() // verify if this DHT supports provider records by checking if a "providers" @@ -73,7 +99,7 @@ func (d *DHT) Provide(ctx context.Context, c cid.Cid, brdcst bool) error { } func (d *DHT) FindProvidersAsync(ctx context.Context, c cid.Cid, count int) <-chan peer.AddrInfo { - ctx, span := tracer.Start(ctx, "DHT.FindProvidersAsync", otel.WithAttributes(attribute.String("cid", c.String()), attribute.Int("count", count))) + _, span := d.tele.Tracer.Start(ctx, "DHT.FindProvidersAsync", otel.WithAttributes(attribute.String("cid", c.String()), attribute.Int("count", count))) defer span.End() // verify if this DHT supports provider records by checking if a "providers" @@ -90,7 +116,7 @@ func (d *DHT) FindProvidersAsync(ctx context.Context, c cid.Cid, count int) <-ch } func (d *DHT) PutValue(ctx context.Context, key string, value []byte, option ...routing.Option) error { - ctx, span := tracer.Start(ctx, "DHT.PutValue") + ctx, span := d.tele.Tracer.Start(ctx, "DHT.PutValue") defer span.End() ns, path, err := record.SplitKey(key) @@ -116,7 +142,7 @@ func (d *DHT) PutValue(ctx context.Context, key string, value []byte, option ... 
} func (d *DHT) GetValue(ctx context.Context, key string, option ...routing.Option) ([]byte, error) { - ctx, span := tracer.Start(ctx, "DHT.GetValue") + ctx, span := d.tele.Tracer.Start(ctx, "DHT.GetValue") defer span.End() ns, path, err := record.SplitKey(key) @@ -147,14 +173,14 @@ func (d *DHT) GetValue(ctx context.Context, key string, option ...routing.Option } func (d *DHT) SearchValue(ctx context.Context, s string, option ...routing.Option) (<-chan []byte, error) { - ctx, span := tracer.Start(ctx, "DHT.SearchValue") + _, span := d.tele.Tracer.Start(ctx, "DHT.SearchValue") defer span.End() panic("implement me") } func (d *DHT) Bootstrap(ctx context.Context) error { - ctx, span := tracer.Start(ctx, "DHT.Bootstrap") + _, span := d.tele.Tracer.Start(ctx, "DHT.Bootstrap") defer span.End() panic("implement me") diff --git a/v2/stream.go b/v2/stream.go index a05bc0f0..ea8c3a8b 100644 --- a/v2/stream.go +++ b/v2/stream.go @@ -8,20 +8,19 @@ import ( "fmt" "io" "sync" - "time" - - "google.golang.org/protobuf/proto" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-msgio" - "github.com/libp2p/go-msgio/protoio" - "go.opencensus.io/stats" - "go.opencensus.io/tag" + "github.com/libp2p/go-msgio/pbio" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" + "google.golang.org/protobuf/proto" - "github.com/libp2p/go-libp2p-kad-dht/v2/metrics" - pb "github.com/libp2p/go-libp2p-kad-dht/v2/pb" + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) // streamHandler is the function that's registered with the libp2p host for @@ -29,20 +28,29 @@ import ( // actually starts handling the stream and depending on the outcome resets or // closes it. func (d *DHT) streamHandler(s network.Stream) { - ctx, _ := tag.New(context.Background(), - tag.Upsert(metrics.KeyPeerID, d.host.ID().String()), - tag.Upsert(metrics.KeyInstanceID, fmt.Sprintf("%p", d)), - ) + attrs := []attribute.KeyValue{ + tele.AttrPeerID(d.host.ID().String()), + tele.AttrInstanceID(fmt.Sprintf("%p", d)), + } + + // start stream handler span + ctx, span := d.tele.Tracer.Start(context.Background(), "DHT.streamHandler", trace.WithAttributes(attrs...)) + defer span.End() + + // attach attribute to context to make them available to metrics below + ctx = tele.WithAttributes(ctx, attrs...) if err := s.Scope().SetService(ServiceName); err != nil { d.log.LogAttrs(ctx, slog.LevelWarn, "error attaching stream to DHT service", slog.String("err", err.Error())) d.logErr(s.Reset(), "failed to reset stream") + span.RecordError(err) return } if err := d.handleNewStream(ctx, s); err != nil { // If we exited with an error, let the remote peer know. d.logErr(s.Reset(), "failed to reset stream") + span.RecordError(err) } else { // If we exited without an error, close gracefully. d.logErr(s.Close(), "failed to close stream") @@ -68,17 +76,19 @@ func (d *DHT) streamHandler(s network.Stream) { // it will return nil indicating the end of the stream or all messages have been // processed correctly. 
func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error { - ctx, span := tracer.Start(ctx, "DHT.handleNewStream") + ctx, span := d.tele.Tracer.Start(ctx, "DHT.handleNewStream") defer span.End() // init structured logger that always contains the remote peers PeerID slogger := d.log.With(slog.String("from", s.Conn().RemotePeer().String())) // reset the stream after it was idle for too long - if err := s.SetDeadline(time.Now().Add(d.cfg.TimeoutStreamIdle)); err != nil { + if err := s.SetDeadline(d.cfg.Clock.Now().Add(d.cfg.TimeoutStreamIdle)); err != nil { return fmt.Errorf("set initial stream deadline: %w", err) } + // not using pbio because it doesn't support a pooled reader that optimizes + // memory allocations. reader := msgio.NewVarintReaderSize(s, network.MessageSizeMax) for { // 1. read message from stream @@ -92,7 +102,7 @@ func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error { // we have received a message, start the timer to // track inbound request latency - startTime := time.Now() + startTime := d.cfg.Clock.Now() // 2. unmarshal message into something usable req, err := d.streamUnmarshalMsg(ctx, slogger, data) @@ -104,34 +114,37 @@ func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error { reader.ReleaseMsg(data) // reset stream deadline - if err = s.SetDeadline(time.Now().Add(d.cfg.TimeoutStreamIdle)); err != nil { + if err = s.SetDeadline(d.cfg.Clock.Now().Add(d.cfg.TimeoutStreamIdle)); err != nil { return fmt.Errorf("reset stream deadline: %w", err) } + ctx = tele.WithAttributes(ctx, + tele.AttrMessageType(req.GetType().String()), + tele.AttrKey(base64.StdEncoding.EncodeToString(req.GetKey())), + ) + // extend metrics context and slogger with message information. // ctx must be overwritten because in the next iteration metrics.KeyMessageType // would already exist and tag.New would return an error. - ctx, _ := tag.New(ctx, tag.Upsert(metrics.KeyMessageType, req.GetType().String())) slogger = slogger.With( slog.String("type", req.GetType().String()), slog.String("key", base64.StdEncoding.EncodeToString(req.GetKey())), ) // track message metrics - stats.Record(ctx, - metrics.ReceivedMessages.M(1), - metrics.ReceivedBytes.M(int64(len(data))), - ) + mattrs := metric.WithAttributeSet(tele.FromContext(ctx)) + d.tele.ReceivedMessages.Add(ctx, 1, mattrs) + d.tele.ReceivedBytes.Record(ctx, int64(len(data)), mattrs) // 3. 
handle the message and gather response slogger.LogAttrs(ctx, slog.LevelDebug, "handling message") resp, err := d.handleMsg(ctx, s.Conn().RemotePeer(), req) if err != nil { - slogger.LogAttrs(ctx, slog.LevelDebug, "error handling message", slog.Duration("time", time.Since(startTime)), slog.String("error", err.Error())) - stats.Record(ctx, metrics.ReceivedMessageErrors.M(1)) + slogger.LogAttrs(ctx, slog.LevelDebug, "error handling message", slog.Duration("time", d.cfg.Clock.Since(startTime)), slog.String("error", err.Error())) + d.tele.ReceivedMessageErrors.Add(ctx, 1, mattrs) return err } - slogger.LogAttrs(ctx, slog.LevelDebug, "handled message", slog.Duration("time", time.Since(startTime))) + slogger.LogAttrs(ctx, slog.LevelDebug, "handled message", slog.Duration("time", d.cfg.Clock.Since(startTime))) // if the handler didn't return a response, continue reading the stream if resp == nil { @@ -145,9 +158,9 @@ func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error { } // final logging, metrics tracking - latency := time.Since(startTime) + latency := d.cfg.Clock.Since(startTime) slogger.LogAttrs(ctx, slog.LevelDebug, "responded to message", slog.Duration("time", latency)) - stats.Record(ctx, metrics.InboundRequestLatency.M(float64(latency.Milliseconds()))) + d.tele.InboundRequestLatency.Record(ctx, float64(latency.Milliseconds()), mattrs) } } @@ -155,7 +168,7 @@ func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error { // corresponding bytes. If an error occurs, it logs it and updates the metrics. // If the bytes are empty and the error is nil, the remote peer returned func (d *DHT) streamReadMsg(ctx context.Context, slogger *slog.Logger, r msgio.Reader) ([]byte, error) { - ctx, span := tracer.Start(ctx, "DHT.streamReadMsg") + ctx, span := d.tele.Tracer.Start(ctx, "DHT.streamReadMsg") defer span.End() data, err := r.ReadMsg() @@ -167,12 +180,10 @@ func (d *DHT) streamReadMsg(ctx context.Context, slogger *slog.Logger, r msgio.R // record any potential partial message we have received if len(data) > 0 { - _ = stats.RecordWithTags(ctx, - []tag.Mutator{tag.Upsert(metrics.KeyMessageType, "UNKNOWN")}, - metrics.ReceivedMessages.M(1), - metrics.ReceivedMessageErrors.M(1), - metrics.ReceivedBytes.M(int64(len(data))), - ) + mattrs := metric.WithAttributeSet(tele.FromContext(ctx, tele.AttrMessageType("UNKNOWN"))) + d.tele.ReceivedMessages.Add(ctx, 1, mattrs) + d.tele.ReceivedMessageErrors.Add(ctx, 1, mattrs) + d.tele.ReceivedBytes.Record(ctx, int64(len(data)), mattrs) } return nil, err @@ -185,19 +196,16 @@ func (d *DHT) streamReadMsg(ctx context.Context, slogger *slog.Logger, r msgio.R // protobuf message. If an error occurs, it will be logged and the metrics will // be updated. 
func (d *DHT) streamUnmarshalMsg(ctx context.Context, slogger *slog.Logger, data []byte) (*pb.Message, error) { - ctx, span := tracer.Start(ctx, "DHT.streamUnmarshalMsg") + ctx, span := d.tele.Tracer.Start(ctx, "DHT.streamUnmarshalMsg") defer span.End() var req pb.Message if err := proto.Unmarshal(data, &req); err != nil { slogger.LogAttrs(ctx, slog.LevelDebug, "error unmarshalling message", slog.String("err", err.Error())) - _ = stats.RecordWithTags(ctx, - []tag.Mutator{tag.Upsert(metrics.KeyMessageType, "UNKNOWN")}, - metrics.ReceivedMessages.M(1), - metrics.ReceivedMessageErrors.M(1), - metrics.ReceivedBytes.M(int64(len(data))), - ) + mattrs := metric.WithAttributeSet(tele.FromContext(ctx, tele.AttrMessageType("UNKNOWN"))) + d.tele.ReceivedMessageErrors.Add(ctx, 1, mattrs) + d.tele.ReceivedBytes.Record(ctx, int64(len(data)), mattrs) return nil, err } @@ -208,7 +216,7 @@ func (d *DHT) streamUnmarshalMsg(ctx context.Context, slogger *slog.Logger, data // handleMsg handles the given protobuf message based on its type from the // given remote peer. func (d *DHT) handleMsg(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { - ctx, span := tracer.Start(ctx, "DHT.handle_"+req.GetType().String()) + ctx, span := d.tele.Tracer.Start(ctx, "DHT.handle_"+req.GetType().String(), trace.WithAttributes(attribute.String("remote_id", remote.String()))) defer span.End() switch req.GetType() { @@ -232,12 +240,13 @@ func (d *DHT) handleMsg(ctx context.Context, remote peer.ID, req *pb.Message) (* // streamWriteMsg sends the given message over the stream and handles traces // and telemetry. func (d *DHT) streamWriteMsg(ctx context.Context, slogger *slog.Logger, s network.Stream, msg *pb.Message) error { - ctx, span := tracer.Start(ctx, "DHT.streamWriteMsg") + ctx, span := d.tele.Tracer.Start(ctx, "DHT.streamWriteMsg") defer span.End() if err := writeMsg(s, msg); err != nil { slogger.LogAttrs(ctx, slog.LevelDebug, "error writing response", slog.String("err", err.Error())) - stats.Record(ctx, metrics.ReceivedMessageErrors.M(1)) + mattrs := metric.WithAttributeSet(tele.FromContext(ctx)) + d.tele.ReceivedMessageErrors.Add(ctx, 1, mattrs) return err } @@ -249,7 +258,7 @@ func (d *DHT) streamWriteMsg(ctx context.Context, slogger *slog.Logger, s networ // packet for every single write.
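// Editor's note: instances of this writer are reused via writerPool below, so a fresh buffered writer does not have to be allocated for every outgoing response.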
type bufferedDelimitedWriter struct { *bufio.Writer - protoio.WriteCloser + pbio.WriteCloser } var writerPool = sync.Pool{ @@ -257,7 +266,7 @@ var writerPool = sync.Pool{ w := bufio.NewWriter(nil) return &bufferedDelimitedWriter{ Writer: w, - WriteCloser: protoio.NewDelimitedWriter(w), + WriteCloser: pbio.NewDelimitedWriter(w), } }, } diff --git a/v2/stream_test.go b/v2/stream_test.go index 26e932ee..12f61031 100644 --- a/v2/stream_test.go +++ b/v2/stream_test.go @@ -38,8 +38,7 @@ func (trw testReadWriter) ReadMsg() (*pb.Message, error) { } resp := &pb.Message{} - err = proto.Unmarshal(msg, resp) - return resp, err + return resp, proto.Unmarshal(msg, resp) } func (trw testReadWriter) WriteMsg(msg *pb.Message) error { @@ -64,7 +63,7 @@ func newPeerPair(t testing.TB) (host.Host, *DHT) { cfg.Mode = ModeOptServer serverDHT, err := New(server, cfg) - fillRoutingTable(t, serverDHT) + fillRoutingTable(t, serverDHT, 250) t.Cleanup(func() { if err = serverDHT.Close(); err != nil { diff --git a/v2/tele/tele.go b/v2/tele/tele.go new file mode 100644 index 00000000..a3571ffb --- /dev/null +++ b/v2/tele/tele.go @@ -0,0 +1,234 @@ +package tele + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk/instrumentation" + motel "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/trace" +) + +// ctxKey is an unexported type alias for the value of a context key. This is +// used to attach metric values to a context and get them out of a context. +type ctxKey struct{} + +var ( + meterName = "github.com/libp2p/go-libp2p-kad-dht/v2" + tracerName = "go-libp2p-kad-dht" + + // attrsCtxKey is the actual context key value that's used as a key for + // metric values that are attached to a context. + attrsCtxKey = ctxKey{} +) + +// Telemetry is the struct that holds a reference to all metrics and the tracer. +// Initialize this struct with [New] or [NewWithGlobalProviders]. Make sure +// to also register the [MeterProviderOpts] with your custom or the global +// [metric.MeterProvider]. +// +// To see the documentation for each metric below, check out [New] and the +// metric.WithDescription() calls when initializing each metric. +type Telemetry struct { + Tracer trace.Tracer + ReceivedMessages metric.Int64Counter + ReceivedMessageErrors metric.Int64Counter + ReceivedBytes metric.Int64Histogram + InboundRequestLatency metric.Float64Histogram + OutboundRequestLatency metric.Float64Histogram + SentMessages metric.Int64Counter + SentMessageErrors metric.Int64Counter + SentRequests metric.Int64Counter + SentRequestErrors metric.Int64Counter + SentBytes metric.Int64Histogram + LRUCache metric.Int64Counter + NetworkSize metric.Int64Counter +} + +// NewWithGlobalProviders uses the global meter and tracer providers from +// opentelemetry. Check out the documentation of [MeterProviderOpts] for +// implications of using this constructor. +func NewWithGlobalProviders() (*Telemetry, error) { + return New(otel.GetMeterProvider(), otel.GetTracerProvider()) +} + +// New initializes a Telemetry struct with the given meter and tracer providers. +// It constructs the different metric counters and histograms. The histograms +// have custom boundaries. Therefore, the given [metric.MeterProvider] should +// have the custom view registered that [MeterProviderOpts] returns. 
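+// +// A minimal construction sketch (mp and tp stand for your own, pre-configured providers; passing nil for either falls back to the global providers): +// +//	telemetry, err := tele.New(mp, tp) +//	if err != nil { +//		// handle error +//	}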
+func New(meterProvider metric.MeterProvider, tracerProvider trace.TracerProvider) (*Telemetry, error) { + var err error + + if meterProvider == nil { + meterProvider = otel.GetMeterProvider() + } + + if tracerProvider == nil { + tracerProvider = otel.GetTracerProvider() + } + + t := &Telemetry{ + Tracer: tracerProvider.Tracer(tracerName), + } + + meter := meterProvider.Meter(meterName) + t.ReceivedMessages, err = meter.Int64Counter("received_messages", metric.WithDescription("Total number of messages received per RPC"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("received_messages counter: %w", err) + } + + t.ReceivedMessageErrors, err = meter.Int64Counter("received_message_errors", metric.WithDescription("Total number of errors for messages received per RPC"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("received_message_errors counter: %w", err) + } + + t.ReceivedBytes, err = meter.Int64Histogram("received_bytes", metric.WithDescription("Total received bytes per RPC"), metric.WithUnit("By")) + if err != nil { + return nil, fmt.Errorf("received_bytes histogram: %w", err) + } + + t.InboundRequestLatency, err = meter.Float64Histogram("inbound_request_latency", metric.WithDescription("Latency per RPC"), metric.WithUnit("ms")) + if err != nil { + return nil, fmt.Errorf("inbound_request_latency histogram: %w", err) + } + + t.OutboundRequestLatency, err = meter.Float64Histogram("outbound_request_latency", metric.WithDescription("Latency per RPC"), metric.WithUnit("ms")) + if err != nil { + return nil, fmt.Errorf("outbound_request_latency histogram: %w", err) + } + + t.SentMessages, err = meter.Int64Counter("sent_messages", metric.WithDescription("Total number of messages sent per RPC"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("sent_messages counter: %w", err) + } + + t.SentMessageErrors, err = meter.Int64Counter("sent_message_errors", metric.WithDescription("Total number of errors for messages sent per RPC"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("sent_message_errors counter: %w", err) + } + + t.SentRequests, err = meter.Int64Counter("sent_requests", metric.WithDescription("Total number of requests sent per RPC"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("sent_requests counter: %w", err) + } + + t.SentRequestErrors, err = meter.Int64Counter("sent_request_errors", metric.WithDescription("Total number of errors for requests sent per RPC"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("sent_request_errors counter: %w", err) + } + + t.SentBytes, err = meter.Int64Histogram("sent_bytes", metric.WithDescription("Total sent bytes per RPC"), metric.WithUnit("By")) + if err != nil { + return nil, fmt.Errorf("sent_bytes histogram: %w", err) + } + + t.LRUCache, err = meter.Int64Counter("lru_cache", metric.WithDescription("Cache hit or miss counter"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("lru_cache counter: %w", err) + } + + t.NetworkSize, err = meter.Int64Counter("network_size", metric.WithDescription("Network size estimation"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("network_size counter: %w", err) + } + + return t, nil +} + +// MeterProviderOpts is a method that returns metric options. Make sure +// to register these options to your [metric.MeterProvider]. Unfortunately, +// attaching these options to an already existing [metric.MeterProvider] +// is not possible. 
Therefore, you can't just register the options with the +// global MeterProvider that is returned by [otel.GetMeterProvider]. +// One example to register a new [metric.MeterProvider] would be: +// +// provider := metric.NewMeterProvider(tele.MeterProviderOpts...) // <-- also add your options, like a metric reader +// otel.SetMeterProvider(provider) +// +// Then you can use [NewWithGlobalProviders] and it will use a correctly +// configured meter provider. +// +// The options that MeterProviderOpts returns are just custom histogram +// boundaries for a few metrics. In the future, we could reconsider these +// boundaries because we just blindly ported them from v1 to v2 of +// go-libp2p-kad-dht. +var MeterProviderOpts = []motel.Option{ + motel.WithView(motel.NewView( + motel.Instrument{Name: "*_bytes", Scope: instrumentation.Scope{Name: meterName}}, + motel.Stream{ + Aggregation: motel.AggregationExplicitBucketHistogram{ + Boundaries: []float64{1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296}, + }, + }, + )), + motel.WithView(motel.NewView( + motel.Instrument{Name: "*_request_latency", Scope: instrumentation.Scope{Name: meterName}}, + motel.Stream{ + Aggregation: motel.AggregationExplicitBucketHistogram{ + Boundaries: []float64{0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000}, + }, + }, + )), +} + +// AttrInstanceID identifies a dht instance by the pointer address. +// Useful for differentiating between different DHTs that have the same peer id. +func AttrInstanceID(instanceID string) attribute.KeyValue { + return attribute.String("instance_id", instanceID) +} + +func AttrPeerID(pid string) attribute.KeyValue { + return attribute.String("peer_id", pid) +} + +func AttrCacheHit(hit bool) attribute.KeyValue { + return attribute.Bool("hit", hit) +} + +// AttrRecordType is currently only used for the provider backend LRU cache +func AttrRecordType(val string) attribute.KeyValue { + return attribute.String("record_type", val) +} + +func AttrMessageType(val string) attribute.KeyValue { + return attribute.String("message_type", val) +} + +func AttrKey(val string) attribute.KeyValue { + return attribute.String("key", val) +} + +// WithAttributes is a function that attaches the provided attributes to the +// given context. The given attributes will overwrite any already existing ones. +func WithAttributes(ctx context.Context, attrs ...attribute.KeyValue) context.Context { + set := attribute.NewSet(attrs...) + val := ctx.Value(attrsCtxKey) + if val != nil { + existing, ok := val.(attribute.Set) + if ok { + set = attribute.NewSet(append(existing.ToSlice(), attrs...)...) + } + } + return context.WithValue(ctx, attrsCtxKey, set) +} + +// FromContext returns the attributes that were previously associated with the +// given context via [WithAttributes] plus any attributes that are also passed +// into this function. The given attributes will take precedence over any +// attributes stored in the context. +func FromContext(ctx context.Context, attrs ...attribute.KeyValue) attribute.Set { + val := ctx.Value(attrsCtxKey) + if val == nil { + return attribute.NewSet(attrs...) + } + + set, ok := val.(attribute.Set) + if !ok { + return attribute.NewSet(attrs...) + } + + return attribute.NewSet(append(set.ToSlice(), attrs...)...)
+} diff --git a/v2/tele/tele_test.go b/v2/tele/tele_test.go new file mode 100644 index 00000000..d7c98d93 --- /dev/null +++ b/v2/tele/tele_test.go @@ -0,0 +1,41 @@ +package tele + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/attribute" +) + +func TestWithAttributes(t *testing.T) { + ctx := context.Background() + + set := FromContext(ctx) + assert.Equal(t, 0, set.Len()) + + ctx = WithAttributes(ctx, attribute.Int("A", 1)) + ctx = WithAttributes(ctx, attribute.Int("B", 1)) + ctx = WithAttributes(ctx, attribute.Int("A", 1)) + ctx = WithAttributes(ctx, attribute.Int("B", 2)) + ctx = WithAttributes(ctx, attribute.Int("C", 1)) + + set = FromContext(ctx, attribute.Int("A", 2)) + + val, found := set.Value("A") + require.True(t, found) + assert.EqualValues(t, 2, val.AsInt64()) + + val, found = set.Value("B") + require.True(t, found) + assert.EqualValues(t, 2, val.AsInt64()) + + val, found = set.Value("C") + require.True(t, found) + assert.EqualValues(t, 1, val.AsInt64()) + + ctx = context.WithValue(ctx, attrsCtxKey, "not an attribute set") + set = FromContext(ctx) + assert.Equal(t, 0, set.Len()) +} From 722e958dc1e98a1a3e42246fb1da24ffbc04839b Mon Sep 17 00:00:00 2001 From: Ian Davis <18375+iand@users.noreply.github.com> Date: Fri, 8 Sep 2023 12:39:19 +0100 Subject: [PATCH 41/64] Simplify usage of kadtest.CtxShort --- v2/coord/coordinator_test.go | 12 ++++-------- v2/coord/network_test.go | 3 +-- v2/coord/routing_test.go | 21 +++++++-------------- v2/dht_test.go | 3 +-- v2/internal/kadtest/context.go | 15 ++++++++------- 5 files changed, 21 insertions(+), 33 deletions(-) diff --git a/v2/coord/coordinator_test.go b/v2/coord/coordinator_test.go index 235828cc..ec62bedd 100644 --- a/v2/coord/coordinator_test.go +++ b/v2/coord/coordinator_test.go @@ -158,8 +158,7 @@ func TestConfigValidate(t *testing.T) { } func TestExhaustiveQuery(t *testing.T) { - ctx, cancel := kadtest.CtxShort(t) - defer cancel() + ctx := kadtest.CtxShort(t) clk := clock.NewMock() _, nodes, err := nettest.LinearTopology(4, clk) @@ -198,8 +197,7 @@ func TestExhaustiveQuery(t *testing.T) { } func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { - ctx, cancel := kadtest.CtxShort(t) - defer cancel() + ctx := kadtest.CtxShort(t) clk := clock.NewMock() _, nodes, err := nettest.LinearTopology(4, clk) @@ -261,8 +259,7 @@ func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { } func TestBootstrap(t *testing.T) { - ctx, cancel := kadtest.CtxShort(t) - defer cancel() + ctx := kadtest.CtxShort(t) clk := clock.NewMock() _, nodes, err := nettest.LinearTopology(4, clk) @@ -315,8 +312,7 @@ func TestBootstrap(t *testing.T) { } func TestIncludeNode(t *testing.T) { - ctx, cancel := kadtest.CtxShort(t) - defer cancel() + ctx := kadtest.CtxShort(t) clk := clock.NewMock() _, nodes, err := nettest.LinearTopology(4, clk) diff --git a/v2/coord/network_test.go b/v2/coord/network_test.go index ad0f3146..4d2ca5b5 100644 --- a/v2/coord/network_test.go +++ b/v2/coord/network_test.go @@ -15,8 +15,7 @@ import ( // TODO: this is just a basic is-it-working test that needs to be improved func TestGetClosestNodes(t *testing.T) { - ctx, cancel := kadtest.CtxShort(t) - defer cancel() + ctx := kadtest.CtxShort(t) clk := clock.NewMock() _, nodes, err := nettest.LinearTopology(4, clk) diff --git a/v2/coord/routing_test.go b/v2/coord/routing_test.go index e1342c7c..b3375b57 100644 --- a/v2/coord/routing_test.go +++ b/v2/coord/routing_test.go @@ -23,8 
+23,7 @@ import ( ) func TestRoutingStartBootstrapSendsEvent(t *testing.T) { - ctx, cancel := kadtest.CtxShort(t) - defer cancel() + ctx := kadtest.CtxShort(t) clk := clock.NewMock() _, nodes, err := nettest.LinearTopology(4, clk) @@ -62,8 +61,7 @@ func TestRoutingStartBootstrapSendsEvent(t *testing.T) { } func TestRoutingBootstrapGetClosestNodesSuccess(t *testing.T) { - ctx, cancel := kadtest.CtxShort(t) - defer cancel() + ctx := kadtest.CtxShort(t) clk := clock.NewMock() _, nodes, err := nettest.LinearTopology(4, clk) @@ -96,8 +94,7 @@ func TestRoutingBootstrapGetClosestNodesSuccess(t *testing.T) { } func TestRoutingBootstrapGetClosestNodesFailure(t *testing.T) { - ctx, cancel := kadtest.CtxShort(t) - defer cancel() + ctx := kadtest.CtxShort(t) clk := clock.NewMock() _, nodes, err := nettest.LinearTopology(4, clk) @@ -131,8 +128,7 @@ func TestRoutingBootstrapGetClosestNodesFailure(t *testing.T) { } func TestRoutingAddNodeInfoSendsEvent(t *testing.T) { - ctx, cancel := kadtest.CtxShort(t) - defer cancel() + ctx := kadtest.CtxShort(t) clk := clock.NewMock() _, nodes, err := nettest.LinearTopology(4, clk) @@ -162,8 +158,7 @@ func TestRoutingAddNodeInfoSendsEvent(t *testing.T) { } func TestRoutingIncludeGetClosestNodesSuccess(t *testing.T) { - ctx, cancel := kadtest.CtxShort(t) - defer cancel() + ctx := kadtest.CtxShort(t) clk := clock.NewMock() _, nodes, err := nettest.LinearTopology(4, clk) @@ -197,8 +192,7 @@ func TestRoutingIncludeGetClosestNodesSuccess(t *testing.T) { } func TestRoutingIncludeGetClosestNodesFailure(t *testing.T) { - ctx, cancel := kadtest.CtxShort(t) - defer cancel() + ctx := kadtest.CtxShort(t) clk := clock.NewMock() _, nodes, err := nettest.LinearTopology(4, clk) @@ -233,8 +227,7 @@ func TestRoutingIncludeGetClosestNodesFailure(t *testing.T) { } func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { - ctx, cancel := kadtest.CtxShort(t) - defer cancel() + ctx := kadtest.CtxShort(t) clk := clock.NewMock() _, nodes, err := nettest.LinearTopology(4, clk) diff --git a/v2/dht_test.go b/v2/dht_test.go index b42b77b5..29993a58 100644 --- a/v2/dht_test.go +++ b/v2/dht_test.go @@ -92,8 +92,7 @@ func expectEventType(t *testing.T, ctx context.Context, events <-chan coord.Rout } func TestAddAddresses(t *testing.T) { - ctx, cancel := kadtest.CtxShort(t) - defer cancel() + ctx := kadtest.CtxShort(t) localCfg := DefaultConfig() diff --git a/v2/internal/kadtest/context.go b/v2/internal/kadtest/context.go index 1ef31f40..41623c08 100644 --- a/v2/internal/kadtest/context.go +++ b/v2/internal/kadtest/context.go @@ -6,12 +6,11 @@ import ( "time" ) -// CtxShort returns a Context and a CancelFunc. The context will be -// cancelled after 10 seconds or just before the test binary deadline (as -// specified by the -timeout flag when running the test), whichever is -// sooner. The CancelFunc may be called to cancel the context earlier than -// the deadline. -func CtxShort(t *testing.T) (context.Context, context.CancelFunc) { +// CtxShort returns a Context for tests that are expected to complete quickly. +// The context will be cancelled after 10 seconds or just before the test +// binary deadline (as specified by the -timeout flag when running the test), whichever +// is sooner. 
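+// +// The returned context is cancelled automatically via t.Cleanup, so tests need no explicit teardown (TestSomething is a hypothetical example): +// +//	func TestSomething(t *testing.T) { +//		ctx := kadtest.CtxShort(t) +//		// run the parts of the test that need a bounded context +//	}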
+func CtxShort(t *testing.T) context.Context { t.Helper() timeout := 10 * time.Second @@ -27,5 +26,7 @@ func CtxShort(t *testing.T) (context.Context, context.CancelFunc) { } } - return context.WithDeadline(context.Background(), deadline) + ctx, cancel := context.WithDeadline(context.Background(), deadline) + t.Cleanup(cancel) + return ctx } From 7d838c50becd11de627b499f750456f409163ce6 Mon Sep 17 00:00:00 2001 From: Ian Davis <18375+iand@users.noreply.github.com> Date: Mon, 11 Sep 2023 16:02:37 +0100 Subject: [PATCH 42/64] Give dht and coordinator their own telemetry instances (#891) * Give dht and coordinator their own telemetry instances * Fix some references to old code --- v2/backend_provider.go | 4 +- v2/backend_record.go | 6 +- v2/config.go | 7 +- v2/coord/coordinator.go | 64 ++++++++++------- v2/coord/coordinator_test.go | 50 ++++++------- v2/coord/telemetry.go | 28 ++++++++ v2/dht.go | 13 ++-- v2/tele/tele.go | 133 +++-------------------------------- v2/telemetry.go | 127 +++++++++++++++++++++++++++++++++ 9 files changed, 235 insertions(+), 197 deletions(-) create mode 100644 v2/coord/telemetry.go create mode 100644 v2/telemetry.go diff --git a/v2/backend_provider.go b/v2/backend_provider.go index aee03322..1ddd764f 100644 --- a/v2/backend_provider.go +++ b/v2/backend_provider.go @@ -92,7 +92,7 @@ type ProvidersBackendConfig struct { // Tele holds a reference to the telemetry struct to capture metrics and // traces. - Tele *tele.Telemetry + Tele *Telemetry // AddressFilter is a filter function that any addresses that we attempt to // store or fetch from the peerstore's address book need to pass through. @@ -106,7 +106,7 @@ type ProvidersBackendConfig struct { // configuration is passed to [NewBackendProvider], this default configuration // here is used. func DefaultProviderBackendConfig() (*ProvidersBackendConfig, error) { - telemetry, err := tele.NewWithGlobalProviders() + telemetry, err := NewWithGlobalProviders() if err != nil { return nil, fmt.Errorf("new telemetry: %w", err) } diff --git a/v2/backend_record.go b/v2/backend_record.go index 9655d2b7..ba4a94ba 100644 --- a/v2/backend_record.go +++ b/v2/backend_record.go @@ -11,8 +11,6 @@ import ( record "github.com/libp2p/go-libp2p-record" recpb "github.com/libp2p/go-libp2p-record/pb" "golang.org/x/exp/slog" - - "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) type RecordBackend struct { @@ -29,11 +27,11 @@ type RecordBackendConfig struct { clk clock.Clock MaxRecordAge time.Duration Logger *slog.Logger - Tele *tele.Telemetry + Tele *Telemetry } func DefaultRecordBackendConfig() (*RecordBackendConfig, error) { - telemetry, err := tele.NewWithGlobalProviders() + telemetry, err := NewWithGlobalProviders() if err != nil { return nil, fmt.Errorf("new telemetry: %w", err) } diff --git a/v2/config.go b/v2/config.go index 98136cca..932951d9 100644 --- a/v2/config.go +++ b/v2/config.go @@ -11,7 +11,6 @@ import ( "github.com/libp2p/go-libp2p/core/protocol" ma "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" - "github.com/plprobelab/go-kademlia/coord" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/routing" @@ -21,6 +20,8 @@ import ( "go.opentelemetry.io/otel/trace" "go.uber.org/zap/exp/zapslog" "golang.org/x/exp/slog" + + "github.com/libp2p/go-libp2p-kad-dht/v2/coord" ) // ServiceName is used to scope incoming streams for the resource manager. 
@@ -112,7 +113,7 @@ type Config struct { Mode ModeOpt // Kademlia holds the configuration of the underlying Kademlia implementation. - Kademlia *coord.Config + Kademlia *coord.CoordinatorConfig // BucketSize determines the number of closer peers to return BucketSize int @@ -182,7 +183,7 @@ func DefaultConfig() *Config { return &Config{ Clock: clock.New(), Mode: ModeOptAutoClient, - Kademlia: coord.DefaultConfig(), + Kademlia: coord.DefaultCoordinatorConfig(), BucketSize: 20, // MAGIC ProtocolID: ProtocolIPFS, RoutingTable: nil, // nil because a routing table requires information about the local node. triert.TrieRT will be used if this field is nil. diff --git a/v2/coord/coordinator.go b/v2/coord/coordinator.go index 579701a4..ee5d5011 100644 --- a/v2/coord/coordinator.go +++ b/v2/coord/coordinator.go @@ -15,13 +15,14 @@ import ( "github.com/plprobelab/go-kademlia/network/address" "github.com/plprobelab/go-kademlia/query" "github.com/plprobelab/go-kademlia/routing" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" "go.uber.org/zap/exp/zapslog" "golang.org/x/exp/slog" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" - "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) // A Coordinator coordinates the state machines that comprise a Kademlia DHT @@ -51,6 +52,9 @@ type Coordinator struct { // queryBehaviour is the behaviour responsible for running user-submitted queries queryBehaviour Behaviour[BehaviourEvent, BehaviourEvent] + + // tele provides tracing and metric reporting capabilities + tele *Telemetry } type CoordinatorConfig struct { @@ -64,8 +68,10 @@ type CoordinatorConfig struct { RequestConcurrency int // the maximum number of concurrent requests that each query may have in flight RequestTimeout time.Duration // the timeout queries should use for contacting a single node - Logger *slog.Logger // a structured logger that should be used when logging. - Tele *tele.Telemetry // a struct holding a reference to various metric counters/histograms and a tracer + Logger *slog.Logger // a structured logger that should be used when logging. + + MeterProvider metric.MeterProvider // the meter provider to use when initialising metric instruments + TracerProvider trace.TracerProvider // the tracer provider to use when initialising tracing } // Validate checks the configuration options and returns an error if any have invalid values. 
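+// For example, a caller that wants its own telemetry wiring could build a valid configuration like this (editor's sketch; mp and tp are assumed to be pre-configured meter and tracer providers): +// +//	cfg := DefaultCoordinatorConfig() +//	cfg.MeterProvider = mp +//	cfg.TracerProvider = tp +//	c, err := NewCoordinator(self, rtr, rt, cfg)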
@@ -111,22 +117,24 @@ func (cfg *CoordinatorConfig) Validate() error { } } - if cfg.Tele == nil { + if cfg.MeterProvider == nil { + return &kaderr.ConfigurationError{ + Component: "CoordinatorConfig", + Err: fmt.Errorf("meter provider must not be nil"), + } + } + + if cfg.TracerProvider == nil { return &kaderr.ConfigurationError{ Component: "CoordinatorConfig", - Err: fmt.Errorf("telemetry must not be nil"), + Err: fmt.Errorf("tracer provider must not be nil"), } } return nil } -func DefaultCoordinatorConfig() (*CoordinatorConfig, error) { - telemetry, err := tele.NewWithGlobalProviders() - if err != nil { - return nil, fmt.Errorf("new telemetry: %w", err) - } - +func DefaultCoordinatorConfig() *CoordinatorConfig { return &CoordinatorConfig{ Clock: clock.New(), PeerstoreTTL: 10 * time.Minute, @@ -135,21 +143,24 @@ func DefaultCoordinatorConfig() (*CoordinatorConfig, error) { RequestConcurrency: 3, RequestTimeout: time.Minute, Logger: slog.New(zapslog.NewHandler(logging.Logger("coord").Desugar().Core())), - Tele: telemetry, - }, nil + MeterProvider: otel.GetMeterProvider(), + TracerProvider: otel.GetTracerProvider(), + } } func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[KadKey, kad.NodeID[KadKey]], cfg *CoordinatorConfig) (*Coordinator, error) { if cfg == nil { - c, err := DefaultCoordinatorConfig() - if err != nil { - return nil, fmt.Errorf("default config: %w", err) - } - cfg = c + cfg = DefaultCoordinatorConfig() } else if err := cfg.Validate(); err != nil { return nil, err } + // initialize a new telemetry struct + tele, err := NewTelemetry(cfg.MeterProvider, cfg.TracerProvider) + if err != nil { + return nil, fmt.Errorf("init telemetry: %w", err) + } + qpCfg := query.DefaultPoolConfig() qpCfg.Clock = cfg.Clock qpCfg.Concurrency = cfg.QueryConcurrency @@ -161,7 +172,7 @@ func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[KadKey, if err != nil { return nil, fmt.Errorf("query pool: %w", err) } - queryBehaviour := NewPooledQueryBehaviour(qp, cfg.Logger, cfg.Tele.Tracer) + queryBehaviour := NewPooledQueryBehaviour(qp, cfg.Logger, tele.Tracer) bootstrapCfg := routing.DefaultBootstrapConfig[KadKey, ma.Multiaddr]() bootstrapCfg.Clock = cfg.Clock @@ -199,14 +210,15 @@ func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[KadKey, return nil, fmt.Errorf("probe: %w", err) } - routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, cfg.Logger, cfg.Tele.Tracer) + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, cfg.Logger, tele.Tracer) - networkBehaviour := NewNetworkBehaviour(rtr, cfg.Logger, cfg.Tele.Tracer) + networkBehaviour := NewNetworkBehaviour(rtr, cfg.Logger, tele.Tracer) ctx, cancel := context.WithCancel(context.Background()) d := &Coordinator{ self: self, + tele: tele, cfg: *cfg, rtr: rtr, rt: rt, @@ -248,7 +260,7 @@ func (c *Coordinator) RoutingNotifications() <-chan RoutingNotification { } func (c *Coordinator) eventLoop(ctx context.Context) { - ctx, span := c.cfg.Tele.Tracer.Start(ctx, "Coordinator.eventLoop") + ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.eventLoop") defer span.End() for { var ev BehaviourEvent @@ -272,7 +284,7 @@ func (c *Coordinator) eventLoop(ctx context.Context) { } func (c *Coordinator) dispatchEvent(ctx context.Context, ev BehaviourEvent) { - ctx, span := c.cfg.Tele.Tracer.Start(ctx, "Coordinator.dispatchEvent", trace.WithAttributes(attribute.String("event_type", fmt.Sprintf("%T", ev)))) + ctx, span := c.tele.Tracer.Start(ctx, 
"Coordinator.dispatchEvent", trace.WithAttributes(attribute.String("event_type", fmt.Sprintf("%T", ev)))) defer span.End() switch ev := ev.(type) { @@ -335,7 +347,7 @@ func (c *Coordinator) PutValue(ctx context.Context, r Value, q int) error { // Query traverses the DHT calling fn for each node visited. func (c *Coordinator) Query(ctx context.Context, target KadKey, fn QueryFunc) (QueryStats, error) { - ctx, span := c.cfg.Tele.Tracer.Start(ctx, "Coordinator.Query") + ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.Query") defer span.End() ctx, cancel := context.WithCancel(ctx) @@ -419,7 +431,7 @@ func (c *Coordinator) Query(ctx context.Context, target KadKey, fn QueryFunc) (Q // If the routing table is updated as a result of this operation an EventRoutingUpdated notification // is emitted on the routing notification channel. func (c *Coordinator) AddNodes(ctx context.Context, ais []peer.AddrInfo, ttl time.Duration) error { - ctx, span := c.cfg.Tele.Tracer.Start(ctx, "Coordinator.AddNodes") + ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.AddNodes") defer span.End() for _, ai := range ais { if ai.ID == c.self { @@ -441,7 +453,7 @@ func (c *Coordinator) AddNodes(ctx context.Context, ais []peer.AddrInfo, ttl tim // Bootstrap instructs the dht to begin bootstrapping the routing table. func (c *Coordinator) Bootstrap(ctx context.Context, seeds []peer.ID) error { - ctx, span := c.cfg.Tele.Tracer.Start(ctx, "Coordinator.Bootstrap") + ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.Bootstrap") defer span.End() c.routingBehaviour.Notify(ctx, &EventStartBootstrap{ // Bootstrap state machine uses the message diff --git a/v2/coord/coordinator_test.go b/v2/coord/coordinator_test.go index ec62bedd..694ea7fc 100644 --- a/v2/coord/coordinator_test.go +++ b/v2/coord/coordinator_test.go @@ -17,7 +17,6 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" - "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) const peerstoreTTL = 10 * time.Minute @@ -75,8 +74,8 @@ func (w *notificationWatcher) Expect(ctx context.Context, expected RoutingNotifi } // TracingTelemetry may be used to create a Telemetry that traces a test -func TracingTelemetry(t *testing.T) *tele.Telemetry { - telemetry, err := tele.New(otel.GetMeterProvider(), kadtest.JaegerTracerProvider(t)) +func TracingTelemetry(t *testing.T) *Telemetry { + telemetry, err := NewTelemetry(otel.GetMeterProvider(), kadtest.JaegerTracerProvider(t)) if err != nil { t.Fatalf("unexpected error creating telemetry: %v", err) } @@ -86,23 +85,20 @@ func TracingTelemetry(t *testing.T) *tele.Telemetry { func TestConfigValidate(t *testing.T) { t.Run("default is valid", func(t *testing.T) { - cfg, err := DefaultCoordinatorConfig() - require.NoError(t, err) + cfg := DefaultCoordinatorConfig() require.NoError(t, cfg.Validate()) }) t.Run("clock is not nil", func(t *testing.T) { - cfg, err := DefaultCoordinatorConfig() - require.NoError(t, err) + cfg := DefaultCoordinatorConfig() cfg.Clock = nil require.Error(t, cfg.Validate()) }) t.Run("query concurrency positive", func(t *testing.T) { - cfg, err := DefaultCoordinatorConfig() - require.NoError(t, err) + cfg := DefaultCoordinatorConfig() cfg.QueryConcurrency = 0 require.Error(t, cfg.Validate()) @@ -111,8 +107,7 @@ func TestConfigValidate(t *testing.T) { }) t.Run("query timeout positive", func(t *testing.T) { - cfg, err := DefaultCoordinatorConfig() - require.NoError(t, err) + cfg := 
DefaultCoordinatorConfig() cfg.QueryTimeout = 0 require.Error(t, cfg.Validate()) @@ -121,8 +116,7 @@ func TestConfigValidate(t *testing.T) { }) t.Run("request concurrency positive", func(t *testing.T) { - cfg, err := DefaultCoordinatorConfig() - require.NoError(t, err) + cfg := DefaultCoordinatorConfig() cfg.RequestConcurrency = 0 require.Error(t, cfg.Validate()) @@ -131,8 +125,7 @@ func TestConfigValidate(t *testing.T) { }) t.Run("request timeout positive", func(t *testing.T) { - cfg, err := DefaultCoordinatorConfig() - require.NoError(t, err) + cfg := DefaultCoordinatorConfig() cfg.RequestTimeout = 0 require.Error(t, cfg.Validate()) @@ -141,18 +134,21 @@ func TestConfigValidate(t *testing.T) { }) t.Run("logger not nil", func(t *testing.T) { - cfg, err := DefaultCoordinatorConfig() - require.NoError(t, err) + cfg := DefaultCoordinatorConfig() cfg.Logger = nil require.Error(t, cfg.Validate()) }) - t.Run("telemetry not nil", func(t *testing.T) { - cfg, err := DefaultCoordinatorConfig() - require.NoError(t, err) + t.Run("meter provider not nil", func(t *testing.T) { + cfg := DefaultCoordinatorConfig() + cfg.MeterProvider = nil + require.Error(t, cfg.Validate()) + }) - cfg.Tele = nil + t.Run("tracer provider not nil", func(t *testing.T) { + cfg := DefaultCoordinatorConfig() + cfg.TracerProvider = nil require.Error(t, cfg.Validate()) }) } @@ -163,8 +159,7 @@ func TestExhaustiveQuery(t *testing.T) { clk := clock.NewMock() _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - ccfg, err := DefaultCoordinatorConfig() - require.NoError(t, err) + ccfg := DefaultCoordinatorConfig() ccfg.Clock = clk ccfg.PeerstoreTTL = peerstoreTTL @@ -203,8 +198,7 @@ func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - ccfg, err := DefaultCoordinatorConfig() - require.NoError(t, err) + ccfg := DefaultCoordinatorConfig() ccfg.Clock = clk ccfg.PeerstoreTTL = peerstoreTTL @@ -265,8 +259,7 @@ func TestBootstrap(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - ccfg, err := DefaultCoordinatorConfig() - require.NoError(t, err) + ccfg := DefaultCoordinatorConfig() ccfg.Clock = clk ccfg.PeerstoreTTL = peerstoreTTL @@ -318,8 +311,7 @@ func TestIncludeNode(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - ccfg, err := DefaultCoordinatorConfig() - require.NoError(t, err) + ccfg := DefaultCoordinatorConfig() ccfg.Clock = clk ccfg.PeerstoreTTL = peerstoreTTL diff --git a/v2/coord/telemetry.go b/v2/coord/telemetry.go new file mode 100644 index 00000000..11873503 --- /dev/null +++ b/v2/coord/telemetry.go @@ -0,0 +1,28 @@ +package coord + +import ( + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" + + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" +) + +// Telemetry is the struct that holds a reference to all metrics and the tracer used +// by the coordinator and its components. +// Make sure to also register the [MeterProviderOpts] with your custom or the global +// [metric.MeterProvider]. +type Telemetry struct { + Tracer trace.Tracer + // TODO: define metrics produced by coordinator +} + +// NewTelemetry initializes a Telemetry struct with the given meter and tracer providers. 
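+// For now it only wires up the tracer; the coordinator's own metric instruments are still to be defined (see the TODO in the function body).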
+func NewTelemetry(meterProvider metric.MeterProvider, tracerProvider trace.TracerProvider) (*Telemetry, error) { + t := &Telemetry{ + Tracer: tracerProvider.Tracer(tele.TracerName), + } + + // TODO: Initialize metrics produced by the coordinator + + return t, nil +} diff --git a/v2/dht.go b/v2/dht.go index 31787ae5..5191349f 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -20,7 +20,6 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/coord" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" - "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) // DHT is an implementation of Kademlia with S/Kademlia modifications. @@ -58,7 +57,7 @@ type DHT struct { sub event.Subscription // tele holds a reference to a telemetry struct - tele *tele.Telemetry + tele *Telemetry } // New constructs a new [DHT] for the given underlying host and with the given @@ -88,7 +87,7 @@ func New(h host.Host, cfg *Config) (*DHT, error) { } // initialize a new telemetry struct - d.tele, err = tele.New(cfg.MeterProvider, cfg.TracerProvider) + d.tele, err = NewTelemetry(cfg.MeterProvider, cfg.TracerProvider) if err != nil { return nil, fmt.Errorf("init telemetry: %w", err) } @@ -152,11 +151,9 @@ func New(h host.Host, cfg *Config) (*DHT, error) { } // instantiate a new Kademlia DHT coordinator. - coordCfg, err := coord.DefaultCoordinatorConfig() - if err != nil { - return nil, fmt.Errorf("new coordinator config: %w", err) - } - coordCfg.Tele = d.tele + coordCfg := coord.DefaultCoordinatorConfig() + coordCfg.MeterProvider = cfg.MeterProvider + coordCfg.TracerProvider = cfg.TracerProvider d.kad, err = coord.NewCoordinator(d.host.ID(), &Router{host: h}, d.rt, coordCfg) if err != nil { diff --git a/v2/tele/tele.go b/v2/tele/tele.go index a3571ffb..5ee01666 100644 --- a/v2/tele/tele.go +++ b/v2/tele/tele.go @@ -2,141 +2,24 @@ package tele import ( "context" - "fmt" - "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk/instrumentation" motel "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/trace" ) // ctxKey is an unexported type alias for the value of a context key. This is // used to attach metric values to a context and get them out of a context. type ctxKey struct{} -var ( - meterName = "github.com/libp2p/go-libp2p-kad-dht/v2" - tracerName = "go-libp2p-kad-dht" - - // attrsCtxKey is the actual context key value that's used as a key for - // metric values that are attached to a context. - attrsCtxKey = ctxKey{} +const ( + MeterName = "github.com/libp2p/go-libp2p-kad-dht/v2" + TracerName = "go-libp2p-kad-dht" ) -// Telemetry is the struct that holds a reference to all metrics and the tracer. -// Initialize this struct with [New] or [NewWithGlobalProviders]. Make sure -// to also register the [MeterProviderOpts] with your custom or the global -// [metric.MeterProvider]. -// -// To see the documentation for each metric below, check out [New] and the -// metric.WithDescription() calls when initializing each metric.
-type Telemetry struct { - Tracer trace.Tracer - ReceivedMessages metric.Int64Counter - ReceivedMessageErrors metric.Int64Counter - ReceivedBytes metric.Int64Histogram - InboundRequestLatency metric.Float64Histogram - OutboundRequestLatency metric.Float64Histogram - SentMessages metric.Int64Counter - SentMessageErrors metric.Int64Counter - SentRequests metric.Int64Counter - SentRequestErrors metric.Int64Counter - SentBytes metric.Int64Histogram - LRUCache metric.Int64Counter - NetworkSize metric.Int64Counter -} - -// NewWithGlobalProviders uses the global meter and tracer providers from -// opentelemetry. Check out the documentation of [MeterProviderOpts] for -// implications of using this constructor. -func NewWithGlobalProviders() (*Telemetry, error) { - return New(otel.GetMeterProvider(), otel.GetTracerProvider()) -} - -// New initializes a Telemetry struct with the given meter and tracer providers. -// It constructs the different metric counters and histograms. The histograms -// have custom boundaries. Therefore, the given [metric.MeterProvider] should -// have the custom view registered that [MeterProviderOpts] returns. -func New(meterProvider metric.MeterProvider, tracerProvider trace.TracerProvider) (*Telemetry, error) { - var err error - - if meterProvider == nil { - meterProvider = otel.GetMeterProvider() - } - - if tracerProvider == nil { - tracerProvider = otel.GetTracerProvider() - } - - t := &Telemetry{ - Tracer: tracerProvider.Tracer(tracerName), - } - - meter := meterProvider.Meter(meterName) - t.ReceivedMessages, err = meter.Int64Counter("received_messages", metric.WithDescription("Total number of messages received per RPC"), metric.WithUnit("1")) - if err != nil { - return nil, fmt.Errorf("received_messages counter: %w", err) - } - - t.ReceivedMessageErrors, err = meter.Int64Counter("received_message_errors", metric.WithDescription("Total number of errors for messages received per RPC"), metric.WithUnit("1")) - if err != nil { - return nil, fmt.Errorf("received_message_errors counter: %w", err) - } - - t.ReceivedBytes, err = meter.Int64Histogram("received_bytes", metric.WithDescription("Total received bytes per RPC"), metric.WithUnit("By")) - if err != nil { - return nil, fmt.Errorf("received_bytes histogram: %w", err) - } - - t.InboundRequestLatency, err = meter.Float64Histogram("inbound_request_latency", metric.WithDescription("Latency per RPC"), metric.WithUnit("ms")) - if err != nil { - return nil, fmt.Errorf("inbound_request_latency histogram: %w", err) - } - - t.OutboundRequestLatency, err = meter.Float64Histogram("outbound_request_latency", metric.WithDescription("Latency per RPC"), metric.WithUnit("ms")) - if err != nil { - return nil, fmt.Errorf("outbound_request_latency histogram: %w", err) - } - - t.SentMessages, err = meter.Int64Counter("sent_messages", metric.WithDescription("Total number of messages sent per RPC"), metric.WithUnit("1")) - if err != nil { - return nil, fmt.Errorf("sent_messages counter: %w", err) - } - - t.SentMessageErrors, err = meter.Int64Counter("sent_message_errors", metric.WithDescription("Total number of errors for messages sent per RPC"), metric.WithUnit("1")) - if err != nil { - return nil, fmt.Errorf("sent_message_errors counter: %w", err) - } - - t.SentRequests, err = meter.Int64Counter("sent_requests", metric.WithDescription("Total number of requests sent per RPC"), metric.WithUnit("1")) - if err != nil { - return nil, fmt.Errorf("sent_requests counter: %w", err) - } - - t.SentRequestErrors, err = 
meter.Int64Counter("sent_request_errors", metric.WithDescription("Total number of errors for requests sent per RPC"), metric.WithUnit("1")) - if err != nil { - return nil, fmt.Errorf("sent_request_errors counter: %w", err) - } - - t.SentBytes, err = meter.Int64Histogram("sent_bytes", metric.WithDescription("Total sent bytes per RPC"), metric.WithUnit("By")) - if err != nil { - return nil, fmt.Errorf("sent_bytes histogram: %w", err) - } - - t.LRUCache, err = meter.Int64Counter("lru_cache", metric.WithDescription("Cache hit or miss counter"), metric.WithUnit("1")) - if err != nil { - return nil, fmt.Errorf("lru_cache counter: %w", err) - } - - t.NetworkSize, err = meter.Int64Counter("network_size", metric.WithDescription("Network size estimation"), metric.WithUnit("1")) - if err != nil { - return nil, fmt.Errorf("network_size counter: %w", err) - } - - return t, nil -} +// attrsCtxKey is the actual context key value that's used as a key for +// metric values that are attached to a context. +var attrsCtxKey = ctxKey{} // MeterProviderOpts is a method that returns metric options. Make sure // to register these options to your [metric.MeterProvider]. Unfortunately, @@ -157,7 +40,7 @@ func New(meterProvider metric.MeterProvider, tracerProvider trace.TracerProvider // go-libp2p-kad-dht. var MeterProviderOpts = []motel.Option{ motel.WithView(motel.NewView( - motel.Instrument{Name: "*_bytes", Scope: instrumentation.Scope{Name: meterName}}, + motel.Instrument{Name: "*_bytes", Scope: instrumentation.Scope{Name: MeterName}}, motel.Stream{ Aggregation: motel.AggregationExplicitBucketHistogram{ Boundaries: []float64{1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296}, @@ -165,7 +48,7 @@ var MeterProviderOpts = []motel.Option{ }, )), motel.WithView(motel.NewView( - motel.Instrument{Name: "*_request_latency", Scope: instrumentation.Scope{Name: meterName}}, + motel.Instrument{Name: "*_request_latency", Scope: instrumentation.Scope{Name: MeterName}}, motel.Stream{ Aggregation: motel.AggregationExplicitBucketHistogram{ Boundaries: []float64{0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000}, diff --git a/v2/telemetry.go b/v2/telemetry.go new file mode 100644 index 00000000..86e1f3fc --- /dev/null +++ b/v2/telemetry.go @@ -0,0 +1,127 @@ +package dht + +import ( + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" + + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" +) + +// Telemetry is the struct that holds a reference to all metrics and the tracer. +// Initialize this struct with [NewTelemetry]. Make sure +// to also register the [MeterProviderOpts] with your custom or the global +// [metric.MeterProvider]. +// +// To see the documentation for each metric below, check out [NewTelemetry] and the +// metric.WithDescription() calls when initializing each metric. 
+type Telemetry struct { + Tracer trace.Tracer + ReceivedMessages metric.Int64Counter + ReceivedMessageErrors metric.Int64Counter + ReceivedBytes metric.Int64Histogram + InboundRequestLatency metric.Float64Histogram + OutboundRequestLatency metric.Float64Histogram + SentMessages metric.Int64Counter + SentMessageErrors metric.Int64Counter + SentRequests metric.Int64Counter + SentRequestErrors metric.Int64Counter + SentBytes metric.Int64Histogram + LRUCache metric.Int64Counter + NetworkSize metric.Int64Counter +} + +// NewWithGlobalProviders uses the global meter and tracer providers from +// opentelemetry. Check out the documentation of [MeterProviderOpts] for +// implications of using this constructor. +func NewWithGlobalProviders() (*Telemetry, error) { + return NewTelemetry(otel.GetMeterProvider(), otel.GetTracerProvider()) +} + +// NewTelemetry initializes a Telemetry struct with the given meter and tracer providers. +// It constructs the different metric counters and histograms. The histograms +// have custom boundaries. Therefore, the given [metric.MeterProvider] should +// have the custom view registered that [MeterProviderOpts] returns. +func NewTelemetry(meterProvider metric.MeterProvider, tracerProvider trace.TracerProvider) (*Telemetry, error) { + var err error + + if meterProvider == nil { + meterProvider = otel.GetMeterProvider() + } + + if tracerProvider == nil { + tracerProvider = otel.GetTracerProvider() + } + + t := &Telemetry{ + Tracer: tracerProvider.Tracer(tele.TracerName), + } + + meter := meterProvider.Meter(tele.MeterName) + + // Initialize metrics for the DHT + + t.ReceivedMessages, err = meter.Int64Counter("received_messages", metric.WithDescription("Total number of messages received per RPC"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("received_messages counter: %w", err) + } + + t.ReceivedMessageErrors, err = meter.Int64Counter("received_message_errors", metric.WithDescription("Total number of errors for messages received per RPC"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("received_message_errors counter: %w", err) + } + + t.ReceivedBytes, err = meter.Int64Histogram("received_bytes", metric.WithDescription("Total received bytes per RPC"), metric.WithUnit("By")) + if err != nil { + return nil, fmt.Errorf("received_bytes histogram: %w", err) + } + + t.InboundRequestLatency, err = meter.Float64Histogram("inbound_request_latency", metric.WithDescription("Latency per RPC"), metric.WithUnit("ms")) + if err != nil { + return nil, fmt.Errorf("inbound_request_latency histogram: %w", err) + } + + t.OutboundRequestLatency, err = meter.Float64Histogram("outbound_request_latency", metric.WithDescription("Latency per RPC"), metric.WithUnit("ms")) + if err != nil { + return nil, fmt.Errorf("outbound_request_latency histogram: %w", err) + } + + t.SentMessages, err = meter.Int64Counter("sent_messages", metric.WithDescription("Total number of messages sent per RPC"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("sent_messages counter: %w", err) + } + + t.SentMessageErrors, err = meter.Int64Counter("sent_message_errors", metric.WithDescription("Total number of errors for messages sent per RPC"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("sent_message_errors counter: %w", err) + } + + t.SentRequests, err = meter.Int64Counter("sent_requests", metric.WithDescription("Total number of requests sent per RPC"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("sent_requests counter: 
%w", err) + } + + t.SentRequestErrors, err = meter.Int64Counter("sent_request_errors", metric.WithDescription("Total number of errors for requests sent per RPC"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("sent_request_errors counter: %w", err) + } + + t.SentBytes, err = meter.Int64Histogram("sent_bytes", metric.WithDescription("Total sent bytes per RPC"), metric.WithUnit("By")) + if err != nil { + return nil, fmt.Errorf("sent_bytes histogram: %w", err) + } + + t.LRUCache, err = meter.Int64Counter("lru_cache", metric.WithDescription("Cache hit or miss counter"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("lru_cache counter: %w", err) + } + + t.NetworkSize, err = meter.Int64Counter("network_size", metric.WithDescription("Network size estimation"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("network_size counter: %w", err) + } + + return t, nil +} From 77dbff015c9846c3d37ab87de97a6befae66a188 Mon Sep 17 00:00:00 2001 From: Ian Davis <18375+iand@users.noreply.github.com> Date: Wed, 13 Sep 2023 11:24:22 +0100 Subject: [PATCH 43/64] Migrate go-kademlia state machines (#893) * Migrate go-kademlia state machines * Clean up naming of events * Add Node generic parameter * Remove unused helpers * Remove unused fields * Add peer addresses to peerstore * go fmt * Remove some more usages of NodeID fields and args * Replace usage of key.Key256 by kadt.Key * Use kadt.PeerID rather than peer.ID in many places * Remove CloserNodeIDs --- v2/config.go | 13 +- v2/coord/conversion.go | 49 +- v2/coord/coordinator.go | 41 +- v2/coord/coordinator_test.go | 12 +- v2/coord/coretypes.go | 17 +- v2/coord/event.go | 21 +- v2/coord/internal/nettest/layouts.go | 4 +- v2/coord/internal/nettest/routing.go | 4 +- v2/coord/internal/nettest/topology.go | 7 +- v2/coord/internal/tiny/node.go | 31 + v2/coord/network.go | 42 +- v2/coord/query.go | 38 +- v2/coord/query/iter.go | 99 +++ v2/coord/query/iter_test.go | 78 ++ v2/coord/query/node.go | 40 + v2/coord/query/pool.go | 354 ++++++++ v2/coord/query/pool_test.go | 333 ++++++++ v2/coord/query/query.go | 387 +++++++++ v2/coord/query/query_test.go | 1066 +++++++++++++++++++++++++ v2/coord/routing.go | 101 ++- v2/coord/routing/bootstrap.go | 248 ++++++ v2/coord/routing/bootstrap_test.go | 222 +++++ v2/coord/routing/include.go | 289 +++++++ v2/coord/routing/include_test.go | 271 +++++++ v2/coord/routing/probe.go | 508 ++++++++++++ v2/coord/routing/probe_test.go | 841 +++++++++++++++++++ v2/coord/routing_test.go | 60 +- v2/dht.go | 20 +- v2/handlers.go | 5 +- v2/kadt/kadt.go | 14 +- v2/pb/msg.aux.go | 10 +- v2/router.go | 8 +- v2/tele/tele.go | 12 + 33 files changed, 4998 insertions(+), 247 deletions(-) create mode 100644 v2/coord/internal/tiny/node.go create mode 100644 v2/coord/query/iter.go create mode 100644 v2/coord/query/iter_test.go create mode 100644 v2/coord/query/node.go create mode 100644 v2/coord/query/pool.go create mode 100644 v2/coord/query/pool_test.go create mode 100644 v2/coord/query/query.go create mode 100644 v2/coord/query/query_test.go create mode 100644 v2/coord/routing/bootstrap.go create mode 100644 v2/coord/routing/bootstrap_test.go create mode 100644 v2/coord/routing/include.go create mode 100644 v2/coord/routing/include_test.go create mode 100644 v2/coord/routing/probe.go create mode 100644 v2/coord/routing/probe_test.go diff --git a/v2/config.go b/v2/config.go index 932951d9..0bb6150a 100644 --- a/v2/config.go +++ b/v2/config.go @@ -11,9 +11,6 @@ import ( 
"github.com/libp2p/go-libp2p/core/protocol" ma "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" - "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/key" - "github.com/plprobelab/go-kademlia/routing" "github.com/plprobelab/go-kademlia/routing/triert" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/metric" @@ -22,6 +19,8 @@ import ( "golang.org/x/exp/slog" "github.com/libp2p/go-libp2p-kad-dht/v2/coord" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/routing" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) // ServiceName is used to scope incoming streams for the resource manager. @@ -128,7 +127,7 @@ type Config struct { // [triert.TrieRT] routing table will be used. This field will be nil // in the default configuration because a routing table requires information // about the local node. - RoutingTable routing.RoutingTableCpl[key.Key256, kad.NodeID[key.Key256]] + RoutingTable routing.RoutingTableCpl[kadt.Key, kadt.PeerID] // The Backends field holds a map of key namespaces to their corresponding // backend implementation. For example, if we received an IPNS record, the @@ -200,9 +199,9 @@ func DefaultConfig() *Config { // DefaultRoutingTable returns a triert.TrieRT routing table. This routing table // cannot be initialized in [DefaultConfig] because it requires information // about the local peer. -func DefaultRoutingTable(nodeID kad.NodeID[key.Key256]) (routing.RoutingTableCpl[key.Key256, kad.NodeID[key.Key256]], error) { - rtCfg := triert.DefaultConfig[key.Key256, kad.NodeID[key.Key256]]() - rt, err := triert.New[key.Key256, kad.NodeID[key.Key256]](nodeID, rtCfg) +func DefaultRoutingTable(nodeID kadt.PeerID) (routing.RoutingTableCpl[kadt.Key, kadt.PeerID], error) { + rtCfg := triert.DefaultConfig[kadt.Key, kadt.PeerID]() + rt, err := triert.New[kadt.Key, kadt.PeerID](nodeID, rtCfg) if err != nil { return nil, fmt.Errorf("new trie routing table: %w", err) } diff --git a/v2/coord/conversion.go b/v2/coord/conversion.go index 3a6b0ba8..2cdc3f2e 100644 --- a/v2/coord/conversion.go +++ b/v2/coord/conversion.go @@ -2,61 +2,38 @@ package coord import ( "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) -// NodeInfoToAddrInfo converts a kad.NodeInfo to a peer.AddrInfo. -// This function will panic if info.ID() does not return a kadt.PeerID -func NodeInfoToAddrInfo(info kad.NodeInfo[KadKey, ma.Multiaddr]) peer.AddrInfo { - peerID := info.ID().(kadt.PeerID) - return peer.AddrInfo{ - ID: peer.ID(peerID), - Addrs: info.Addresses(), - } -} - -// NodeIDToAddrInfo converts a kad.NodeID to a peer.AddrInfo with no addresses. +// KadPeerIDToAddrInfo converts a kad.NodeID to a peer.AddrInfo with no addresses. // This function will panic if id's underlying type is not kadt.PeerID -func NodeIDToAddrInfo(id kad.NodeID[KadKey]) peer.AddrInfo { +func KadPeerIDToAddrInfo(id kad.NodeID[kadt.Key]) peer.AddrInfo { peerID := id.(kadt.PeerID) return peer.AddrInfo{ ID: peer.ID(peerID), } } -// AddrInfoToNodeID converts a peer.AddrInfo to a kad.NodeID. -func AddrInfoToNodeID(ai peer.AddrInfo) kad.NodeID[KadKey] { +// AddrInfoToKadPeerID converts a peer.AddrInfo to a kad.NodeID. +func AddrInfoToKadPeerID(ai peer.AddrInfo) kadt.PeerID { return kadt.PeerID(ai.ID) } -// SliceOfNodeInfoToSliceOfAddrInfo converts a kad.NodeInfo to a peer.AddrInfo. 
-// This function will panic if any info.ID() does not return a kadt.PeerID -func SliceOfNodeInfoToSliceOfAddrInfo(infos []kad.NodeInfo[KadKey, ma.Multiaddr]) []peer.AddrInfo { - peers := make([]peer.AddrInfo, len(infos)) - for i := range infos { - peerID := infos[i].ID().(kadt.PeerID) - peers[i] = peer.AddrInfo{ - ID: peer.ID(peerID), - Addrs: infos[i].Addresses(), - } - } - return peers -} - -// SliceOfPeerIDToSliceOfNodeID converts a slice peer.ID to a slice of kad.NodeID -func SliceOfPeerIDToSliceOfNodeID(peers []peer.ID) []kad.NodeID[KadKey] { - nodes := make([]kad.NodeID[KadKey], len(peers)) +// SliceOfPeerIDToSliceOfKadPeerID converts a slice of peer.ID to a slice of kadt.PeerID +func SliceOfPeerIDToSliceOfKadPeerID(peers []peer.ID) []kadt.PeerID { + nodes := make([]kadt.PeerID, len(peers)) for i := range peers { nodes[i] = kadt.PeerID(peers[i]) } return nodes } -// NodeIDToPeerID converts a kad.NodeID to a peer.ID. -// This function will panic if id's underlying type is not kadt.PeerID -func NodeIDToPeerID(id kad.NodeID[KadKey]) peer.ID { - return peer.ID(id.(kadt.PeerID)) +func SliceOfAddrInfoToSliceOfKadPeerID(ais []peer.AddrInfo) []kadt.PeerID { + nodes := make([]kadt.PeerID, len(ais)) + for i := range ais { + nodes[i] = kadt.PeerID(ais[i].ID) + } + return nodes } diff --git a/v2/coord/coordinator.go b/v2/coord/coordinator.go index ee5d5011..9641c165 100644 --- a/v2/coord/coordinator.go +++ b/v2/coord/coordinator.go @@ -13,8 +13,6 @@ import ( "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/kaderr" "github.com/plprobelab/go-kademlia/network/address" - "github.com/plprobelab/go-kademlia/query" - "github.com/plprobelab/go-kademlia/routing" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" @@ -22,13 +20,15 @@ import ( "go.uber.org/zap/exp/zapslog" "golang.org/x/exp/slog" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/query" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/routing" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) // A Coordinator coordinates the state machines that comprise a Kademlia DHT type Coordinator struct { // self is the peer id of the system the dht is running on - self peer.ID + self kadt.PeerID // cancel is used to cancel all running goroutines when the coordinator is cleaning up cancel context.CancelFunc @@ -37,7 +37,7 @@ type Coordinator struct { cfg CoordinatorConfig // rt is the routing table used to look up nodes by distance - rt kad.RoutingTable[KadKey, kad.NodeID[KadKey]] + rt kad.RoutingTable[kadt.Key, kadt.PeerID] // rtr is the message router used to send messages rtr Router @@ -148,7 +148,7 @@ func DefaultCoordinatorConfig() *CoordinatorConfig { } } -func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[KadKey, kad.NodeID[KadKey]], cfg *CoordinatorConfig) (*Coordinator, error) { +func NewCoordinator(self kadt.PeerID, rtr Router, rt routing.RoutingTableCpl[kadt.Key, kadt.PeerID], cfg *CoordinatorConfig) (*Coordinator, error) { if cfg == nil { cfg = DefaultCoordinatorConfig() } else if err := cfg.Validate(); err != nil { @@ -168,19 +168,19 @@ func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[KadKey, qpCfg.QueryConcurrency = cfg.RequestConcurrency qpCfg.RequestTimeout = cfg.RequestTimeout - qp, err := query.NewPool[KadKey, ma.Multiaddr](kadt.PeerID(self), qpCfg) + qp, err := query.NewPool[kadt.Key](kadt.PeerID(self), qpCfg) if err != nil { return nil, fmt.Errorf("query pool: %w", err) } queryBehaviour := NewPooledQueryBehaviour(qp, 
cfg.Logger, tele.Tracer) - bootstrapCfg := routing.DefaultBootstrapConfig[KadKey, ma.Multiaddr]() + bootstrapCfg := routing.DefaultBootstrapConfig[kadt.Key]() bootstrapCfg.Clock = cfg.Clock bootstrapCfg.Timeout = cfg.QueryTimeout bootstrapCfg.RequestConcurrency = cfg.RequestConcurrency bootstrapCfg.RequestTimeout = cfg.RequestTimeout - bootstrap, err := routing.NewBootstrap[KadKey, ma.Multiaddr](kadt.PeerID(self), bootstrapCfg) + bootstrap, err := routing.NewBootstrap[kadt.Key](kadt.PeerID(self), bootstrapCfg) if err != nil { return nil, fmt.Errorf("bootstrap: %w", err) } @@ -194,7 +194,7 @@ func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[KadKey, // includeCfg.Concurrency = cfg.IncludeConcurrency // includeCfg.Timeout = cfg.IncludeTimeout - include, err := routing.NewInclude[KadKey, ma.Multiaddr](rt, includeCfg) + include, err := routing.NewInclude[kadt.Key, kadt.PeerID](rt, includeCfg) if err != nil { return nil, fmt.Errorf("include: %w", err) } @@ -205,7 +205,7 @@ func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[KadKey, // TODO: expose config // probeCfg.Concurrency = cfg.ProbeConcurrency - probe, err := routing.NewProbe[KadKey, ma.Multiaddr](rt, probeCfg) + probe, err := routing.NewProbe[kadt.Key](rt, probeCfg) if err != nil { return nil, fmt.Errorf("probe: %w", err) } @@ -241,13 +241,13 @@ func (c *Coordinator) Close() error { return nil } -func (c *Coordinator) ID() peer.ID { +func (c *Coordinator) ID() kadt.PeerID { return c.self } func (c *Coordinator) Addresses() []ma.Multiaddr { // TODO: return configured listen addresses - info, err := c.rtr.GetNodeInfo(context.TODO(), c.self) + info, err := c.rtr.GetNodeInfo(context.TODO(), peer.ID(c.self)) if err != nil { return nil } @@ -312,7 +312,7 @@ func (c *Coordinator) GetNode(ctx context.Context, id peer.ID) (Node, error) { return nil, ErrNodeNotFound } - nh, err := c.networkBehaviour.getNodeHandler(ctx, id) + nh, err := c.networkBehaviour.getNodeHandler(ctx, kadt.PeerID(id)) if err != nil { return nil, err } @@ -320,11 +320,11 @@ func (c *Coordinator) GetNode(ctx context.Context, id peer.ID) (Node, error) { } // GetClosestNodes requests the n closest nodes to the key from the node's local routing table. -func (c *Coordinator) GetClosestNodes(ctx context.Context, k KadKey, n int) ([]Node, error) { +func (c *Coordinator) GetClosestNodes(ctx context.Context, k kadt.Key, n int) ([]Node, error) { closest := c.rt.NearestNodes(k, n) nodes := make([]Node, 0, len(closest)) for _, id := range closest { - nh, err := c.networkBehaviour.getNodeHandler(ctx, NodeIDToPeerID(id)) + nh, err := c.networkBehaviour.getNodeHandler(ctx, id) if err != nil { return nil, err } @@ -335,7 +335,7 @@ func (c *Coordinator) GetClosestNodes(ctx context.Context, k KadKey, n int) ([]N // GetValue requests that the node return any value associated with the supplied key. // If the node does not have a value for the key it returns ErrValueNotFound. -func (c *Coordinator) GetValue(ctx context.Context, k KadKey) (Value, error) { +func (c *Coordinator) GetValue(ctx context.Context, k kadt.Key) (Value, error) { panic("not implemented") } @@ -346,7 +346,7 @@ func (c *Coordinator) PutValue(ctx context.Context, r Value, q int) error { } // Query traverses the DHT calling fn for each node visited. 
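+// A usage sketch; the exact QueryFunc callback shape is assumed here for
+// illustration, and fn may stop the traversal early by returning an error:
+//
+//	stats, err := c.Query(ctx, target, func(ctx context.Context, node Node, stats QueryStats) error {
+//		fmt.Println("visited:", node.ID())
+//		return nil
+//	})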
-func (c *Coordinator) Query(ctx context.Context, target KadKey, fn QueryFunc) (QueryStats, error) { +func (c *Coordinator) Query(ctx context.Context, target kadt.Key, fn QueryFunc) (QueryStats, error) { ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.Query") defer span.End() @@ -393,7 +393,7 @@ func (c *Coordinator) Query(ctx context.Context, target KadKey, fn QueryFunc) (Q Success: ev.Stats.Success, Failure: ev.Stats.Failure, } - nh, err := c.networkBehaviour.getNodeHandler(ctx, ev.NodeID) + nh, err := c.networkBehaviour.getNodeHandler(ctx, kadt.PeerID(ev.NodeID)) if err != nil { // ignore unknown node break @@ -430,11 +430,11 @@ func (c *Coordinator) Query(ctx context.Context, target KadKey, fn QueryFunc) (Q // AddNodes suggests new DHT nodes and their associated addresses to be added to the routing table. // If the routing table is updated as a result of this operation an EventRoutingUpdated notification // is emitted on the routing notification channel. -func (c *Coordinator) AddNodes(ctx context.Context, ais []peer.AddrInfo, ttl time.Duration) error { +func (c *Coordinator) AddNodes(ctx context.Context, ais []peer.AddrInfo) error { ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.AddNodes") defer span.End() for _, ai := range ais { - if ai.ID == c.self { + if ai.ID == peer.ID(c.self) { // skip self continue } @@ -443,7 +443,6 @@ func (c *Coordinator) AddNodes(ctx context.Context, ais []peer.AddrInfo, ttl tim c.routingBehaviour.Notify(ctx, &EventAddAddrInfo{ NodeInfo: ai, - TTL: ttl, }) } diff --git a/v2/coord/coordinator_test.go b/v2/coord/coordinator_test.go index 694ea7fc..f6f29837 100644 --- a/v2/coord/coordinator_test.go +++ b/v2/coord/coordinator_test.go @@ -167,7 +167,7 @@ func TestExhaustiveQuery(t *testing.T) { // A (ids[0]) is looking for D (ids[3]) // A will first ask B, B will reply with C's address (and A's address) // A will then ask C, C will reply with D's address (and B's address) - self := nodes[0].NodeInfo.ID + self := kadt.PeerID(nodes[0].NodeInfo.ID) c, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg) require.NoError(t, err) @@ -206,7 +206,7 @@ func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { // A (ids[0]) is looking for D (ids[3]) // A will first ask B, B will reply with C's address (and A's address) // A will then ask C, C will reply with D's address (and B's address) - self := nodes[0].NodeInfo.ID + self := kadt.PeerID(nodes[0].NodeInfo.ID) c, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg) if err != nil { log.Fatalf("unexpected error creating coordinator: %v", err) @@ -264,7 +264,7 @@ func TestBootstrap(t *testing.T) { ccfg.Clock = clk ccfg.PeerstoreTTL = peerstoreTTL - self := nodes[0].NodeInfo.ID + self := kadt.PeerID(nodes[0].NodeInfo.ID) d, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg) require.NoError(t, err) @@ -318,7 +318,7 @@ func TestIncludeNode(t *testing.T) { candidate := nodes[len(nodes)-1].NodeInfo // not in nodes[0] routing table - self := nodes[0].NodeInfo.ID + self := kadt.PeerID(nodes[0].NodeInfo.ID) d, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg) if err != nil { log.Fatalf("unexpected error creating dht: %v", err) @@ -331,8 +331,8 @@ func TestIncludeNode(t *testing.T) { w := new(notificationWatcher) w.Watch(t, ctx, d.RoutingNotifications()) - // inject a new node into the dht's includeEvents queue - err = d.AddNodes(ctx, []peer.AddrInfo{candidate}, time.Minute) + // inject a new node + err = d.AddNodes(ctx, 
[]peer.AddrInfo{candidate}) require.NoError(t, err) // the include state machine runs in the background and eventually should add the node to routing table diff --git a/v2/coord/coretypes.go b/v2/coord/coretypes.go index fe72d90f..8da79942 100644 --- a/v2/coord/coretypes.go +++ b/v2/coord/coretypes.go @@ -5,20 +5,17 @@ import ( "errors" "time" - "github.com/libp2p/go-libp2p-kad-dht/v2/pb" "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" - "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/network/address" -) -// KadKey is a type alias for the type of key that's used with this DHT -// implementation. -type KadKey = key.Key256 + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" +) // Value is a value that may be stored in the DHT. type Value interface { - Key() KadKey + Key() kadt.Key MarshalBinary() ([]byte, error) } @@ -32,12 +29,12 @@ type Node interface { // GetClosestNodes requests the n closest nodes to the key from the node's // local routing table. The node may return fewer nodes than requested. - GetClosestNodes(ctx context.Context, key KadKey, n int) ([]Node, error) + GetClosestNodes(ctx context.Context, key kadt.Key, n int) ([]Node, error) // GetValue requests that the node return any value associated with the // supplied key. If the node does not have a value for the key it returns // ErrValueNotFound. - GetValue(ctx context.Context, key KadKey) (Value, error) + GetValue(ctx context.Context, key kadt.Key) (Value, error) // PutValue requests that the node stores a value to be associated with the supplied key. // If the node cannot or chooses not to store the value for the key it returns ErrValueNotAccepted. @@ -89,5 +86,5 @@ type Router interface { // GetClosestNodes attempts to send a request to another node asking it for nodes that it considers to be // closest to the target key. 
- GetClosestNodes(ctx context.Context, to peer.AddrInfo, target KadKey) ([]peer.AddrInfo, error) + GetClosestNodes(ctx context.Context, to peer.AddrInfo, target kadt.Key) ([]peer.AddrInfo, error) } diff --git a/v2/coord/event.go b/v2/coord/event.go index 4d5790f7..2355e259 100644 --- a/v2/coord/event.go +++ b/v2/coord/event.go @@ -1,13 +1,13 @@ package coord import ( - "time" - "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/network/address" - "github.com/plprobelab/go-kademlia/query" + + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/query" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) type BehaviourEvent interface { @@ -49,7 +49,7 @@ type RoutingNotification interface { type EventStartBootstrap struct { ProtocolID address.ProtocolID - Message kad.Request[KadKey, ma.Multiaddr] + Message kad.Request[kadt.Key, ma.Multiaddr] SeedNodes []peer.ID // TODO: peer.AddrInfo } @@ -59,7 +59,7 @@ func (*EventStartBootstrap) routingCommand() {} type EventOutboundGetCloserNodes struct { QueryID query.QueryID To peer.AddrInfo - Target KadKey + Target kadt.Key Notify Notify[BehaviourEvent] } @@ -69,9 +69,9 @@ func (*EventOutboundGetCloserNodes) networkCommand() {} type EventStartQuery struct { QueryID query.QueryID - Target KadKey + Target kadt.Key ProtocolID address.ProtocolID - Message kad.Request[KadKey, ma.Multiaddr] + Message kad.Request[kadt.Key, ma.Multiaddr] KnownClosestNodes []peer.ID Notify NotifyCloser[BehaviourEvent] } @@ -88,7 +88,6 @@ func (*EventStopQuery) queryCommand() {} type EventAddAddrInfo struct { NodeInfo peer.AddrInfo - TTL time.Duration } func (*EventAddAddrInfo) behaviourEvent() {} @@ -97,7 +96,7 @@ func (*EventAddAddrInfo) routingCommand() {} type EventGetCloserNodesSuccess struct { QueryID query.QueryID To peer.AddrInfo - Target KadKey + Target kadt.Key CloserNodes []peer.AddrInfo } @@ -107,7 +106,7 @@ func (*EventGetCloserNodesSuccess) nodeHandlerResponse() {} type EventGetCloserNodesFailure struct { QueryID query.QueryID To peer.AddrInfo - Target KadKey + Target kadt.Key Err error } @@ -119,7 +118,7 @@ func (*EventGetCloserNodesFailure) nodeHandlerResponse() {} type EventQueryProgressed struct { QueryID query.QueryID NodeID peer.ID - Response kad.Response[KadKey, ma.Multiaddr] + Response kad.Response[kadt.Key, ma.Multiaddr] Stats query.QueryStats } diff --git a/v2/coord/internal/nettest/layouts.go b/v2/coord/internal/nettest/layouts.go index f5236dc1..c90e544b 100644 --- a/v2/coord/internal/nettest/layouts.go +++ b/v2/coord/internal/nettest/layouts.go @@ -6,8 +6,6 @@ import ( "github.com/benbjohnson/clock" ma "github.com/multiformats/go-multiaddr" - "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/routing/simplert" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" @@ -38,7 +36,7 @@ func LinearTopology(n int, clk clock.Clock) (*Topology, []*Node, error) { nodes[i] = &Node{ NodeInfo: ai, Router: NewRouter(ai.ID, top), - RoutingTable: simplert.New[key.Key256, kad.NodeID[key.Key256]](kadt.PeerID(ai.ID), 20), + RoutingTable: simplert.New[kadt.Key, kadt.PeerID](kadt.PeerID(ai.ID), 20), } } diff --git a/v2/coord/internal/nettest/routing.go b/v2/coord/internal/nettest/routing.go index e0217052..7553674f 100644 --- a/v2/coord/internal/nettest/routing.go +++ b/v2/coord/internal/nettest/routing.go @@ -72,7 +72,7 @@ func NewRouter(self peer.ID, top *Topology) *Router { } } -func (r *Router) NodeID() 
kad.NodeID[key.Key256] { +func (r *Router) NodeID() kad.NodeID[kadt.Key] { return kadt.PeerID(r.self) } @@ -158,7 +158,7 @@ func (r *Router) GetNodeInfo(ctx context.Context, id peer.ID) (peer.AddrInfo, er return status.NodeInfo, nil } -func (r *Router) GetClosestNodes(ctx context.Context, to peer.AddrInfo, target key.Key256) ([]peer.AddrInfo, error) { +func (r *Router) GetClosestNodes(ctx context.Context, to peer.AddrInfo, target kadt.Key) ([]peer.AddrInfo, error) { protoID := address.ProtocolID("/test/1.0.0") req := &pb.Message{ diff --git a/v2/coord/internal/nettest/topology.go b/v2/coord/internal/nettest/topology.go index c7aae8d5..61653f23 100644 --- a/v2/coord/internal/nettest/topology.go +++ b/v2/coord/internal/nettest/topology.go @@ -6,18 +6,17 @@ import ( "github.com/benbjohnson/clock" "github.com/libp2p/go-libp2p/core/peer" - "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/network/address" - "github.com/plprobelab/go-kademlia/routing" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/routing" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) type Node struct { NodeInfo peer.AddrInfo Router *Router - RoutingTable routing.RoutingTableCpl[key.Key256, kad.NodeID[key.Key256]] + RoutingTable routing.RoutingTableCpl[kadt.Key, kadt.PeerID] } type Topology struct { diff --git a/v2/coord/internal/tiny/node.go b/v2/coord/internal/tiny/node.go new file mode 100644 index 00000000..72c67887 --- /dev/null +++ b/v2/coord/internal/tiny/node.go @@ -0,0 +1,31 @@ +// Package tiny implements Kademlia types suitable for tiny test networks +package tiny + +import ( + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" +) + +type Key = key.Key8 + +type Node struct { + key Key +} + +var _ kad.NodeID[Key] = Node{} + +func NewNode(k Key) Node { + return Node{key: k} +} + +func (n Node) Key() Key { + return n.key +} + +func (n Node) Equal(other Node) bool { + return n.key.Compare(other.key) == 0 +} + +func (n Node) String() string { + return key.HexString(n.key) +} diff --git a/v2/coord/network.go b/v2/coord/network.go index eeb05402..d2da896c 100644 --- a/v2/coord/network.go +++ b/v2/coord/network.go @@ -9,10 +9,10 @@ import ( ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" - "github.com/plprobelab/go-kademlia/query" "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/query" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) @@ -21,7 +21,7 @@ type NetworkBehaviour struct { rtr Router nodeHandlersMu sync.Mutex - nodeHandlers map[peer.ID]*NodeHandler // TODO: garbage collect node handlers + nodeHandlers map[kadt.PeerID]*NodeHandler // TODO: garbage collect node handlers pendingMu sync.Mutex pending []BehaviourEvent @@ -34,9 +34,9 @@ type NetworkBehaviour struct { func NewNetworkBehaviour(rtr Router, logger *slog.Logger, tracer trace.Tracer) *NetworkBehaviour { b := &NetworkBehaviour{ rtr: rtr, - nodeHandlers: make(map[peer.ID]*NodeHandler), + nodeHandlers: make(map[kadt.PeerID]*NodeHandler), ready: make(chan struct{}, 1), - logger: logger, + logger: logger.With("behaviour", "network"), tracer: tracer, } @@ -53,10 +53,10 @@ func (b *NetworkBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { switch ev := ev.(type) { case *EventOutboundGetCloserNodes: b.nodeHandlersMu.Lock() - nh, ok := b.nodeHandlers[ev.To.ID] + nh, ok := 
b.nodeHandlers[kadt.PeerID(ev.To.ID)] if !ok { nh = NewNodeHandler(ev.To, b.rtr, b.logger, b.tracer) - b.nodeHandlers[ev.To.ID] = nh + b.nodeHandlers[kadt.PeerID(ev.To.ID)] = nh } b.nodeHandlersMu.Unlock() nh.Notify(ctx, ev) @@ -100,11 +100,11 @@ func (b *NetworkBehaviour) Perform(ctx context.Context) (BehaviourEvent, bool) { return nil, false } -func (b *NetworkBehaviour) getNodeHandler(ctx context.Context, id peer.ID) (*NodeHandler, error) { +func (b *NetworkBehaviour) getNodeHandler(ctx context.Context, id kadt.PeerID) (*NodeHandler, error) { b.nodeHandlersMu.Lock() nh, ok := b.nodeHandlers[id] if !ok || len(nh.Addresses()) == 0 { - info, err := b.rtr.GetNodeInfo(ctx, id) + info, err := b.rtr.GetNodeInfo(ctx, peer.ID(id)) if err != nil { return nil, err } @@ -182,7 +182,7 @@ func (h *NodeHandler) Addresses() []ma.Multiaddr { // GetClosestNodes requests the n closest nodes to the key from the node's local routing table. // The node may return fewer nodes than requested. -func (h *NodeHandler) GetClosestNodes(ctx context.Context, k KadKey, n int) ([]Node, error) { +func (h *NodeHandler) GetClosestNodes(ctx context.Context, k kadt.Key, n int) ([]Node, error) { ctx, span := h.tracer.Start(ctx, "NodeHandler.GetClosestNodes") defer span.End() w := NewWaiter[BehaviourEvent]() @@ -224,7 +224,7 @@ func (h *NodeHandler) GetClosestNodes(ctx context.Context, k KadKey, n int) ([]N // GetValue requests that the node return any value associated with the supplied key. // If the node does not have a value for the key it returns ErrValueNotFound. -func (h *NodeHandler) GetValue(ctx context.Context, key KadKey) (Value, error) { +func (h *NodeHandler) GetValue(ctx context.Context, key kadt.Key) (Value, error) { panic("not implemented") } @@ -234,31 +234,19 @@ func (h *NodeHandler) PutValue(ctx context.Context, r Value, q int) error { panic("not implemented") } -func CloserNodesResponse(k KadKey, nodes []peer.AddrInfo) kad.Response[KadKey, ma.Multiaddr] { - infos := make([]kad.NodeInfo[KadKey, ma.Multiaddr], len(nodes)) - for i := range nodes { - infos[i] = kadt.AddrInfo{Info: nodes[i]} - } - - return &fakeMessage{ - key: k, - infos: infos, - } -} - type fakeMessage struct { - key KadKey - infos []kad.NodeInfo[KadKey, ma.Multiaddr] + key kadt.Key + infos []kad.NodeInfo[kadt.Key, ma.Multiaddr] } -func (r fakeMessage) Target() KadKey { +func (r fakeMessage) Target() kadt.Key { return r.key } -func (r fakeMessage) CloserNodes() []kad.NodeInfo[KadKey, ma.Multiaddr] { +func (r fakeMessage) CloserNodes() []kad.NodeInfo[kadt.Key, ma.Multiaddr] { return r.infos } -func (r fakeMessage) EmptyResponse() kad.Response[KadKey, ma.Multiaddr] { +func (r fakeMessage) EmptyResponse() kad.Response[kadt.Key, ma.Multiaddr] { return &fakeMessage{} } diff --git a/v2/coord/query.go b/v2/coord/query.go index 8ef2bdfc..adbce4b1 100644 --- a/v2/coord/query.go +++ b/v2/coord/query.go @@ -6,14 +6,14 @@ import ( "sync" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" - ma "github.com/multiformats/go-multiaddr" - "github.com/plprobelab/go-kademlia/query" "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" + + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/query" ) type PooledQueryBehaviour struct { - pool *query.Pool[KadKey, ma.Multiaddr] + pool *query.Pool[kadt.Key, kadt.PeerID] waiters map[query.QueryID]NotifyCloser[BehaviourEvent] pendingMu sync.Mutex @@ -24,12 +24,12 @@ type PooledQueryBehaviour struct { tracer trace.Tracer } -func NewPooledQueryBehaviour(pool *query.Pool[KadKey, ma.Multiaddr], logger *slog.Logger, tracer 
trace.Tracer) *PooledQueryBehaviour { +func NewPooledQueryBehaviour(pool *query.Pool[kadt.Key, kadt.PeerID], logger *slog.Logger, tracer trace.Tracer) *PooledQueryBehaviour { h := &PooledQueryBehaviour{ pool: pool, waiters: make(map[query.QueryID]NotifyCloser[BehaviourEvent]), ready: make(chan struct{}, 1), - logger: logger, + logger: logger.With("behaviour", "query"), tracer: tracer, } return h @@ -45,12 +45,10 @@ func (p *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { var cmd query.PoolEvent switch ev := ev.(type) { case *EventStartQuery: - cmd = &query.EventPoolAddQuery[KadKey, ma.Multiaddr]{ + cmd = &query.EventPoolAddQuery[kadt.Key, kadt.PeerID]{ QueryID: ev.QueryID, Target: ev.Target, - ProtocolID: ev.ProtocolID, - Message: ev.Message, - KnownClosestNodes: SliceOfPeerIDToSliceOfNodeID(ev.KnownClosestNodes), + KnownClosestNodes: SliceOfPeerIDToSliceOfKadPeerID(ev.KnownClosestNodes), } if ev.Notify != nil { p.waiters[ev.QueryID] = ev.Notify @@ -71,19 +69,19 @@ func (p *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { waiter, ok := p.waiters[ev.QueryID] if ok { waiter.Notify(ctx, &EventQueryProgressed{ - NodeID: ev.To.ID, - QueryID: ev.QueryID, - Response: CloserNodesResponse(ev.Target, ev.CloserNodes), + NodeID: ev.To.ID, + QueryID: ev.QueryID, + // CloserNodes: CloserNodeIDs(ev.CloserNodes), // Stats: stats, }) } - cmd = &query.EventPoolMessageResponse[KadKey, ma.Multiaddr]{ - NodeID: kadt.PeerID(ev.To.ID), - QueryID: ev.QueryID, - Response: CloserNodesResponse(ev.Target, ev.CloserNodes), + cmd = &query.EventPoolFindCloserResponse[kadt.Key, kadt.PeerID]{ + NodeID: kadt.PeerID(ev.To.ID), + QueryID: ev.QueryID, + CloserNodes: SliceOfAddrInfoToSliceOfKadPeerID(ev.CloserNodes), } case *EventGetCloserNodesFailure: - cmd = &query.EventPoolMessageFailure[KadKey]{ + cmd = &query.EventPoolFindCloserFailure[kadt.Key, kadt.PeerID]{ NodeID: kadt.PeerID(ev.To.ID), QueryID: ev.QueryID, Error: ev.Err, @@ -150,11 +148,11 @@ func (p *PooledQueryBehaviour) advancePool(ctx context.Context, ev query.PoolEve pstate := p.pool.Advance(ctx, ev) switch st := pstate.(type) { - case *query.StatePoolQueryMessage[KadKey, ma.Multiaddr]: + case *query.StatePoolFindCloser[kadt.Key, kadt.PeerID]: return &EventOutboundGetCloserNodes{ QueryID: st.QueryID, - To: NodeIDToAddrInfo(st.NodeID), - Target: st.Message.Target(), + To: KadPeerIDToAddrInfo(st.NodeID), + Target: st.Target, Notify: p, }, true case *query.StatePoolWaitingAtCapacity: diff --git a/v2/coord/query/iter.go b/v2/coord/query/iter.go new file mode 100644 index 00000000..52960dea --- /dev/null +++ b/v2/coord/query/iter.go @@ -0,0 +1,99 @@ +package query + +import ( + "context" + + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-kademlia/key/trie" +) + +// A NodeIter iterates nodes according to some strategy. +type NodeIter[K kad.Key[K], N kad.NodeID[K]] interface { + // Add adds node information to the iterator + Add(*NodeStatus[K, N]) + + // Find returns the node information corresponding to the given Kademlia key + Find(K) (*NodeStatus[K, N], bool) + + // Each applies fn to each entry in the iterator in order. Each stops and returns true if fn returns true. + // Otherwise Each returns false when there are no further entries. + Each(ctx context.Context, fn func(context.Context, *NodeStatus[K, N]) bool) bool +} + +// A ClosestNodesIter iterates nodes in order of ascending distance from a key. 
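+//
+// A usage sketch (K and N stand for concrete key and node types, as in the
+// tests in iter_test.go):
+//
+//	iter := NewClosestNodesIter[K, N](target)
+//	iter.Add(&NodeStatus[K, N]{NodeID: n})
+//	iter.Each(ctx, func(ctx context.Context, ns *NodeStatus[K, N]) bool {
+//		// nodes are visited in ascending distance from target;
+//		// returning true stops the iteration early
+//		return false
+//	})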
+type ClosestNodesIter[K kad.Key[K], N kad.NodeID[K]] struct {
+	// target is the key whose distance to a node determines the position of that node in the iterator.
+	target K
+
+	// nodes holds the nodes discovered so far, ordered by increasing distance from the target.
+	nodes *trie.Trie[K, *NodeStatus[K, N]]
+}
+
+// NewClosestNodesIter creates a new ClosestNodesIter.
+func NewClosestNodesIter[K kad.Key[K], N kad.NodeID[K]](target K) *ClosestNodesIter[K, N] {
+	return &ClosestNodesIter[K, N]{
+		target: target,
+		nodes:  trie.New[K, *NodeStatus[K, N]](),
+	}
+}
+
+func (iter *ClosestNodesIter[K, N]) Add(ni *NodeStatus[K, N]) {
+	iter.nodes.Add(ni.NodeID.Key(), ni)
+}
+
+func (iter *ClosestNodesIter[K, N]) Find(k K) (*NodeStatus[K, N], bool) {
+	found, ni := trie.Find(iter.nodes, k)
+	return ni, found
+}
+
+func (iter *ClosestNodesIter[K, N]) Each(ctx context.Context, fn func(context.Context, *NodeStatus[K, N]) bool) bool {
+	// get all the nodes in order of distance from the target
+	// TODO: turn this into a walk or iterator on trie.Trie
+	entries := trie.Closest(iter.nodes, iter.target, iter.nodes.Size())
+	for _, e := range entries {
+		ni := e.Data
+		if fn(ctx, ni) {
+			return true
+		}
+	}
+	return false
+}
+
+// A SequentialIter iterates nodes in the order they were added to the iterator.
+type SequentialIter[K kad.Key[K], N kad.NodeID[K]] struct {
+	// nodes holds the nodes discovered so far, in the order they were added.
+	nodes []*NodeStatus[K, N]
+}
+
+// NewSequentialIter creates a new SequentialIter.
+func NewSequentialIter[K kad.Key[K], N kad.NodeID[K]]() *SequentialIter[K, N] {
+	return &SequentialIter[K, N]{
+		nodes: make([]*NodeStatus[K, N], 0),
+	}
+}
+
+func (iter *SequentialIter[K, N]) Add(ni *NodeStatus[K, N]) {
+	iter.nodes = append(iter.nodes, ni)
+}
+
+// Find returns the node information corresponding to the given Kademlia key. It uses a linear
+// search which makes it unsuitable for large numbers of entries.
+func (iter *SequentialIter[K, N]) Find(k K) (*NodeStatus[K, N], bool) {
+	for i := range iter.nodes {
+		if key.Equal(k, iter.nodes[i].NodeID.Key()) {
+			return iter.nodes[i], true
+		}
+	}
+
+	return nil, false
+}
+
+func (iter *SequentialIter[K, N]) Each(ctx context.Context, fn func(context.Context, *NodeStatus[K, N]) bool) bool {
+	for _, ns := range iter.nodes {
+		if fn(ctx, ns) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/v2/coord/query/iter_test.go b/v2/coord/query/iter_test.go
new file mode 100644
index 00000000..0985182a
--- /dev/null
+++ b/v2/coord/query/iter_test.go
@@ -0,0 +1,78 @@
+package query
+
+import (
+	"context"
+	"testing"
+
+	"github.com/plprobelab/go-kademlia/key"
+	"github.com/stretchr/testify/require"
+
+	"github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/tiny"
+)
+
+var (
+	_ NodeIter[tiny.Key, tiny.Node] = (*ClosestNodesIter[tiny.Key, tiny.Node])(nil)
+	_ NodeIter[tiny.Key, tiny.Node] = (*SequentialIter[tiny.Key, tiny.Node])(nil)
+)
+
+func TestClosestNodesIter(t *testing.T) {
+	target := tiny.Key(0b00000001)
+	a := tiny.NewNode(tiny.Key(0b00000100)) // 4
+	b := tiny.NewNode(tiny.Key(0b00001000)) // 8
+	c := tiny.NewNode(tiny.Key(0b00010000)) // 16
+	d := tiny.NewNode(tiny.Key(0b00100000)) // 32
+
+	// ensure the order of the known nodes
+	require.True(t, target.Xor(a.Key()).Compare(target.Xor(b.Key())) == -1)
+	require.True(t, target.Xor(b.Key()).Compare(target.Xor(c.Key())) == -1)
+	require.True(t, target.Xor(c.Key()).Compare(target.Xor(d.Key())) == -1)
+
+	iter := NewClosestNodesIter[tiny.Key, tiny.Node](target)
+
+	// add nodes in "random order"
+
+	iter.Add(&NodeStatus[tiny.Key, tiny.Node]{NodeID: b})
+	iter.Add(&NodeStatus[tiny.Key, tiny.Node]{NodeID: d})
+	iter.Add(&NodeStatus[tiny.Key, tiny.Node]{NodeID: a})
+	iter.Add(&NodeStatus[tiny.Key, tiny.Node]{NodeID: c})
+
+	// Each should iterate in order of distance from target
+
+	distances := make([]tiny.Key, 0, 4)
+	iter.Each(context.Background(), func(ctx context.Context, ns *NodeStatus[tiny.Key, tiny.Node]) bool {
+		distances = append(distances, target.Xor(ns.NodeID.Key()))
+		return false
+	})
+
+	require.True(t, key.IsSorted(distances))
+}
+
+func TestSequentialIter(t *testing.T) {
+	a := tiny.NewNode(tiny.Key(0b00000100)) // 4
+	b := tiny.NewNode(tiny.Key(0b00001000)) // 8
+	c := tiny.NewNode(tiny.Key(0b00010000)) // 16
+	d := tiny.NewNode(tiny.Key(0b00100000)) // 32
+
+	iter := NewSequentialIter[tiny.Key, tiny.Node]()
+
+	// add nodes in "random order"
+
+	iter.Add(&NodeStatus[tiny.Key, tiny.Node]{NodeID: b})
+	iter.Add(&NodeStatus[tiny.Key, tiny.Node]{NodeID: d})
+	iter.Add(&NodeStatus[tiny.Key, tiny.Node]{NodeID: a})
+	iter.Add(&NodeStatus[tiny.Key, tiny.Node]{NodeID: c})
+
+	// Each should iterate in the order the nodes were added to the iterator
+
+	order := make([]tiny.Key, 0, 4)
+	iter.Each(context.Background(), func(ctx context.Context, ns *NodeStatus[tiny.Key, tiny.Node]) bool {
+		order = append(order, ns.NodeID.Key())
+		return false
+	})
+
+	require.Equal(t, 4, len(order))
+	require.True(t, key.Equal(order[0], b.Key()))
+	require.True(t, key.Equal(order[1], d.Key()))
+	require.True(t, key.Equal(order[2], a.Key()))
+	require.True(t, key.Equal(order[3], c.Key()))
+}
diff --git a/v2/coord/query/node.go b/v2/coord/query/node.go
new file mode 100644
index 00000000..7540acef
--- /dev/null
+++ b/v2/coord/query/node.go
@@ -0,0 +1,40 @@
+package query
+
+import (
+	"time"
+
+	"github.com/plprobelab/go-kademlia/kad"
+)
+
+type NodeStatus[K kad.Key[K], N kad.NodeID[K]] struct {
+	NodeID N
+	State  NodeState
+}
+
+type NodeState interface {
+	nodeState()
+}
+
+// StateNodeNotContacted indicates that the node has not been contacted yet.
+type StateNodeNotContacted struct{}
+
+// StateNodeWaiting indicates that a query is waiting for a response from the node.
+type StateNodeWaiting struct {
+	Deadline time.Time
+}
+
+// StateNodeUnresponsive indicates that the node did not respond within the configured timeout.
+type StateNodeUnresponsive struct{}
+
+// StateNodeFailed indicates that the attempt to contact the node failed.
+type StateNodeFailed struct{}
+
+// StateNodeSucceeded indicates that the attempt to contact the node succeeded.
+type StateNodeSucceeded struct{}
+
+// nodeState() ensures that only node states can be assigned to the NodeState interface.
+func (*StateNodeNotContacted) nodeState() {}
+func (*StateNodeWaiting) nodeState()      {}
+func (*StateNodeUnresponsive) nodeState() {}
+func (*StateNodeFailed) nodeState()       {}
+func (*StateNodeSucceeded) nodeState()    {}
diff --git a/v2/coord/query/pool.go b/v2/coord/query/pool.go
new file mode 100644
index 00000000..2fffc706
--- /dev/null
+++ b/v2/coord/query/pool.go
@@ -0,0 +1,354 @@
+package query
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/benbjohnson/clock"
+	"github.com/plprobelab/go-kademlia/kad"
+	"github.com/plprobelab/go-kademlia/kaderr"
+
+	"github.com/libp2p/go-libp2p-kad-dht/v2/tele"
+)
+
+type Pool[K kad.Key[K], N kad.NodeID[K]] struct {
+	// self is the node id of the system the pool is running on
+	self       N
+	queries    []*Query[K, N]
+	queryIndex map[QueryID]*Query[K, N]
+
+	// cfg is a copy of the optional configuration supplied to the pool
+	cfg PoolConfig
+
+	// queriesInFlight is the number of queries that are waiting for message responses
+	queriesInFlight int
+}
+
+// PoolConfig specifies optional configuration for a Pool.
+type PoolConfig struct {
+	Concurrency      int           // the maximum number of queries that may be waiting for message responses at any one time
+	Timeout          time.Duration // the time to wait before terminating a query that is not making progress
+	Replication      int           // the 'k' parameter defined by Kademlia
+	QueryConcurrency int           // the maximum number of concurrent requests that each query may have in flight
+	RequestTimeout   time.Duration // the timeout queries should use for contacting a single node
+	Clock            clock.Clock   // a clock that may be replaced by a mock when testing
+}
+
+// Validate checks the configuration options and returns an error if any have invalid values.
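+//
+// A typical flow is to start from DefaultPoolConfig and validate after
+// overriding individual fields (the values below are illustrative only):
+//
+//	cfg := DefaultPoolConfig()
+//	cfg.Concurrency = 5
+//	if err := cfg.Validate(); err != nil {
+//		// reject the configuration
+//	}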
+func (cfg *PoolConfig) Validate() error { + if cfg.Clock == nil { + return &kaderr.ConfigurationError{ + Component: "PoolConfig", + Err: fmt.Errorf("clock must not be nil"), + } + } + if cfg.Concurrency < 1 { + return &kaderr.ConfigurationError{ + Component: "PoolConfig", + Err: fmt.Errorf("concurrency must be greater than zero"), + } + } + if cfg.Timeout < 1 { + return &kaderr.ConfigurationError{ + Component: "PoolConfig", + Err: fmt.Errorf("timeout must be greater than zero"), + } + } + if cfg.Replication < 1 { + return &kaderr.ConfigurationError{ + Component: "PoolConfig", + Err: fmt.Errorf("replication must be greater than zero"), + } + } + + if cfg.QueryConcurrency < 1 { + return &kaderr.ConfigurationError{ + Component: "PoolConfig", + Err: fmt.Errorf("query concurrency must be greater than zero"), + } + } + + if cfg.RequestTimeout < 1 { + return &kaderr.ConfigurationError{ + Component: "PoolConfig", + Err: fmt.Errorf("request timeout must be greater than zero"), + } + } + + return nil +} + +// DefaultPoolConfig returns the default configuration options for a Pool. +// Options may be overridden before passing to NewPool +func DefaultPoolConfig() *PoolConfig { + return &PoolConfig{ + Clock: clock.New(), // use standard time + Concurrency: 3, + Timeout: 5 * time.Minute, + Replication: 20, + QueryConcurrency: 3, + RequestTimeout: time.Minute, + } +} + +func NewPool[K kad.Key[K], N kad.NodeID[K]](self N, cfg *PoolConfig) (*Pool[K, N], error) { + if cfg == nil { + cfg = DefaultPoolConfig() + } else if err := cfg.Validate(); err != nil { + return nil, err + } + + return &Pool[K, N]{ + self: self, + cfg: *cfg, + queries: make([]*Query[K, N], 0), + queryIndex: make(map[QueryID]*Query[K, N]), + }, nil +} + +// Advance advances the state of the pool by attempting to advance one of its queries +func (p *Pool[K, N]) Advance(ctx context.Context, ev PoolEvent) PoolState { + ctx, span := tele.StartSpan(ctx, "Pool.Advance") + defer span.End() + + // reset the in flight counter so it can be calculated as the queries are advanced + p.queriesInFlight = 0 + + // eventQueryID keeps track of a query that was advanced via a specific event, to avoid it + // being advanced twice + eventQueryID := InvalidQueryID + + switch tev := ev.(type) { + case *EventPoolAddQuery[K, N]: + p.addQuery(ctx, tev.QueryID, tev.Target, tev.KnownClosestNodes) + // TODO: return error as state + case *EventPoolStopQuery: + if qry, ok := p.queryIndex[tev.QueryID]; ok { + state, terminal := p.advanceQuery(ctx, qry, &EventQueryCancel{}) + if terminal { + return state + } + eventQueryID = qry.id + } + case *EventPoolFindCloserResponse[K, N]: + if qry, ok := p.queryIndex[tev.QueryID]; ok { + state, terminal := p.advanceQuery(ctx, qry, &EventQueryFindCloserResponse[K, N]{ + NodeID: tev.NodeID, + CloserNodes: tev.CloserNodes, + }) + if terminal { + return state + } + eventQueryID = qry.id + } + case *EventPoolFindCloserFailure[K, N]: + if qry, ok := p.queryIndex[tev.QueryID]; ok { + state, terminal := p.advanceQuery(ctx, qry, &EventQueryFindCloserFailure[K, N]{ + NodeID: tev.NodeID, + Error: tev.Error, + }) + if terminal { + return state + } + eventQueryID = qry.id + } + case *EventPoolPoll: + // no event to process + default: + panic(fmt.Sprintf("unexpected event: %T", tev)) + } + + if len(p.queries) == 0 { + return &StatePoolIdle{} + } + + // Attempt to advance another query + for _, qry := range p.queries { + if eventQueryID == qry.id { + // avoid advancing query twice + continue + } + + state, terminal := p.advanceQuery(ctx, qry, nil) 
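+		// a nil event simply gives the query a chance to make progress;
+		// a terminal state (find-closer request, finished or timed out)
+		// is returned to the caller immediately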
+		if terminal {
+			return state
+		}
+
+		// check if we have the maximum number of queries in flight
+		if p.queriesInFlight >= p.cfg.Concurrency {
+			return &StatePoolWaitingAtCapacity{}
+		}
+	}
+
+	if p.queriesInFlight > 0 {
+		return &StatePoolWaitingWithCapacity{}
+	}
+
+	return &StatePoolIdle{}
+}
+
+func (p *Pool[K, N]) advanceQuery(ctx context.Context, qry *Query[K, N], qev QueryEvent) (PoolState, bool) {
+	state := qry.Advance(ctx, qev)
+	switch st := state.(type) {
+	case *StateQueryFindCloser[K, N]:
+		p.queriesInFlight++
+		return &StatePoolFindCloser[K, N]{
+			QueryID: st.QueryID,
+			Stats:   st.Stats,
+			NodeID:  st.NodeID,
+			Target:  st.Target,
+		}, true
+	case *StateQueryFinished:
+		p.removeQuery(qry.id)
+		return &StatePoolQueryFinished{
+			QueryID: st.QueryID,
+			Stats:   st.Stats,
+		}, true
+	case *StateQueryWaitingAtCapacity:
+		elapsed := p.cfg.Clock.Since(qry.stats.Start)
+		if elapsed > p.cfg.Timeout {
+			p.removeQuery(qry.id)
+			return &StatePoolQueryTimeout{
+				QueryID: st.QueryID,
+				Stats:   st.Stats,
+			}, true
+		}
+		p.queriesInFlight++
+	case *StateQueryWaitingWithCapacity:
+		elapsed := p.cfg.Clock.Since(qry.stats.Start)
+		if elapsed > p.cfg.Timeout {
+			p.removeQuery(qry.id)
+			return &StatePoolQueryTimeout{
+				QueryID: st.QueryID,
+				Stats:   st.Stats,
+			}, true
+		}
+		p.queriesInFlight++
+	}
+	return nil, false
+}
+
+func (p *Pool[K, N]) removeQuery(queryID QueryID) {
+	for i := range p.queries {
+		if p.queries[i].id != queryID {
+			continue
+		}
+		// remove from slice
+		copy(p.queries[i:], p.queries[i+1:])
+		p.queries[len(p.queries)-1] = nil
+		p.queries = p.queries[:len(p.queries)-1]
+		break
+	}
+	delete(p.queryIndex, queryID)
+}
+
+// addQuery adds a query to the pool. It returns an error if the query id is
+// already in use.
+func (p *Pool[K, N]) addQuery(ctx context.Context, queryID QueryID, target K, knownClosestNodes []N) error {
+	if _, exists := p.queryIndex[queryID]; exists {
+		return fmt.Errorf("query id already in use")
+	}
+	iter := NewClosestNodesIter[K, N](target)
+
+	qryCfg := DefaultQueryConfig[K]()
+	qryCfg.Clock = p.cfg.Clock
+	qryCfg.Concurrency = p.cfg.QueryConcurrency
+	qryCfg.RequestTimeout = p.cfg.RequestTimeout
+
+	qry, err := NewQuery[K, N](p.self, queryID, target, iter, knownClosestNodes, qryCfg)
+	if err != nil {
+		return fmt.Errorf("new query: %w", err)
+	}
+
+	p.queries = append(p.queries, qry)
+	p.queryIndex[queryID] = qry
+
+	return nil
+}
+
+// States
+
+type PoolState interface {
+	poolState()
+}
+
+// StatePoolIdle indicates that the pool is idle, i.e. there are no queries to process.
+type StatePoolIdle struct{}
+
+// StatePoolFindCloser indicates that a pool query wants to send a find closer nodes message to a node.
+type StatePoolFindCloser[K kad.Key[K], N kad.NodeID[K]] struct {
+	QueryID QueryID
+	Target  K // the key that the query wants to find closer nodes for
+	NodeID  N // the node to send the message to
+	Stats   QueryStats
+}
+
+// StatePoolWaitingAtCapacity indicates that at least one query is waiting for results and the pool has reached
+// its maximum number of concurrent queries.
+type StatePoolWaitingAtCapacity struct{}
+
+// StatePoolWaitingWithCapacity indicates that at least one query is waiting for results but capacity to
+// start more is available.
+type StatePoolWaitingWithCapacity struct{}
+
+// StatePoolQueryFinished indicates that a query has finished.
+type StatePoolQueryFinished struct {
+	QueryID QueryID
+	Stats   QueryStats
+}
+
+// StatePoolQueryTimeout indicates that a query has timed out.
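+//
+// A caller driving the pool typically switches on the concrete type of the
+// state returned by Advance (a sketch, assuming a pool p and an event ev):
+//
+//	switch st := p.Advance(ctx, ev).(type) {
+//	case *StatePoolFindCloser[K, N]:
+//		// send a find-closer request for st.Target to st.NodeID
+//	case *StatePoolQueryFinished:
+//		// the query identified by st.QueryID completed
+//	}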
+type StatePoolQueryTimeout struct { + QueryID QueryID + Stats QueryStats +} + +// poolState() ensures that only Pool states can be assigned to the PoolState interface. +func (*StatePoolIdle) poolState() {} +func (*StatePoolFindCloser[K, N]) poolState() {} +func (*StatePoolWaitingAtCapacity) poolState() {} +func (*StatePoolWaitingWithCapacity) poolState() {} +func (*StatePoolQueryFinished) poolState() {} +func (*StatePoolQueryTimeout) poolState() {} + +// PoolEvent is an event intended to advance the state of a pool. +type PoolEvent interface { + poolEvent() +} + +// EventPoolAddQuery is an event that attempts to add a new query +type EventPoolAddQuery[K kad.Key[K], N kad.NodeID[K]] struct { + QueryID QueryID // the id to use for the new query + Target K // the target key for the query + KnownClosestNodes []N // an initial set of close nodes the query should use +} + +// EventPoolStopQuery notifies a [Pool] to stop a query. +type EventPoolStopQuery struct { + QueryID QueryID // the id of the query that should be stopped +} + +// EventPoolFindCloserResponse notifies a [Pool] that an attempt to find closer nodes has received a successful response. +type EventPoolFindCloserResponse[K kad.Key[K], N kad.NodeID[K]] struct { + QueryID QueryID // the id of the query that sent the message + NodeID N // the node the message was sent to + CloserNodes []N // the closer nodes sent by the node +} + +// EventPoolFindCloserFailure notifies a [Pool] that an attempt to find closer nodes has failed. +type EventPoolFindCloserFailure[K kad.Key[K], N kad.NodeID[K]] struct { + QueryID QueryID // the id of the query that sent the message + NodeID N // the node the message was sent to + Error error // the error that caused the failure, if any +} + +// EventPoolPoll is an event that signals the pool that it can perform housekeeping work such as time out queries. +type EventPoolPoll struct{} + +// poolEvent() ensures that only events accepted by a [Pool] can be assigned to the [PoolEvent] interface. 
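+//
+// For example, a new query is started by advancing the pool with an
+// EventPoolAddQuery (mirroring the tests in pool_test.go):
+//
+//	state := p.Advance(ctx, &EventPoolAddQuery[K, N]{
+//		QueryID:           "q1",
+//		Target:            target,
+//		KnownClosestNodes: seeds,
+//	})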
+func (*EventPoolAddQuery[K, N]) poolEvent() {} +func (*EventPoolStopQuery) poolEvent() {} +func (*EventPoolFindCloserResponse[K, N]) poolEvent() {} +func (*EventPoolFindCloserFailure[K, N]) poolEvent() {} +func (*EventPoolPoll) poolEvent() {} diff --git a/v2/coord/query/pool_test.go b/v2/coord/query/pool_test.go new file mode 100644 index 00000000..0deaa5a5 --- /dev/null +++ b/v2/coord/query/pool_test.go @@ -0,0 +1,333 @@ +package query + +import ( + "context" + "testing" + + "github.com/benbjohnson/clock" + "github.com/plprobelab/go-kademlia/key" + "github.com/stretchr/testify/require" + + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/tiny" +) + +func TestPoolConfigValidate(t *testing.T) { + t.Run("default is valid", func(t *testing.T) { + cfg := DefaultPoolConfig() + require.NoError(t, cfg.Validate()) + }) + + t.Run("clock is not nil", func(t *testing.T) { + cfg := DefaultPoolConfig() + cfg.Clock = nil + require.Error(t, cfg.Validate()) + }) + + t.Run("concurrency positive", func(t *testing.T) { + cfg := DefaultPoolConfig() + cfg.Concurrency = 0 + require.Error(t, cfg.Validate()) + cfg.Concurrency = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("timeout positive", func(t *testing.T) { + cfg := DefaultPoolConfig() + cfg.Timeout = 0 + require.Error(t, cfg.Validate()) + cfg.Timeout = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("replication positive", func(t *testing.T) { + cfg := DefaultPoolConfig() + cfg.Replication = 0 + require.Error(t, cfg.Validate()) + cfg.Replication = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("query concurrency positive", func(t *testing.T) { + cfg := DefaultPoolConfig() + cfg.QueryConcurrency = 0 + require.Error(t, cfg.Validate()) + cfg.QueryConcurrency = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("request timeout positive", func(t *testing.T) { + cfg := DefaultPoolConfig() + cfg.RequestTimeout = 0 + require.Error(t, cfg.Validate()) + cfg.RequestTimeout = -1 + require.Error(t, cfg.Validate()) + }) +} + +func TestPoolStartsIdle(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultPoolConfig() + cfg.Clock = clk + + self := tiny.NewNode(tiny.Key(0)) + p, err := NewPool[tiny.Key](self, cfg) + require.NoError(t, err) + + state := p.Advance(ctx, &EventPoolPoll{}) + require.IsType(t, &StatePoolIdle{}, state) +} + +func TestPoolStopWhenNoQueries(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultPoolConfig() + cfg.Clock = clk + + self := tiny.NewNode(tiny.Key(0)) + p, err := NewPool[tiny.Key](self, cfg) + require.NoError(t, err) + + state := p.Advance(ctx, &EventPoolPoll{}) + require.IsType(t, &StatePoolIdle{}, state) +} + +func TestPoolAddQueryStartsIfCapacity(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultPoolConfig() + cfg.Clock = clk + + self := tiny.NewNode(tiny.Key(0)) + p, err := NewPool[tiny.Key](self, cfg) + require.NoError(t, err) + + target := tiny.Key(0b00000001) + a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + + queryID := QueryID("test") + + // first thing the new pool should do is start the query + state := p.Advance(ctx, &EventPoolAddQuery[tiny.Key, tiny.Node]{ + QueryID: queryID, + Target: target, + KnownClosestNodes: []tiny.Node{a}, + }) + require.IsType(t, &StatePoolFindCloser[tiny.Key, tiny.Node]{}, state) + + // the query should attempt to contact the node it was given + st := state.(*StatePoolFindCloser[tiny.Key, tiny.Node]) + + // the query should be the one just added + require.Equal(t, queryID, 
st.QueryID) + + // the query should attempt to contact the node it was given + require.Equal(t, a, st.NodeID) + + // with the correct target + require.True(t, key.Equal(target, st.Target)) + + // now the pool reports that it is waiting + state = p.Advance(ctx, &EventPoolPoll{}) + require.IsType(t, &StatePoolWaitingWithCapacity{}, state) +} + +func TestPoolMessageResponse(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultPoolConfig() + cfg.Clock = clk + + self := tiny.NewNode(tiny.Key(0)) + p, err := NewPool[tiny.Key](self, cfg) + require.NoError(t, err) + + target := tiny.Key(0b00000001) + a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + + queryID := QueryID("test") + + // first thing the new pool should do is start the query + state := p.Advance(ctx, &EventPoolAddQuery[tiny.Key, tiny.Node]{ + QueryID: queryID, + Target: target, + KnownClosestNodes: []tiny.Node{a}, + }) + require.IsType(t, &StatePoolFindCloser[tiny.Key, tiny.Node]{}, state) + + // the query should attempt to contact the node it was given + st := state.(*StatePoolFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, queryID, st.QueryID) + require.Equal(t, a, st.NodeID) + + // notify query that node was contacted successfully, but no closer nodes + state = p.Advance(ctx, &EventPoolFindCloserResponse[tiny.Key, tiny.Node]{ + QueryID: queryID, + NodeID: a, + }) + + // pool should respond that query has finished + require.IsType(t, &StatePoolQueryFinished{}, state) + + stf := state.(*StatePoolQueryFinished) + require.Equal(t, queryID, stf.QueryID) + require.Equal(t, 1, stf.Stats.Requests) + require.Equal(t, 1, stf.Stats.Success) +} + +func TestPoolPrefersRunningQueriesOverNewOnes(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultPoolConfig() + cfg.Clock = clk + cfg.Concurrency = 2 // allow two queries to run concurrently + + self := tiny.NewNode(tiny.Key(0)) + p, err := NewPool[tiny.Key](self, cfg) + require.NoError(t, err) + + target := tiny.Key(0b00000001) + a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + b := tiny.NewNode(tiny.Key(0b00001000)) // 8 + c := tiny.NewNode(tiny.Key(0b00010000)) // 16 + d := tiny.NewNode(tiny.Key(0b00100000)) // 32 + + // Add the first query + queryID1 := QueryID("1") + state := p.Advance(ctx, &EventPoolAddQuery[tiny.Key, tiny.Node]{ + QueryID: queryID1, + Target: target, + KnownClosestNodes: []tiny.Node{a, b, c, d}, + }) + require.IsType(t, &StatePoolFindCloser[tiny.Key, tiny.Node]{}, state) + + // the first query should attempt to contact the node it was given + st := state.(*StatePoolFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, queryID1, st.QueryID) + require.Equal(t, a, st.NodeID) + + // Add the second query + queryID2 := QueryID("2") + state = p.Advance(ctx, &EventPoolAddQuery[tiny.Key, tiny.Node]{ + QueryID: queryID2, + Target: target, + KnownClosestNodes: []tiny.Node{a, b, c, d}, + }) + + // the first query should continue its operation in preference to starting the new query + require.IsType(t, &StatePoolFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StatePoolFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, queryID1, st.QueryID) + require.Equal(t, b, st.NodeID) + + // advance the pool again, the first query should continue its operation in preference to starting the new query + state = p.Advance(ctx, &EventPoolPoll{}) + require.IsType(t, &StatePoolFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StatePoolFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, queryID1, st.QueryID) + require.Equal(t, 
c, st.NodeID) + + // advance the pool again, the first query is at capacity so the second query can start + state = p.Advance(ctx, &EventPoolPoll{}) + require.IsType(t, &StatePoolFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StatePoolFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, queryID2, st.QueryID) + require.Equal(t, a, st.NodeID) + + // notify first query that node was contacted successfully, but no closer nodes + state = p.Advance(ctx, &EventPoolFindCloserResponse[tiny.Key, tiny.Node]{ + QueryID: queryID1, + NodeID: a, + }) + + // first query starts a new message request + require.IsType(t, &StatePoolFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StatePoolFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, queryID1, st.QueryID) + require.Equal(t, d, st.NodeID) + + // notify first query that next node was contacted successfully, but no closer nodes + state = p.Advance(ctx, &EventPoolFindCloserResponse[tiny.Key, tiny.Node]{ + QueryID: queryID1, + NodeID: b, + }) + + // first query is out of nodes to try so second query can proceed + require.IsType(t, &StatePoolFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StatePoolFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, queryID2, st.QueryID) + require.Equal(t, b, st.NodeID) +} + +func TestPoolRespectsConcurrency(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultPoolConfig() + cfg.Clock = clk + cfg.Concurrency = 2 // allow two queries to run concurrently + cfg.QueryConcurrency = 1 // allow each query to have a single request in flight + + self := tiny.NewNode(tiny.Key(0)) + p, err := NewPool[tiny.Key](self, cfg) + require.NoError(t, err) + + target := tiny.Key(0b00000001) + a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + + // Add the first query + queryID1 := QueryID("1") + state := p.Advance(ctx, &EventPoolAddQuery[tiny.Key, tiny.Node]{ + QueryID: queryID1, + Target: target, + KnownClosestNodes: []tiny.Node{a}, + }) + require.IsType(t, &StatePoolFindCloser[tiny.Key, tiny.Node]{}, state) + + // the first query should attempt to contact the node it was given + st := state.(*StatePoolFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, queryID1, st.QueryID) + require.Equal(t, a, st.NodeID) + + // Add the second query + queryID2 := QueryID("2") + state = p.Advance(ctx, &EventPoolAddQuery[tiny.Key, tiny.Node]{ + QueryID: queryID2, + Target: target, + KnownClosestNodes: []tiny.Node{a}, + }) + + // the second query should start since the first query has a request in flight + require.IsType(t, &StatePoolFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StatePoolFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, queryID2, st.QueryID) + require.Equal(t, a, st.NodeID) + + // Add a third query + queryID3 := QueryID("3") + state = p.Advance(ctx, &EventPoolAddQuery[tiny.Key, tiny.Node]{ + QueryID: queryID3, + Target: target, + KnownClosestNodes: []tiny.Node{a}, + }) + + // the third query should wait since the pool has reached maximum concurrency + require.IsType(t, &StatePoolWaitingAtCapacity{}, state) + + // notify first query that next node was contacted successfully, but no closer nodes + state = p.Advance(ctx, &EventPoolFindCloserResponse[tiny.Key, tiny.Node]{ + QueryID: queryID1, + NodeID: a, + }) + + // first query is out of nodes so it has finished + require.IsType(t, &StatePoolQueryFinished{}, state) + stf := state.(*StatePoolQueryFinished) + require.Equal(t, queryID1, stf.QueryID) + + // advancing pool again allows query 3 to start + state = p.Advance(ctx, 
&EventPoolPoll{})
+	require.IsType(t, &StatePoolFindCloser[tiny.Key, tiny.Node]{}, state)
+	st = state.(*StatePoolFindCloser[tiny.Key, tiny.Node])
+	require.Equal(t, queryID3, st.QueryID)
+	require.Equal(t, a, st.NodeID)
+}
diff --git a/v2/coord/query/query.go b/v2/coord/query/query.go
new file mode 100644
index 00000000..e5009a04
--- /dev/null
+++ b/v2/coord/query/query.go
@@ -0,0 +1,387 @@
+package query
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/benbjohnson/clock"
+	"github.com/plprobelab/go-kademlia/kad"
+	"github.com/plprobelab/go-kademlia/kaderr"
+	"github.com/plprobelab/go-kademlia/key"
+
+	"github.com/libp2p/go-libp2p-kad-dht/v2/tele"
+)
+
+type QueryID string
+
+const InvalidQueryID QueryID = ""
+
+type QueryStats struct {
+	Start    time.Time
+	End      time.Time
+	Requests int
+	Success  int
+	Failure  int
+}
+
+// QueryConfig specifies optional configuration for a Query
+type QueryConfig[K kad.Key[K]] struct {
+	Concurrency    int           // the maximum number of concurrent requests that may be in flight
+	NumResults     int           // the minimum number of nodes to successfully contact before considering iteration complete
+	RequestTimeout time.Duration // the timeout for contacting a single node
+	Clock          clock.Clock   // a clock that may be replaced by a mock when testing
+}
+
+// Validate checks the configuration options and returns an error if any have invalid values.
+func (cfg *QueryConfig[K]) Validate() error {
+	if cfg.Clock == nil {
+		return &kaderr.ConfigurationError{
+			Component: "QueryConfig",
+			Err:       fmt.Errorf("clock must not be nil"),
+		}
+	}
+	if cfg.Concurrency < 1 {
+		return &kaderr.ConfigurationError{
+			Component: "QueryConfig",
+			Err:       fmt.Errorf("concurrency must be greater than zero"),
+		}
+	}
+	if cfg.NumResults < 1 {
+		return &kaderr.ConfigurationError{
+			Component: "QueryConfig",
+			Err:       fmt.Errorf("num results must be greater than zero"),
+		}
+	}
+	if cfg.RequestTimeout < 1 {
+		return &kaderr.ConfigurationError{
+			Component: "QueryConfig",
+			Err:       fmt.Errorf("request timeout must be greater than zero"),
+		}
+	}
+	return nil
+}
+
+// DefaultQueryConfig returns the default configuration options for a Query.
+// Options may be overridden before passing to NewQuery
+func DefaultQueryConfig[K kad.Key[K]]() *QueryConfig[K] {
+	return &QueryConfig[K]{
+		Concurrency:    3,
+		NumResults:     20,
+		RequestTimeout: time.Minute,
+		Clock:          clock.New(), // use standard time
+	}
+}
+
+type Query[K kad.Key[K], N kad.NodeID[K]] struct {
+	self N
+	id   QueryID
+
+	// cfg is a copy of the optional configuration supplied to the query
+	cfg QueryConfig[K]
+
+	iter   NodeIter[K, N]
+	target K
+	stats  QueryStats
+
+	// finished indicates that the query has completed its work or has been stopped.
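+	// Once finished is true, Advance returns StateQueryFinished for every
+	// subsequent event and performs no further work.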
+ finished bool + + // inFlight is number of requests in flight, will be <= concurrency + inFlight int +} + +func NewQuery[K kad.Key[K], N kad.NodeID[K]](self N, id QueryID, target K, iter NodeIter[K, N], knownClosestNodes []N, cfg *QueryConfig[K]) (*Query[K, N], error) { + if cfg == nil { + cfg = DefaultQueryConfig[K]() + } else if err := cfg.Validate(); err != nil { + return nil, err + } + + for _, node := range knownClosestNodes { + // exclude self from closest nodes + if key.Equal(node.Key(), self.Key()) { + continue + } + iter.Add(&NodeStatus[K, N]{ + NodeID: node, + State: &StateNodeNotContacted{}, + }) + } + + return &Query[K, N]{ + self: self, + id: id, + cfg: *cfg, + iter: iter, + target: target, + }, nil +} + +func (q *Query[K, N]) Advance(ctx context.Context, ev QueryEvent) QueryState { + ctx, span := tele.StartSpan(ctx, "Query.Advance") + defer span.End() + if q.finished { + return &StateQueryFinished{ + QueryID: q.id, + Stats: q.stats, + } + } + + switch tev := ev.(type) { + case *EventQueryCancel: + q.markFinished() + return &StateQueryFinished{ + QueryID: q.id, + Stats: q.stats, + } + case *EventQueryFindCloserResponse[K, N]: + q.onMessageResponse(ctx, tev.NodeID, tev.CloserNodes) + case *EventQueryFindCloserFailure[K, N]: + q.onMessageFailure(ctx, tev.NodeID) + case nil: + // TEMPORARY: no event to process + default: + panic(fmt.Sprintf("unexpected event: %T", tev)) + } + + // count number of successes in the order of the iteration + successes := 0 + + // progressing is set to true if any node is still awaiting contact + progressing := false + + // TODO: if stalled then we should contact all remaining nodes that have not already been queried + atCapacity := func() bool { + return q.inFlight >= q.cfg.Concurrency + } + + // get all the nodes in order of distance from the target + // TODO: turn this into a walk or iterator on trie.Trie + + var returnState QueryState + + q.iter.Each(ctx, func(ctx context.Context, ni *NodeStatus[K, N]) bool { + switch st := ni.State.(type) { + case *StateNodeWaiting: + if q.cfg.Clock.Now().After(st.Deadline) { + // mark node as unresponsive + ni.State = &StateNodeUnresponsive{} + q.inFlight-- + q.stats.Failure++ + } else if atCapacity() { + returnState = &StateQueryWaitingAtCapacity{ + QueryID: q.id, + Stats: q.stats, + } + return true + } else { + // The iterator is still waiting for a result from a node so can't be considered done + progressing = true + } + case *StateNodeSucceeded: + successes++ + // The iterator has attempted to contact all nodes closer than this one. + // If the iterator is not progressing then it doesn't expect any more nodes to be added to the list. + // If it has contacted at least NumResults nodes successfully then the iteration is done. 
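+				// For example, with NumResults = 3 the query finishes when the
+				// third success is reached, provided no node closer than this
+				// one is still awaiting a response (progressing == false).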
+ if !progressing && successes >= q.cfg.NumResults { + q.markFinished() + returnState = &StateQueryFinished{ + QueryID: q.id, + Stats: q.stats, + } + return true + } + + case *StateNodeNotContacted: + if !atCapacity() { + deadline := q.cfg.Clock.Now().Add(q.cfg.RequestTimeout) + ni.State = &StateNodeWaiting{Deadline: deadline} + q.inFlight++ + q.stats.Requests++ + if q.stats.Start.IsZero() { + q.stats.Start = q.cfg.Clock.Now() + } + returnState = &StateQueryFindCloser[K, N]{ + NodeID: ni.NodeID, + QueryID: q.id, + Stats: q.stats, + Target: q.target, + } + return true + + } + returnState = &StateQueryWaitingAtCapacity{ + QueryID: q.id, + Stats: q.stats, + } + return true + case *StateNodeUnresponsive: + // ignore + case *StateNodeFailed: + // ignore + default: + panic(fmt.Sprintf("unexpected state: %T", ni.State)) + } + + return false + }) + + if returnState != nil { + return returnState + } + + if q.inFlight > 0 { + // The iterator is still waiting for results and not at capacity + return &StateQueryWaitingWithCapacity{ + QueryID: q.id, + Stats: q.stats, + } + } + + // The iterator is finished because all available nodes have been contacted + // and the iterator is not waiting for any more results. + q.markFinished() + return &StateQueryFinished{ + QueryID: q.id, + Stats: q.stats, + } +} + +func (q *Query[K, N]) markFinished() { + q.finished = true + if q.stats.End.IsZero() { + q.stats.End = q.cfg.Clock.Now() + } +} + +// onMessageResponse processes the result of a successful response received from a node. +func (q *Query[K, N]) onMessageResponse(ctx context.Context, node N, closer []N) { + ni, found := q.iter.Find(node.Key()) + if !found { + // got a rogue message + return + } + switch st := ni.State.(type) { + case *StateNodeWaiting: + q.inFlight-- + q.stats.Success++ + case *StateNodeUnresponsive: + q.stats.Success++ + + case *StateNodeNotContacted: + // ignore duplicate or late response + return + case *StateNodeFailed: + // ignore duplicate or late response + return + case *StateNodeSucceeded: + // ignore duplicate or late response + return + default: + panic(fmt.Sprintf("unexpected state: %T", st)) + } + + // add closer nodes to list + for _, id := range closer { + // exclude self from closest nodes + if key.Equal(id.Key(), q.self.Key()) { + continue + } + q.iter.Add(&NodeStatus[K, N]{ + NodeID: id, + State: &StateNodeNotContacted{}, + }) + } + ni.State = &StateNodeSucceeded{} +} + +// onMessageFailure processes the result of a failed attempt to contact a node. +func (q *Query[K, N]) onMessageFailure(ctx context.Context, node N) { + ni, found := q.iter.Find(node.Key()) + if !found { + // got a rogue message + return + } + switch st := ni.State.(type) { + case *StateNodeWaiting: + q.inFlight-- + q.stats.Failure++ + case *StateNodeUnresponsive: + // update node state to failed + break + case *StateNodeNotContacted: + // update node state to failed + break + case *StateNodeFailed: + // ignore duplicate or late response + return + case *StateNodeSucceeded: + // ignore duplicate or late response + return + default: + panic(fmt.Sprintf("unexpected state: %T", st)) + } + + ni.State = &StateNodeFailed{} +} + +type QueryState interface { + queryState() +} + +// StateQueryFinished indicates that the [Query] has finished. +type StateQueryFinished struct { + QueryID QueryID + Stats QueryStats +} + +// StateQueryFindCloser indicates that the [Query] wants to send a find closer nodes message to a node. 
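+// The caller is expected to perform the find-closer-nodes request itself and
+// report the outcome back to the query. A minimal sketch, assuming a
+// caller-supplied send function that returns the closer nodes reported by the
+// contacted node:
+//
+//	st := state.(*StateQueryFindCloser[K, N])
+//	closer, err := send(ctx, st.NodeID, st.Target)
+//	if err != nil {
+//		state = qry.Advance(ctx, &EventQueryFindCloserFailure[K, N]{NodeID: st.NodeID, Error: err})
+//	} else {
+//		state = qry.Advance(ctx, &EventQueryFindCloserResponse[K, N]{NodeID: st.NodeID, CloserNodes: closer})
+//	}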
+type StateQueryFindCloser[K kad.Key[K], N kad.NodeID[K]] struct {
+	QueryID QueryID
+	Target  K // the key that the query wants to find closer nodes for
+	NodeID  N // the node to send the message to
+	Stats   QueryStats
+}
+
+// StateQueryWaitingAtCapacity indicates that the [Query] is waiting for results and is at capacity.
+type StateQueryWaitingAtCapacity struct {
+	QueryID QueryID
+	Stats   QueryStats
+}
+
+// StateQueryWaitingWithCapacity indicates that the [Query] is waiting for results but has no further nodes to contact.
+type StateQueryWaitingWithCapacity struct {
+	QueryID QueryID
+	Stats   QueryStats
+}
+
+// queryState() ensures that only [Query] states can be assigned to a QueryState.
+func (*StateQueryFinished) queryState()            {}
+func (*StateQueryFindCloser[K, N]) queryState()    {}
+func (*StateQueryWaitingAtCapacity) queryState()   {}
+func (*StateQueryWaitingWithCapacity) queryState() {}
+
+type QueryEvent interface {
+	queryEvent()
+}
+
+// EventQueryCancel notifies a query to stop all work and enter the finished state.
+type EventQueryCancel struct{}
+
+// EventQueryFindCloserResponse notifies a [Query] that an attempt to find closer nodes has received a successful response.
+type EventQueryFindCloserResponse[K kad.Key[K], N kad.NodeID[K]] struct {
+	NodeID      N   // the node the message was sent to
+	CloserNodes []N // the closer nodes sent by the node
+}
+
+// EventQueryFindCloserFailure notifies a [Query] that an attempt to find closer nodes has failed.
+type EventQueryFindCloserFailure[K kad.Key[K], N kad.NodeID[K]] struct {
+	NodeID N     // the node the message was sent to
+	Error  error // the error that caused the failure, if any
+}
+
+// queryEvent() ensures that only events accepted by [Query] can be assigned to a [QueryEvent].
+func (*EventQueryCancel) queryEvent()                   {}
+func (*EventQueryFindCloserResponse[K, N]) queryEvent() {}
+func (*EventQueryFindCloserFailure[K, N]) queryEvent()  {}
diff --git a/v2/coord/query/query_test.go b/v2/coord/query/query_test.go
new file mode 100644
index 00000000..6565d3e5
--- /dev/null
+++ b/v2/coord/query/query_test.go
@@ -0,0 +1,1066 @@
+package query
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/benbjohnson/clock"
+	"github.com/plprobelab/go-kademlia/key"
+	"github.com/stretchr/testify/require"
+
+	"github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/tiny"
+)
+
+func TestQueryConfigValidate(t *testing.T) {
+	t.Run("default is valid", func(t *testing.T) {
+		cfg := DefaultQueryConfig[tiny.Key]()
+		require.NoError(t, cfg.Validate())
+	})
+
+	t.Run("clock is not nil", func(t *testing.T) {
+		cfg := DefaultQueryConfig[tiny.Key]()
+		cfg.Clock = nil
+		require.Error(t, cfg.Validate())
+	})
+
+	t.Run("request timeout positive", func(t *testing.T) {
+		cfg := DefaultQueryConfig[tiny.Key]()
+		cfg.RequestTimeout = 0
+		require.Error(t, cfg.Validate())
+		cfg.RequestTimeout = -1
+		require.Error(t, cfg.Validate())
+	})
+
+	t.Run("concurrency positive", func(t *testing.T) {
+		cfg := DefaultQueryConfig[tiny.Key]()
+		cfg.Concurrency = 0
+		require.Error(t, cfg.Validate())
+		cfg.Concurrency = -1
+		require.Error(t, cfg.Validate())
+	})
+
+	t.Run("num results positive", func(t *testing.T) {
+		cfg := DefaultQueryConfig[tiny.Key]()
+		cfg.NumResults = 0
+		require.Error(t, cfg.Validate())
+		cfg.NumResults = -1
+		require.Error(t, cfg.Validate())
+	})
+}
+
+func TestQueryMessagesNode(t *testing.T) {
+	ctx := context.Background()
+
+	target := tiny.Key(0b00000001)
+	a := tiny.NewNode(tiny.Key(0b00000100)) // 4
+
+	// one known node to start with
+	
knownNodes := []tiny.Node{a}
+
+	clk := clock.NewMock()
+
+	iter := NewClosestNodesIter[tiny.Key, tiny.Node](target)
+
+	cfg := DefaultQueryConfig[tiny.Key]()
+	cfg.Clock = clk
+
+	queryID := QueryID("test")
+
+	self := tiny.NewNode(tiny.Key(0))
+	qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg)
+	require.NoError(t, err)
+
+	// first thing the new query should do is request to send a message to the node
+	state := qry.Advance(ctx, nil)
+	require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state)
+
+	// check that we are messaging the correct node with the right message
+	st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node])
+	require.Equal(t, queryID, st.QueryID)
+	require.Equal(t, a, st.NodeID)
+	require.True(t, key.Equal(target, st.Target))
+	require.Equal(t, clk.Now(), st.Stats.Start)
+	require.Equal(t, 1, st.Stats.Requests)
+	require.Equal(t, 0, st.Stats.Success)
+
+	// advancing now reports that the query is waiting for a response but still has capacity
+	state = qry.Advance(ctx, nil)
+	require.IsType(t, &StateQueryWaitingWithCapacity{}, state)
+	stw := state.(*StateQueryWaitingWithCapacity)
+	require.Equal(t, 1, stw.Stats.Requests)
+	require.Equal(t, 0, stw.Stats.Success)
+}
+
+func TestQueryMessagesNearest(t *testing.T) {
+	ctx := context.Background()
+
+	target := tiny.Key(0b00000011)
+	far := tiny.NewNode(tiny.Key(0b11011011))
+	near := tiny.NewNode(tiny.Key(0b00000110))
+
+	// ensure near is nearer to target than far is
+	require.Less(t, target.Xor(near.Key()), target.Xor(far.Key()))
+
+	// knownNodes are in "random" order with furthest before nearest
+	knownNodes := []tiny.Node{
+		far,
+		near,
+	}
+	clk := clock.NewMock()
+
+	iter := NewClosestNodesIter[tiny.Key, tiny.Node](target)
+
+	cfg := DefaultQueryConfig[tiny.Key]()
+	cfg.Clock = clk
+
+	queryID := QueryID("test")
+
+	self := tiny.NewNode(tiny.Key(0))
+	qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg)
+	require.NoError(t, err)
+
+	// first thing the new query should do is message the nearest node
+	state := qry.Advance(ctx, nil)
+	require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state)
+
+	// check that we are contacting the nearest node first
+	st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node])
+	require.Equal(t, near, st.NodeID)
+}
+
+func TestQueryCancelFinishesQuery(t *testing.T) {
+	ctx := context.Background()
+
+	target := tiny.Key(0b00000001)
+	a := tiny.NewNode(tiny.Key(0b00000100)) // 4
+
+	// one known node to start with
+	knownNodes := []tiny.Node{a}
+
+	clk := clock.NewMock()
+
+	iter := NewClosestNodesIter[tiny.Key, tiny.Node](target)
+
+	cfg := DefaultQueryConfig[tiny.Key]()
+	cfg.Clock = clk
+
+	queryID := QueryID("test")
+
+	self := tiny.NewNode(tiny.Key(0))
+	qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg)
+	require.NoError(t, err)
+
+	// first thing the new query should do is request to send a message to the node
+	state := qry.Advance(ctx, nil)
+	require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state)
+
+	clk.Add(time.Second)
+
+	// cancel the query
+	state = qry.Advance(ctx, &EventQueryCancel{})
+	require.IsType(t, &StateQueryFinished{}, state)
+
+	stf := state.(*StateQueryFinished)
+	require.Equal(t, 1, stf.Stats.Requests)
+
+	// no successful responses were received before query was cancelled
+	require.Equal(t, 0, stf.Stats.Success)
+
+	// no failed responses were received before query was cancelled
+	
require.Equal(t, 0, stf.Stats.Failure)
+
+	// query should have an end time
+	require.Equal(t, clk.Now(), stf.Stats.End)
+}
+
+func TestQueryNoClosest(t *testing.T) {
+	ctx := context.Background()
+
+	target := tiny.Key(0b00000011)
+
+	// no known nodes to start with
+	knownNodes := []tiny.Node{}
+
+	iter := NewClosestNodesIter[tiny.Key, tiny.Node](target)
+
+	clk := clock.NewMock()
+	cfg := DefaultQueryConfig[tiny.Key]()
+	cfg.Clock = clk
+
+	queryID := QueryID("test")
+
+	self := tiny.NewNode(tiny.Key(0))
+	qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg)
+	require.NoError(t, err)
+
+	// query is finished because there were no nodes to contact
+	state := qry.Advance(ctx, nil)
+	require.IsType(t, &StateQueryFinished{}, state)
+
+	stf := state.(*StateQueryFinished)
+
+	// no requests were made
+	require.Equal(t, 0, stf.Stats.Requests)
+
+	// no successful responses were received before query was cancelled
+	require.Equal(t, 0, stf.Stats.Success)
+
+	// no failed responses were received before query was cancelled
+	require.Equal(t, 0, stf.Stats.Failure)
+
+	// query should have an end time
+	require.Equal(t, clk.Now(), stf.Stats.End)
+}
+
+func TestQueryWaitsAtCapacity(t *testing.T) {
+	ctx := context.Background()
+
+	target := tiny.Key(0b00000001)
+	a := tiny.NewNode(tiny.Key(0b00000100)) // 4
+	b := tiny.NewNode(tiny.Key(0b00001000)) // 8
+	c := tiny.NewNode(tiny.Key(0b00010000)) // 16
+
+	// three known nodes to start with
+	knownNodes := []tiny.Node{a, b, c}
+
+	clk := clock.NewMock()
+
+	iter := NewClosestNodesIter[tiny.Key, tiny.Node](target)
+
+	cfg := DefaultQueryConfig[tiny.Key]()
+	cfg.Clock = clk
+	cfg.Concurrency = 2
+
+	queryID := QueryID("test")
+
+	self := tiny.NewNode(tiny.Key(0))
+	qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg)
+	require.NoError(t, err)
+
+	// first thing the new query should do is request to send a message to the node
+	state := qry.Advance(ctx, nil)
+	require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state)
+	st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node])
+	require.Equal(t, a, st.NodeID)
+	require.Equal(t, 1, st.Stats.Requests)
+
+	// advancing sends the message to the next node
+	state = qry.Advance(ctx, nil)
+	require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state)
+	st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node])
+	require.Equal(t, b, st.NodeID)
+	require.Equal(t, 2, st.Stats.Requests)
+
+	// advancing now reports that the query is waiting at capacity since there are 2 messages in flight
+	state = qry.Advance(ctx, nil)
+	require.IsType(t, &StateQueryWaitingAtCapacity{}, state)
+
+	stw := state.(*StateQueryWaitingAtCapacity)
+	require.Equal(t, 2, stw.Stats.Requests)
+}
+
+func TestQueryTimedOutNodeMakesCapacity(t *testing.T) {
+	ctx := context.Background()
+
+	target := tiny.Key(0b00000001)
+	a := tiny.NewNode(tiny.Key(0b00000100)) // 4
+	b := tiny.NewNode(tiny.Key(0b00001000)) // 8
+	c := tiny.NewNode(tiny.Key(0b00010000)) // 16
+	d := tiny.NewNode(tiny.Key(0b00100000)) // 32
+
+	// ensure the order of the known nodes
+	require.True(t, target.Xor(a.Key()).Compare(target.Xor(b.Key())) == -1)
+	require.True(t, target.Xor(b.Key()).Compare(target.Xor(c.Key())) == -1)
+	require.True(t, target.Xor(c.Key()).Compare(target.Xor(d.Key())) == -1)
+
+	// knownNodes are in "random" order
+	knownNodes := []tiny.Node{b, c, a, d}
+
+	clk := clock.NewMock()
+
+	iter := NewClosestNodesIter[tiny.Key, tiny.Node](target)
+
+	cfg := 
DefaultQueryConfig[tiny.Key]() + cfg.Clock = clk + cfg.RequestTimeout = 3 * time.Minute + cfg.Concurrency = len(knownNodes) - 1 // one less than the number of initial nodes + + queryID := QueryID("test") + + self := tiny.NewNode(tiny.Key(0)) + qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + require.NoError(t, err) + + // first thing the new query should do is contact the nearest node + state := qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, a, st.NodeID) + stwm := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, 1, stwm.Stats.Requests) + require.Equal(t, 0, stwm.Stats.Success) + require.Equal(t, 0, stwm.Stats.Failure) + + // advance time by one minute + clk.Add(time.Minute) + + // while the query has capacity the query should contact the next nearest node + state = qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, b, st.NodeID) + stwm = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, 2, stwm.Stats.Requests) + require.Equal(t, 0, stwm.Stats.Success) + require.Equal(t, 0, stwm.Stats.Failure) + + // advance time by one minute + clk.Add(time.Minute) + + // while the query has capacity the query should contact the second nearest node + state = qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, c, st.NodeID) + stwm = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, 3, stwm.Stats.Requests) + require.Equal(t, 0, stwm.Stats.Success) + require.Equal(t, 0, stwm.Stats.Failure) + + // advance time by one minute + clk.Add(time.Minute) + + // the query should be at capacity + state = qry.Advance(ctx, nil) + require.IsType(t, &StateQueryWaitingAtCapacity{}, state) + stwa := state.(*StateQueryWaitingAtCapacity) + require.Equal(t, 3, stwa.Stats.Requests) + require.Equal(t, 0, stwa.Stats.Success) + require.Equal(t, 0, stwa.Stats.Failure) + + // advance time by another minute, now at 4 minutes, first node connection attempt should now time out + clk.Add(time.Minute) + + // the first node request should have timed out, making capacity for the last node to attempt connection + state = qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, d, st.NodeID) + + stwm = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, 4, stwm.Stats.Requests) + require.Equal(t, 0, stwm.Stats.Success) + require.Equal(t, 1, stwm.Stats.Failure) + + // advance time by another minute, now at 5 minutes, second node connection attempt should now time out + clk.Add(time.Minute) + + // advancing now makes more capacity + state = qry.Advance(ctx, nil) + require.IsType(t, &StateQueryWaitingWithCapacity{}, state) + + stww := state.(*StateQueryWaitingWithCapacity) + require.Equal(t, 4, stww.Stats.Requests) + require.Equal(t, 0, stww.Stats.Success) + require.Equal(t, 2, stww.Stats.Failure) +} + +func TestQueryMessageResponseMakesCapacity(t *testing.T) { + ctx := context.Background() + + target := tiny.Key(0b00000001) + a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + b := tiny.NewNode(tiny.Key(0b00001000)) // 8 + c := 
tiny.NewNode(tiny.Key(0b00010000)) // 16 + d := tiny.NewNode(tiny.Key(0b00100000)) // 32 + + // ensure the order of the known nodes + require.True(t, target.Xor(a.Key()).Compare(target.Xor(b.Key())) == -1) + require.True(t, target.Xor(b.Key()).Compare(target.Xor(c.Key())) == -1) + require.True(t, target.Xor(c.Key()).Compare(target.Xor(d.Key())) == -1) + + // knownNodes are in "random" order + knownNodes := []tiny.Node{b, c, a, d} + + clk := clock.NewMock() + + iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) + + cfg := DefaultQueryConfig[tiny.Key]() + cfg.Clock = clk + cfg.Concurrency = len(knownNodes) - 1 // one less than the number of initial nodes + + queryID := QueryID("test") + + self := tiny.NewNode(tiny.Key(0)) + qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + require.NoError(t, err) + + // first thing the new query should do is contact the nearest node + state := qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, a, st.NodeID) + stwm := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, 1, stwm.Stats.Requests) + require.Equal(t, 0, stwm.Stats.Success) + require.Equal(t, 0, stwm.Stats.Failure) + + // while the query has capacity the query should contact the next nearest node + state = qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, b, st.NodeID) + stwm = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, 2, stwm.Stats.Requests) + require.Equal(t, 0, stwm.Stats.Success) + require.Equal(t, 0, stwm.Stats.Failure) + + // while the query has capacity the query should contact the second nearest node + state = qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, c, st.NodeID) + stwm = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, 3, stwm.Stats.Requests) + require.Equal(t, 0, stwm.Stats.Success) + require.Equal(t, 0, stwm.Stats.Failure) + + // the query should be at capacity + state = qry.Advance(ctx, nil) + require.IsType(t, &StateQueryWaitingAtCapacity{}, state) + + // notify query that first node was contacted successfully, now node d can be contacted + state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{NodeID: a}) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, d, st.NodeID) + stwm = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, 4, stwm.Stats.Requests) + require.Equal(t, 1, stwm.Stats.Success) + require.Equal(t, 0, stwm.Stats.Failure) + + // the query should be at capacity again + state = qry.Advance(ctx, nil) + require.IsType(t, &StateQueryWaitingAtCapacity{}, state) + stwa := state.(*StateQueryWaitingAtCapacity) + require.Equal(t, 4, stwa.Stats.Requests) + require.Equal(t, 1, stwa.Stats.Success) + require.Equal(t, 0, stwa.Stats.Failure) +} + +func TestQueryCloserNodesAreAddedToIteration(t *testing.T) { + ctx := context.Background() + + target := tiny.Key(0b00000001) + a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + b := tiny.NewNode(tiny.Key(0b00001000)) // 8 + c := tiny.NewNode(tiny.Key(0b00010000)) // 16 + d := tiny.NewNode(tiny.Key(0b00100000)) // 32 + + // ensure 
the order of the known nodes
+	require.True(t, target.Xor(a.Key()).Compare(target.Xor(b.Key())) == -1)
+	require.True(t, target.Xor(b.Key()).Compare(target.Xor(c.Key())) == -1)
+	require.True(t, target.Xor(c.Key()).Compare(target.Xor(d.Key())) == -1)
+
+	// one known node to start with
+	knownNodes := []tiny.Node{d}
+
+	clk := clock.NewMock()
+
+	iter := NewClosestNodesIter[tiny.Key, tiny.Node](target)
+
+	cfg := DefaultQueryConfig[tiny.Key]()
+	cfg.Clock = clk
+	cfg.Concurrency = 2
+
+	queryID := QueryID("test")
+
+	self := tiny.NewNode(tiny.Key(0))
+	qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg)
+	require.NoError(t, err)
+
+	// first thing the new query should do is contact the first node
+	state := qry.Advance(ctx, nil)
+	require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state)
+	st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node])
+	require.Equal(t, d, st.NodeID)
+
+	// advancing reports query has capacity
+	state = qry.Advance(ctx, nil)
+	require.IsType(t, &StateQueryWaitingWithCapacity{}, state)
+
+	// notify query that first node was contacted successfully, with closer nodes
+	state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{
+		NodeID: d,
+		CloserNodes: []tiny.Node{
+			b,
+			a,
+		},
+	})
+	require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state)
+
+	// query should contact the next nearest uncontacted node
+	st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node])
+	require.Equal(t, a, st.NodeID)
+}
+
+func TestQueryCloserNodesIgnoresDuplicates(t *testing.T) {
+	ctx := context.Background()
+
+	target := tiny.Key(0b00000001)
+	a := tiny.NewNode(tiny.Key(0b00000100)) // 4
+	b := tiny.NewNode(tiny.Key(0b00001000)) // 8
+	c := tiny.NewNode(tiny.Key(0b00010000)) // 16
+	d := tiny.NewNode(tiny.Key(0b00100000)) // 32
+
+	// ensure the order of the known nodes
+	require.True(t, target.Xor(a.Key()).Compare(target.Xor(b.Key())) == -1)
+	require.True(t, target.Xor(b.Key()).Compare(target.Xor(c.Key())) == -1)
+	require.True(t, target.Xor(c.Key()).Compare(target.Xor(d.Key())) == -1)
+
+	// two known nodes to start with
+	knownNodes := []tiny.Node{d, a}
+
+	clk := clock.NewMock()
+
+	iter := NewClosestNodesIter[tiny.Key, tiny.Node](target)
+
+	cfg := DefaultQueryConfig[tiny.Key]()
+	cfg.Clock = clk
+	cfg.Concurrency = 2
+
+	queryID := QueryID("test")
+
+	self := tiny.NewNode(tiny.Key(0))
+	qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg)
+	require.NoError(t, err)
+
+	// first thing the new query should do is contact the first node
+	state := qry.Advance(ctx, nil)
+	require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state)
+	st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node])
+	require.Equal(t, a, st.NodeID)
+
+	// next the query attempts to contact second nearest node
+	state = qry.Advance(ctx, nil)
+	require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state)
+	st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node])
+	require.Equal(t, d, st.NodeID)
+
+	// advancing reports query is at capacity
+	state = qry.Advance(ctx, nil)
+	require.IsType(t, &StateQueryWaitingAtCapacity{}, state)
+
+	// notify query that second node was contacted successfully, with closer nodes
+	state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{
+		NodeID: d,
+		CloserNodes: []tiny.Node{
+			b,
+			a,
+		},
+	})
+	require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state)
+
+	// query should contact the next nearest uncontacted 
node, which is b + st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, b, st.NodeID) +} + +func TestQueryCancelFinishesIteration(t *testing.T) { + ctx := context.Background() + + target := tiny.Key(0b00000001) + a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + + // one known node to start with + knownNodes := []tiny.Node{a} + + clk := clock.NewMock() + + iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) + + cfg := DefaultQueryConfig[tiny.Key]() + cfg.Clock = clk + cfg.Concurrency = 2 + + queryID := QueryID("test") + + self := tiny.NewNode(tiny.Key(0)) + qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + require.NoError(t, err) + + // first thing the new query should do is contact the first node + state := qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, a, st.NodeID) + + // cancel the query so it is now finished + state = qry.Advance(ctx, &EventQueryCancel{}) + require.IsType(t, &StateQueryFinished{}, state) + + stf := state.(*StateQueryFinished) + require.Equal(t, 0, stf.Stats.Success) +} + +func TestQueryFinishedIgnoresLaterEvents(t *testing.T) { + ctx := context.Background() + + target := tiny.Key(0b00000001) + a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + b := tiny.NewNode(tiny.Key(0b00001000)) // 8 + + // one known node to start with + knownNodes := []tiny.Node{b} + + clk := clock.NewMock() + + iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) + + cfg := DefaultQueryConfig[tiny.Key]() + cfg.Clock = clk + cfg.Concurrency = 2 + + queryID := QueryID("test") + + self := tiny.NewNode(tiny.Key(0)) + qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + require.NoError(t, err) + + // first thing the new query should do is contact the first node + state := qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, b, st.NodeID) + + // cancel the query so it is now finished + state = qry.Advance(ctx, &EventQueryCancel{}) + require.IsType(t, &StateQueryFinished{}, state) + + // no successes + stf := state.(*StateQueryFinished) + require.Equal(t, 1, stf.Stats.Requests) + require.Equal(t, 0, stf.Stats.Success) + require.Equal(t, 0, stf.Stats.Failure) + + // notify query that second node was contacted successfully, with closer nodes + state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{ + NodeID: b, + CloserNodes: []tiny.Node{a}, + }) + + // query remains finished + require.IsType(t, &StateQueryFinished{}, state) + + // still no successes since contact message was after query had been cancelled + stf = state.(*StateQueryFinished) + require.Equal(t, 1, stf.Stats.Requests) + require.Equal(t, 0, stf.Stats.Success) + require.Equal(t, 0, stf.Stats.Failure) +} + +func TestQueryWithCloserIterIgnoresMessagesFromUnknownNodes(t *testing.T) { + ctx := context.Background() + + target := tiny.Key(0b00000001) + a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + b := tiny.NewNode(tiny.Key(0b00001000)) // 8 + c := tiny.NewNode(tiny.Key(0b00010000)) // 16 + + // one known node to start with + knownNodes := []tiny.Node{c} + + clk := clock.NewMock() + + iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) + + cfg := DefaultQueryConfig[tiny.Key]() + cfg.Clock = clk + cfg.Concurrency = 2 + + queryID := QueryID("test") + + self := 
tiny.NewNode(tiny.Key(0))
+	qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg)
+	require.NoError(t, err)
+
+	// first thing the new query should do is contact the first node
+	state := qry.Advance(ctx, nil)
+	require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state)
+	st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node])
+	require.Equal(t, c, st.NodeID)
+	stwm := state.(*StateQueryFindCloser[tiny.Key, tiny.Node])
+	require.Equal(t, 1, stwm.Stats.Requests)
+	require.Equal(t, 0, stwm.Stats.Success)
+	require.Equal(t, 0, stwm.Stats.Failure)
+
+	// notify query of a response from node b, which the query never contacted
+	state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{
+		NodeID:      b,
+		CloserNodes: []tiny.Node{a},
+	})
+
+	// query ignores message from unknown node
+	require.IsType(t, &StateQueryWaitingWithCapacity{}, state)
+
+	stwc := state.(*StateQueryWaitingWithCapacity)
+	require.Equal(t, 1, stwc.Stats.Requests)
+	require.Equal(t, 0, stwc.Stats.Success)
+	require.Equal(t, 0, stwc.Stats.Failure)
+}
+
+func TestQueryWithCloserIterFinishesWhenNumResultsReached(t *testing.T) {
+	ctx := context.Background()
+
+	target := tiny.Key(0b00000001)
+	a := tiny.NewNode(tiny.Key(0b00000100)) // 4
+	b := tiny.NewNode(tiny.Key(0b00001000)) // 8
+	c := tiny.NewNode(tiny.Key(0b00010000)) // 16
+	d := tiny.NewNode(tiny.Key(0b00100000)) // 32
+
+	// four known nodes to start with
+	knownNodes := []tiny.Node{a, b, c, d}
+
+	clk := clock.NewMock()
+
+	iter := NewClosestNodesIter[tiny.Key, tiny.Node](target)
+
+	cfg := DefaultQueryConfig[tiny.Key]()
+	cfg.Clock = clk
+	cfg.Concurrency = 4
+	cfg.NumResults = 2
+
+	queryID := QueryID("test")
+
+	self := tiny.NewNode(tiny.Key(0))
+	qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg)
+	require.NoError(t, err)
+
+	// contact first node
+	state := qry.Advance(ctx, nil)
+	require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state)
+	st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node])
+	require.Equal(t, a, st.NodeID)
+
+	// contact second node
+	state = qry.Advance(ctx, nil)
+	require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state)
+	st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node])
+	require.Equal(t, b, st.NodeID)
+
+	// notify query that first node was contacted successfully
+	state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{
+		NodeID: a,
+	})
+
+	// query attempts to contact third node
+	require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state)
+	st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node])
+	require.Equal(t, c, st.NodeID)
+
+	// notify query that second node was contacted successfully
+	state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{
+		NodeID: b,
+	})
+
+	// query has finished since it contacted the NumResults closest nodes
+	require.IsType(t, &StateQueryFinished{}, state)
+}
+
+func TestQueryWithCloserIterContinuesUntilNumResultsReached(t *testing.T) {
+	ctx := context.Background()
+
+	target := tiny.Key(0b00000001)
+	a := tiny.NewNode(tiny.Key(0b00000100)) // 4
+	b := tiny.NewNode(tiny.Key(0b00001000)) // 8
+	c := tiny.NewNode(tiny.Key(0b00010000)) // 16
+
+	// one known node to start with, the furthest
+	knownNodes := []tiny.Node{c}
+
+	clk := clock.NewMock()
+
+	iter := NewClosestNodesIter[tiny.Key, tiny.Node](target)
+
+	cfg := DefaultQueryConfig[tiny.Key]()
+	cfg.Clock = clk
+	cfg.Concurrency = 4
+	cfg.NumResults = 2
+
+	queryID 
:= QueryID("test") + + self := tiny.NewNode(tiny.Key(0)) + qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + require.NoError(t, err) + + // contact first node + state := qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, c, st.NodeID) + + // notify query that node was contacted successfully and tell it about + // a closer one + state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{ + NodeID: c, + CloserNodes: []tiny.Node{b}, + }) + + // query attempts to contact second node + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, b, st.NodeID) + + // notify query that node was contacted successfully and tell it about + // a closer one + state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{ + NodeID: b, + CloserNodes: []tiny.Node{a}, + }) + + // query has seen enough successful contacts but there are still + // closer nodes that have not been contacted, so query attempts + // to contact third node + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, a, st.NodeID) + + // notify query that second node was contacted successfully + state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{ + NodeID: a, + }) + + // query has finished since it contacted the NumResults closest nodes + require.IsType(t, &StateQueryFinished{}, state) + + stf := state.(*StateQueryFinished) + require.Equal(t, 3, stf.Stats.Success) +} + +func TestQueryNotContactedMakesCapacity(t *testing.T) { + ctx := context.Background() + + target := tiny.Key(0b00000001) + a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + b := tiny.NewNode(tiny.Key(0b00001000)) // 8 + c := tiny.NewNode(tiny.Key(0b00010000)) // 16 + d := tiny.NewNode(tiny.Key(0b00100000)) // 32 + + // ensure the order of the known nodes + require.True(t, target.Xor(a.Key()).Compare(target.Xor(b.Key())) == -1) + require.True(t, target.Xor(b.Key()).Compare(target.Xor(c.Key())) == -1) + require.True(t, target.Xor(c.Key()).Compare(target.Xor(d.Key())) == -1) + + knownNodes := []tiny.Node{a, b, c, d} + iter := NewSequentialIter[tiny.Key, tiny.Node]() + + clk := clock.NewMock() + cfg := DefaultQueryConfig[tiny.Key]() + cfg.Clock = clk + cfg.Concurrency = len(knownNodes) - 1 // one less than the number of initial nodes + + queryID := QueryID("test") + + self := tiny.NewNode(tiny.Key(0)) + qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + require.NoError(t, err) + + // first thing the new query should do is contact the nearest node + state := qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, a, st.NodeID) + + // while the query has capacity the query should contact the next nearest node + state = qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, b, st.NodeID) + + // while the query has capacity the query should contact the second nearest node + state = qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StateQueryFindCloser[tiny.Key, 
tiny.Node]) + require.Equal(t, c, st.NodeID) + + // the query should be at capacity + state = qry.Advance(ctx, nil) + require.IsType(t, &StateQueryWaitingAtCapacity{}, state) + + // notify query that first node was not contacted, now node d can be contacted + state = qry.Advance(ctx, &EventQueryFindCloserFailure[tiny.Key, tiny.Node]{NodeID: a}) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, d, st.NodeID) + + // the query should be at capacity again + state = qry.Advance(ctx, nil) + require.IsType(t, &StateQueryWaitingAtCapacity{}, state) +} + +func TestQueryAllNotContactedFinishes(t *testing.T) { + ctx := context.Background() + + target := tiny.Key(0b00000001) + a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + b := tiny.NewNode(tiny.Key(0b00001000)) // 8 + c := tiny.NewNode(tiny.Key(0b00010000)) // 16 + + // knownNodes are in "random" order + knownNodes := []tiny.Node{a, b, c} + + clk := clock.NewMock() + + iter := NewSequentialIter[tiny.Key, tiny.Node]() + + cfg := DefaultQueryConfig[tiny.Key]() + cfg.Clock = clk + cfg.Concurrency = len(knownNodes) // allow all to be contacted at once + + queryID := QueryID("test") + + self := tiny.NewNode(tiny.Key(0)) + qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + require.NoError(t, err) + + // first thing the new query should do is contact the nearest node + state := qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + + // while the query has capacity the query should contact the next nearest node + state = qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + + // while the query has capacity the query should contact the third nearest node + state = qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + + // the query should be at capacity + state = qry.Advance(ctx, nil) + require.IsType(t, &StateQueryWaitingAtCapacity{}, state) + + // notify query that first node was not contacted + state = qry.Advance(ctx, &EventQueryFindCloserFailure[tiny.Key, tiny.Node]{NodeID: a}) + require.IsType(t, &StateQueryWaitingWithCapacity{}, state) + + // notify query that second node was not contacted + state = qry.Advance(ctx, &EventQueryFindCloserFailure[tiny.Key, tiny.Node]{NodeID: b}) + require.IsType(t, &StateQueryWaitingWithCapacity{}, state) + + // notify query that third node was not contacted + state = qry.Advance(ctx, &EventQueryFindCloserFailure[tiny.Key, tiny.Node]{NodeID: c}) + + // query has finished since it contacted all possible nodes + require.IsType(t, &StateQueryFinished{}, state) + + stf := state.(*StateQueryFinished) + require.Equal(t, 0, stf.Stats.Success) +} + +func TestQueryAllContactedFinishes(t *testing.T) { + ctx := context.Background() + + target := tiny.Key(0b00000001) + a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + b := tiny.NewNode(tiny.Key(0b00001000)) // 8 + c := tiny.NewNode(tiny.Key(0b00010000)) // 16 + + knownNodes := []tiny.Node{a, b, c} + + clk := clock.NewMock() + + iter := NewSequentialIter[tiny.Key, tiny.Node]() + + cfg := DefaultQueryConfig[tiny.Key]() + cfg.Clock = clk + cfg.Concurrency = len(knownNodes) // allow all to be contacted at once + cfg.NumResults = len(knownNodes) + 1 // one more than the size of the network + + queryID := QueryID("test") + + self := tiny.NewNode(tiny.Key(0)) + qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, 
iter, knownNodes, cfg) + require.NoError(t, err) + + // first thing the new query should do is contact the nearest node + state := qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + + // while the query has capacity the query should contact the next nearest node + state = qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + + // while the query has capacity the query should contact the third nearest node + state = qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + + // the query should be at capacity + state = qry.Advance(ctx, nil) + require.IsType(t, &StateQueryWaitingAtCapacity{}, state) + + // notify query that first node was contacted successfully, but no closer nodes + state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{NodeID: a}) + require.IsType(t, &StateQueryWaitingWithCapacity{}, state) + + // notify query that second node was contacted successfully, but no closer nodes + state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{NodeID: b}) + require.IsType(t, &StateQueryWaitingWithCapacity{}, state) + + // notify query that third node was contacted successfully, but no closer nodes + state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{NodeID: c}) + + // query has finished since it contacted all possible nodes, even though it didn't + // reach the desired NumResults + require.IsType(t, &StateQueryFinished{}, state) + + stf := state.(*StateQueryFinished) + require.Equal(t, 3, stf.Stats.Success) +} + +func TestQueryNeverMessagesSelf(t *testing.T) { + ctx := context.Background() + + target := tiny.Key(0b00000001) + a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + b := tiny.NewNode(tiny.Key(0b00001000)) // 8 + + // one known node to start with + knownNodes := []tiny.Node{b} + + clk := clock.NewMock() + + iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) + + cfg := DefaultQueryConfig[tiny.Key]() + cfg.Clock = clk + cfg.Concurrency = 2 + + queryID := QueryID("test") + + self := a + qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + require.NoError(t, err) + + // first thing the new query should do is contact the first node + state := qry.Advance(ctx, nil) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, b, st.NodeID) + + // notify query that first node was contacted successfully, with closer nodes + state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{ + NodeID: b, + CloserNodes: []tiny.Node{a}, + }) + + // query is finished since it can't contact self + require.IsType(t, &StateQueryFinished{}, state) + + // one successful message + stf := state.(*StateQueryFinished) + require.Equal(t, 1, stf.Stats.Requests) + require.Equal(t, 1, stf.Stats.Success) + require.Equal(t, 0, stf.Stats.Failure) +} diff --git a/v2/coord/routing.go b/v2/coord/routing.go index 488ac689..94c80907 100644 --- a/v2/coord/routing.go +++ b/v2/coord/routing.go @@ -6,19 +6,19 @@ import ( "sync" "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" - "github.com/plprobelab/go-kademlia/routing" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/routing" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) // A RoutingBehaviour provices the 
behaviours for bootstrapping and maintaining a DHT's routing table. type RoutingBehaviour struct { // self is the peer id of the system the dht is running on - self peer.ID + self kadt.PeerID + // bootstrap is the bootstrap state machine, responsible for bootstrapping the routing table bootstrap SM[routing.BootstrapEvent, routing.BootstrapState] @@ -36,14 +36,14 @@ type RoutingBehaviour struct { tracer trace.Tracer } -func NewRoutingBehaviour(self peer.ID, bootstrap SM[routing.BootstrapEvent, routing.BootstrapState], include SM[routing.IncludeEvent, routing.IncludeState], probe SM[routing.ProbeEvent, routing.ProbeState], logger *slog.Logger, tracer trace.Tracer) *RoutingBehaviour { +func NewRoutingBehaviour(self kadt.PeerID, bootstrap SM[routing.BootstrapEvent, routing.BootstrapState], include SM[routing.IncludeEvent, routing.IncludeState], probe SM[routing.ProbeEvent, routing.ProbeState], logger *slog.Logger, tracer trace.Tracer) *RoutingBehaviour { r := &RoutingBehaviour{ self: self, bootstrap: bootstrap, include: include, probe: probe, ready: make(chan struct{}, 1), - logger: logger, + logger: logger.With("behaviour", "routing"), tracer: tracer, } return r @@ -65,10 +65,8 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { switch ev := ev.(type) { case *EventStartBootstrap: span.SetAttributes(attribute.String("event", "EventStartBootstrap")) - cmd := &routing.EventBootstrapStart[KadKey, ma.Multiaddr]{ - ProtocolID: ev.ProtocolID, - Message: ev.Message, - KnownClosestNodes: SliceOfPeerIDToSliceOfNodeID(ev.SeedNodes), + cmd := &routing.EventBootstrapStart[kadt.Key, kadt.PeerID]{ + KnownClosestNodes: SliceOfPeerIDToSliceOfKadPeerID(ev.SeedNodes), } // attempt to advance the bootstrap next, ok := r.advanceBootstrap(ctx, cmd) @@ -79,12 +77,12 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { case *EventAddAddrInfo: span.SetAttributes(attribute.String("event", "EventAddAddrInfo")) // Ignore self - if ev.NodeInfo.ID == r.self { + if ev.NodeInfo.ID == peer.ID(r.self) { break } // TODO: apply ttl - cmd := &routing.EventIncludeAddCandidate[KadKey, ma.Multiaddr]{ - NodeInfo: kadt.AddrInfo{Info: ev.NodeInfo}, + cmd := &routing.EventIncludeAddCandidate[kadt.Key, kadt.PeerID]{ + NodeID: kadt.PeerID(ev.NodeInfo.ID), } // attempt to advance the include next, ok := r.advanceInclude(ctx, cmd) @@ -94,8 +92,8 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { case *EventRoutingUpdated: span.SetAttributes(attribute.String("event", "EventRoutingUpdated")) - cmd := &routing.EventProbeAdd[KadKey]{ - NodeID: AddrInfoToNodeID(ev.NodeInfo), + cmd := &routing.EventProbeAdd[kadt.Key, kadt.PeerID]{ + NodeID: AddrInfoToKadPeerID(ev.NodeInfo), } // attempt to advance the probe state machine next, ok := r.advanceProbe(ctx, cmd) @@ -113,9 +111,9 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { NodeInfo: info, }) } - cmd := &routing.EventBootstrapMessageResponse[KadKey, ma.Multiaddr]{ - NodeID: kadt.PeerID(ev.To.ID), - Response: CloserNodesResponse(ev.Target, ev.CloserNodes), + cmd := &routing.EventBootstrapFindCloserResponse[kadt.Key, kadt.PeerID]{ + NodeID: kadt.PeerID(ev.To.ID), + CloserNodes: SliceOfAddrInfoToSliceOfKadPeerID(ev.CloserNodes), } // attempt to advance the bootstrap next, ok := r.advanceBootstrap(ctx, cmd) @@ -124,9 +122,18 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { } case "include": - cmd := &routing.EventIncludeMessageResponse[KadKey, ma.Multiaddr]{ - 
NodeInfo: kadt.AddrInfo{Info: ev.To}, - Response: CloserNodesResponse(ev.Target, ev.CloserNodes), + var cmd routing.IncludeEvent + + // require that the node responded with at least one closer node + if len(ev.CloserNodes) > 0 { + cmd = &routing.EventIncludeConnectivityCheckSuccess[kadt.Key, kadt.PeerID]{ + NodeID: kadt.PeerID(ev.To.ID), + } + } else { + cmd = &routing.EventIncludeConnectivityCheckFailure[kadt.Key, kadt.PeerID]{ + NodeID: kadt.PeerID(ev.To.ID), + Error: fmt.Errorf("response did not include any closer nodes"), + } } // attempt to advance the include next, ok := r.advanceInclude(ctx, cmd) @@ -135,9 +142,17 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { } case "probe": - cmd := &routing.EventProbeMessageResponse[KadKey, ma.Multiaddr]{ - NodeInfo: kadt.AddrInfo{Info: ev.To}, - Response: CloserNodesResponse(ev.Target, ev.CloserNodes), + var cmd routing.ProbeEvent + // require that the node responded with at least one closer node + if len(ev.CloserNodes) > 0 { + cmd = &routing.EventProbeConnectivityCheckSuccess[kadt.Key, kadt.PeerID]{ + NodeID: kadt.PeerID(ev.To.ID), + } + } else { + cmd = &routing.EventProbeConnectivityCheckFailure[kadt.Key, kadt.PeerID]{ + NodeID: kadt.PeerID(ev.To.ID), + Error: fmt.Errorf("response did not include any closer nodes"), + } } // attempt to advance the probe state machine next, ok := r.advanceProbe(ctx, cmd) @@ -153,7 +168,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { span.RecordError(ev.Err) switch ev.QueryID { case "bootstrap": - cmd := &routing.EventBootstrapMessageFailure[KadKey]{ + cmd := &routing.EventBootstrapFindCloserFailure[kadt.Key, kadt.PeerID]{ NodeID: kadt.PeerID(ev.To.ID), Error: ev.Err, } @@ -163,9 +178,9 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { r.pending = append(r.pending, next) } case "include": - cmd := &routing.EventIncludeMessageFailure[KadKey, ma.Multiaddr]{ - NodeInfo: kadt.AddrInfo{Info: ev.To}, - Error: ev.Err, + cmd := &routing.EventIncludeConnectivityCheckFailure[kadt.Key, kadt.PeerID]{ + NodeID: kadt.PeerID(ev.To.ID), + Error: ev.Err, } // attempt to advance the include state machine next, ok := r.advanceInclude(ctx, cmd) @@ -173,9 +188,9 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { r.pending = append(r.pending, next) } case "probe": - cmd := &routing.EventProbeMessageFailure[KadKey, ma.Multiaddr]{ - NodeInfo: kadt.AddrInfo{Info: ev.To}, - Error: ev.Err, + cmd := &routing.EventProbeConnectivityCheckFailure[kadt.Key, kadt.PeerID]{ + NodeID: kadt.PeerID(ev.To.ID), + Error: ev.Err, } // attempt to advance the probe state machine next, ok := r.advanceProbe(ctx, cmd) @@ -255,11 +270,11 @@ func (r *RoutingBehaviour) advanceBootstrap(ctx context.Context, ev routing.Boot bstate := r.bootstrap.Advance(ctx, ev) switch st := bstate.(type) { - case *routing.StateBootstrapMessage[KadKey, ma.Multiaddr]: + case *routing.StateBootstrapFindCloser[kadt.Key, kadt.PeerID]: return &EventOutboundGetCloserNodes{ QueryID: "bootstrap", - To: NodeIDToAddrInfo(st.NodeID), - Target: st.Message.Target(), + To: KadPeerIDToAddrInfo(st.NodeID), + Target: st.Target, Notify: r, }, true @@ -284,28 +299,28 @@ func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.Includ istate := r.include.Advance(ctx, ev) switch st := istate.(type) { - case *routing.StateIncludeFindNodeMessage[KadKey, ma.Multiaddr]: + case *routing.StateIncludeConnectivityCheck[kadt.Key, kadt.PeerID]: 
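+		// a connectivity check is judged successful only if the contacted
+		// node returns at least one closer node (see the "include" response
+		// handling in notify above)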
span.SetAttributes(attribute.String("out_event", "EventOutboundGetCloserNodes"))
		// include wants to send a find node message to a node
		return &EventOutboundGetCloserNodes{
			QueryID: "include",
-			To:      NodeInfoToAddrInfo(st.NodeInfo),
-			Target:  st.NodeInfo.ID().Key(),
+			To:      KadPeerIDToAddrInfo(st.NodeID),
+			Target:  st.NodeID.Key(),
			Notify:  r,
		}, true

-	case *routing.StateIncludeRoutingUpdated[KadKey, ma.Multiaddr]:
+	case *routing.StateIncludeRoutingUpdated[kadt.Key, kadt.PeerID]:
		// a node has been included in the routing table
		// notify other routing state machines that there is a new node in the routing table
		r.notify(ctx, &EventRoutingUpdated{
-			NodeInfo: NodeInfoToAddrInfo(st.NodeInfo),
+			NodeInfo: KadPeerIDToAddrInfo(st.NodeID),
		})

		// return the event to notify outwards too
		span.SetAttributes(attribute.String("out_event", "EventRoutingUpdated"))
		return &EventRoutingUpdated{
-			NodeInfo: NodeInfoToAddrInfo(st.NodeInfo),
+			NodeInfo: KadPeerIDToAddrInfo(st.NodeID),
		}, true
	case *routing.StateIncludeWaitingAtCapacity:
		// nothing to do except wait for message response or timeout
@@ -327,19 +342,19 @@ func (r *RoutingBehaviour) advanceProbe(ctx context.Context, ev routing.ProbeEve
	defer span.End()
	st := r.probe.Advance(ctx, ev)
	switch st := st.(type) {
-	case *routing.StateProbeConnectivityCheck[KadKey]:
+	case *routing.StateProbeConnectivityCheck[kadt.Key, kadt.PeerID]:
		// probe wants to send a find node message to a node
		return &EventOutboundGetCloserNodes{
			QueryID: "probe",
-			To:      NodeIDToAddrInfo(st.NodeID),
+			To:      KadPeerIDToAddrInfo(st.NodeID),
			Target:  st.NodeID.Key(),
			Notify:  r,
		}, true
-	case *routing.StateProbeNodeFailure[KadKey]:
+	case *routing.StateProbeNodeFailure[kadt.Key, kadt.PeerID]:
		// a node has failed a connectivity check and has been removed from the routing table and the probe list
		// add the node to the inclusion list for a second chance
		r.notify(ctx, &EventAddAddrInfo{
-			NodeInfo: NodeIDToAddrInfo(st.NodeID),
+			NodeInfo: KadPeerIDToAddrInfo(st.NodeID),
		})
	case *routing.StateProbeWaitingAtCapacity:
		// the probe state machine is waiting for responses for checks and the maximum number of concurrent checks has been reached.
diff --git a/v2/coord/routing/bootstrap.go b/v2/coord/routing/bootstrap.go
new file mode 100644
index 00000000..2c674b00
--- /dev/null
+++ b/v2/coord/routing/bootstrap.go
@@ -0,0 +1,248 @@
+package routing
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/benbjohnson/clock"
+	"github.com/plprobelab/go-kademlia/kad"
+	"github.com/plprobelab/go-kademlia/kaderr"
+
+	"github.com/libp2p/go-libp2p-kad-dht/v2/coord/query"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/tele"
+)
+
+type Bootstrap[K kad.Key[K], N kad.NodeID[K]] struct {
+	// self is the node id of the system the bootstrap is running on
+	self N
+
+	// qry is the query used by the bootstrap process
+	qry *query.Query[K, N]
+
+	// cfg is a copy of the optional configuration supplied to the Bootstrap
+	cfg BootstrapConfig[K]
+}
+
+// BootstrapConfig specifies optional configuration for a Bootstrap
+type BootstrapConfig[K kad.Key[K]] struct {
+	Timeout            time.Duration // the time to wait before terminating a query that is not making progress
+	RequestConcurrency int           // the maximum number of concurrent requests that each query may have in flight
+	RequestTimeout     time.Duration // the timeout queries should use for contacting a single node
+	Clock              clock.Clock   // a clock that may be replaced by a mock when testing
+}
+
+// Validate checks the configuration options and returns an error if any have invalid values.
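As a usage sketch (an editorial annotation, not part of the patch): callers take the defaults, override a field or two, and let validation catch mistakes. tiny.Key and tiny.Node are the test doubles from coord/internal/tiny used by the tests further down; everything else is the API introduced in this file.

	cfg := DefaultBootstrapConfig[tiny.Key]()
	cfg.RequestConcurrency = 5            // contact more peers in parallel
	cfg.RequestTimeout = 30 * time.Second // per-request deadline
	if err := cfg.Validate(); err != nil {
		panic(err) // sketch only; a nil clock or non-positive value lands here
	}
	self := tiny.NewNode(tiny.Key(0))
	bs, err := NewBootstrap[tiny.Key](self, cfg) // bs drives the loop shown further below

Note that NewBootstrap repeats the validation, so the explicit Validate call is only needed when the error should surface early. Validation itself is implemented as follows: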
+func (cfg *BootstrapConfig[K]) Validate() error { + if cfg.Clock == nil { + return &kaderr.ConfigurationError{ + Component: "BootstrapConfig", + Err: fmt.Errorf("clock must not be nil"), + } + } + + if cfg.Timeout < 1 { + return &kaderr.ConfigurationError{ + Component: "BootstrapConfig", + Err: fmt.Errorf("timeout must be greater than zero"), + } + } + + if cfg.RequestConcurrency < 1 { + return &kaderr.ConfigurationError{ + Component: "BootstrapConfig", + Err: fmt.Errorf("request concurrency must be greater than zero"), + } + } + + if cfg.RequestTimeout < 1 { + return &kaderr.ConfigurationError{ + Component: "BootstrapConfig", + Err: fmt.Errorf("request timeout must be greater than zero"), + } + } + + return nil +} + +// DefaultBootstrapConfig returns the default configuration options for a Bootstrap. +// Options may be overridden before passing to NewBootstrap +func DefaultBootstrapConfig[K kad.Key[K]]() *BootstrapConfig[K] { + return &BootstrapConfig[K]{ + Clock: clock.New(), // use standard time + Timeout: 5 * time.Minute, + RequestConcurrency: 3, + RequestTimeout: time.Minute, + } +} + +func NewBootstrap[K kad.Key[K], N kad.NodeID[K]](self N, cfg *BootstrapConfig[K]) (*Bootstrap[K, N], error) { + if cfg == nil { + cfg = DefaultBootstrapConfig[K]() + } else if err := cfg.Validate(); err != nil { + return nil, err + } + + return &Bootstrap[K, N]{ + self: self, + cfg: *cfg, + }, nil +} + +// Advance advances the state of the bootstrap by attempting to advance its query if running. +func (b *Bootstrap[K, N]) Advance(ctx context.Context, ev BootstrapEvent) BootstrapState { + ctx, span := tele.StartSpan(ctx, "Bootstrap.Advance") + defer span.End() + + switch tev := ev.(type) { + case *EventBootstrapStart[K, N]: + + // TODO: ignore start event if query is already in progress + iter := query.NewClosestNodesIter[K, N](b.self.Key()) + + qryCfg := query.DefaultQueryConfig[K]() + qryCfg.Clock = b.cfg.Clock + qryCfg.Concurrency = b.cfg.RequestConcurrency + qryCfg.RequestTimeout = b.cfg.RequestTimeout + + queryID := query.QueryID("bootstrap") + + qry, err := query.NewQuery[K, N](b.self, queryID, b.self.Key(), iter, tev.KnownClosestNodes, qryCfg) + if err != nil { + // TODO: don't panic + panic(err) + } + b.qry = qry + return b.advanceQuery(ctx, nil) + + case *EventBootstrapFindCloserResponse[K, N]: + return b.advanceQuery(ctx, &query.EventQueryFindCloserResponse[K, N]{ + NodeID: tev.NodeID, + CloserNodes: tev.CloserNodes, + }) + case *EventBootstrapFindCloserFailure[K, N]: + return b.advanceQuery(ctx, &query.EventQueryFindCloserFailure[K, N]{ + NodeID: tev.NodeID, + Error: tev.Error, + }) + + case *EventBootstrapPoll: + // ignore, nothing to do + default: + panic(fmt.Sprintf("unexpected event: %T", tev)) + } + + if b.qry != nil { + return b.advanceQuery(ctx, nil) + } + + return &StateBootstrapIdle{} +} + +func (b *Bootstrap[K, N]) advanceQuery(ctx context.Context, qev query.QueryEvent) BootstrapState { + state := b.qry.Advance(ctx, qev) + switch st := state.(type) { + case *query.StateQueryFindCloser[K, N]: + return &StateBootstrapFindCloser[K, N]{ + QueryID: st.QueryID, + Stats: st.Stats, + NodeID: st.NodeID, + Target: st.Target, + } + case *query.StateQueryFinished: + return &StateBootstrapFinished{ + Stats: st.Stats, + } + case *query.StateQueryWaitingAtCapacity: + elapsed := b.cfg.Clock.Since(st.Stats.Start) + if elapsed > b.cfg.Timeout { + return &StateBootstrapTimeout{ + Stats: st.Stats, + } + } + return &StateBootstrapWaiting{ + Stats: st.Stats, + } + case 
*query.StateQueryWaitingWithCapacity: + elapsed := b.cfg.Clock.Since(st.Stats.Start) + if elapsed > b.cfg.Timeout { + return &StateBootstrapTimeout{ + Stats: st.Stats, + } + } + return &StateBootstrapWaiting{ + Stats: st.Stats, + } + default: + panic(fmt.Sprintf("unexpected state: %T", st)) + } +} + +// BootstrapState is the state of a bootstrap. +type BootstrapState interface { + bootstrapState() +} + +// StateBootstrapFindCloser indicates that the bootstrap query wants to send a find closer nodes message to a node. +type StateBootstrapFindCloser[K kad.Key[K], N kad.NodeID[K]] struct { + QueryID query.QueryID + Target K // the key that the query wants to find closer nodes for + NodeID N // the node to send the message to + Stats query.QueryStats +} + +// StateBootstrapIdle indicates that the bootstrap is not running its query. +type StateBootstrapIdle struct{} + +// StateBootstrapFinished indicates that the bootstrap has finished. +type StateBootstrapFinished struct { + Stats query.QueryStats +} + +// StateBootstrapTimeout indicates that the bootstrap query has timed out. +type StateBootstrapTimeout struct { + Stats query.QueryStats +} + +// StateBootstrapWaiting indicates that the bootstrap query is waiting for a response. +type StateBootstrapWaiting struct { + Stats query.QueryStats +} + +// bootstrapState() ensures that only Bootstrap states can be assigned to a BootstrapState. +func (*StateBootstrapFindCloser[K, N]) bootstrapState() {} +func (*StateBootstrapIdle) bootstrapState() {} +func (*StateBootstrapFinished) bootstrapState() {} +func (*StateBootstrapTimeout) bootstrapState() {} +func (*StateBootstrapWaiting) bootstrapState() {} + +// BootstrapEvent is an event intended to advance the state of a bootstrap. +type BootstrapEvent interface { + bootstrapEvent() +} + +// EventBootstrapPoll is an event that signals the bootstrap that it can perform housekeeping work such as time out queries. +type EventBootstrapPoll struct{} + +// EventBootstrapStart is an event that attempts to start a new bootstrap +type EventBootstrapStart[K kad.Key[K], N kad.NodeID[K]] struct { + KnownClosestNodes []N +} + +// EventBootstrapFindCloserResponse notifies a bootstrap that an attempt to find closer nodes has received a successful response. +type EventBootstrapFindCloserResponse[K kad.Key[K], N kad.NodeID[K]] struct { + NodeID N // the node the message was sent to + CloserNodes []N // the closer nodes sent by the node +} + +// EventBootstrapFindCloserFailure notifies a bootstrap that an attempt to find closer nodes has failed. +type EventBootstrapFindCloserFailure[K kad.Key[K], N kad.NodeID[K]] struct { + NodeID N // the node the message was sent to + Error error // the error that caused the failure, if any +} + +// bootstrapEvent() ensures that only events accepted by a [Bootstrap] can be assigned to the [BootstrapEvent] interface. 
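Taken together, these states and events form a small request/response protocol between the bootstrap and its caller. A hedged sketch of a drive loop (the real coordinator is the RoutingBehaviour shown earlier in this patch; bs and seeds use the tiny test types, and send is a hypothetical network helper):

	var ev BootstrapEvent = &EventBootstrapStart[tiny.Key, tiny.Node]{
		KnownClosestNodes: seeds,
	}
	for {
		switch st := bs.Advance(ctx, ev).(type) {
		case *StateBootstrapFindCloser[tiny.Key, tiny.Node]:
			// ask st.NodeID for nodes closer to st.Target; the reply comes
			// back later as EventBootstrapFindCloserResponse or ...Failure
			send(st.NodeID, st.Target) // hypothetical
		case *StateBootstrapFinished:
			return // routing table seeded; st.Stats has the totals
		case *StateBootstrapTimeout:
			return // the query stopped making progress
		}
		ev = &EventBootstrapPoll{} // poll between external notifications
	}

The marker methods that pin the event types to the interface follow: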
+func (*EventBootstrapPoll) bootstrapEvent() {} +func (*EventBootstrapStart[K, N]) bootstrapEvent() {} +func (*EventBootstrapFindCloserResponse[K, N]) bootstrapEvent() {} +func (*EventBootstrapFindCloserFailure[K, N]) bootstrapEvent() {} diff --git a/v2/coord/routing/bootstrap_test.go b/v2/coord/routing/bootstrap_test.go new file mode 100644 index 00000000..f66ecec0 --- /dev/null +++ b/v2/coord/routing/bootstrap_test.go @@ -0,0 +1,222 @@ +package routing + +import ( + "context" + "testing" + + "github.com/benbjohnson/clock" + "github.com/plprobelab/go-kademlia/key" + "github.com/stretchr/testify/require" + + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/tiny" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/query" +) + +func TestBootstrapConfigValidate(t *testing.T) { + t.Run("default is valid", func(t *testing.T) { + cfg := DefaultBootstrapConfig[tiny.Key]() + require.NoError(t, cfg.Validate()) + }) + + t.Run("clock is not nil", func(t *testing.T) { + cfg := DefaultBootstrapConfig[tiny.Key]() + cfg.Clock = nil + require.Error(t, cfg.Validate()) + }) + + t.Run("timeout positive", func(t *testing.T) { + cfg := DefaultBootstrapConfig[tiny.Key]() + cfg.Timeout = 0 + require.Error(t, cfg.Validate()) + cfg.Timeout = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("request concurrency positive", func(t *testing.T) { + cfg := DefaultBootstrapConfig[tiny.Key]() + cfg.RequestConcurrency = 0 + require.Error(t, cfg.Validate()) + cfg.RequestConcurrency = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("request timeout positive", func(t *testing.T) { + cfg := DefaultBootstrapConfig[tiny.Key]() + cfg.RequestTimeout = 0 + require.Error(t, cfg.Validate()) + cfg.RequestTimeout = -1 + require.Error(t, cfg.Validate()) + }) +} + +func TestBootstrapStartsIdle(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultBootstrapConfig[tiny.Key]() + cfg.Clock = clk + + self := tiny.NewNode(tiny.Key(0)) + bs, err := NewBootstrap[tiny.Key](self, cfg) + require.NoError(t, err) + + state := bs.Advance(ctx, &EventBootstrapPoll{}) + require.IsType(t, &StateBootstrapIdle{}, state) +} + +func TestBootstrapStart(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultBootstrapConfig[tiny.Key]() + cfg.Clock = clk + + self := tiny.NewNode(tiny.Key(0)) + bs, err := NewBootstrap[tiny.Key](self, cfg) + require.NoError(t, err) + + a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + + // start the bootstrap + state := bs.Advance(ctx, &EventBootstrapStart[tiny.Key, tiny.Node]{ + KnownClosestNodes: []tiny.Node{a}, + }) + require.IsType(t, &StateBootstrapFindCloser[tiny.Key, tiny.Node]{}, state) + + // the query should attempt to contact the node it was given + st := state.(*StateBootstrapFindCloser[tiny.Key, tiny.Node]) + + // the query should be the one just added + require.Equal(t, query.QueryID("bootstrap"), st.QueryID) + + // the query should attempt to contact the node it was given + require.Equal(t, a, st.NodeID) + + // with the correct key + require.True(t, key.Equal(self.Key(), st.Target)) + + // now the bootstrap reports that it is waiting + state = bs.Advance(ctx, &EventBootstrapPoll{}) + require.IsType(t, &StateBootstrapWaiting{}, state) +} + +func TestBootstrapMessageResponse(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultBootstrapConfig[tiny.Key]() + cfg.Clock = clk + + self := tiny.NewNode(tiny.Key(0)) + bs, err := NewBootstrap[tiny.Key](self, cfg) + require.NoError(t, err) + + a := 
tiny.NewNode(tiny.Key(0b00000100)) // 4 + + // start the bootstrap + state := bs.Advance(ctx, &EventBootstrapStart[tiny.Key, tiny.Node]{ + KnownClosestNodes: []tiny.Node{a}, + }) + require.IsType(t, &StateBootstrapFindCloser[tiny.Key, tiny.Node]{}, state) + + // the bootstrap should attempt to contact the node it was given + st := state.(*StateBootstrapFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, query.QueryID("bootstrap"), st.QueryID) + require.Equal(t, a, st.NodeID) + + // notify bootstrap that node was contacted successfully, but no closer nodes + state = bs.Advance(ctx, &EventBootstrapFindCloserResponse[tiny.Key, tiny.Node]{ + NodeID: a, + }) + + // bootstrap should respond that its query has finished + require.IsType(t, &StateBootstrapFinished{}, state) + + stf := state.(*StateBootstrapFinished) + require.Equal(t, 1, stf.Stats.Requests) + require.Equal(t, 1, stf.Stats.Success) +} + +func TestBootstrapProgress(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultBootstrapConfig[tiny.Key]() + cfg.Clock = clk + cfg.RequestConcurrency = 3 // 1 less than the 4 nodes to be visited + + self := tiny.NewNode(tiny.Key(0)) + bs, err := NewBootstrap[tiny.Key](self, cfg) + require.NoError(t, err) + + a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + b := tiny.NewNode(tiny.Key(0b00001000)) // 8 + c := tiny.NewNode(tiny.Key(0b00010000)) // 16 + d := tiny.NewNode(tiny.Key(0b00100000)) // 32 + + // ensure the order of the known nodes + require.True(t, self.Key().Xor(a.Key()).Compare(self.Key().Xor(b.Key())) == -1) + require.True(t, self.Key().Xor(b.Key()).Compare(self.Key().Xor(c.Key())) == -1) + require.True(t, self.Key().Xor(c.Key()).Compare(self.Key().Xor(d.Key())) == -1) + + // start the bootstrap + state := bs.Advance(ctx, &EventBootstrapStart[tiny.Key, tiny.Node]{ + KnownClosestNodes: []tiny.Node{d, a, b, c}, + }) + + // the bootstrap should attempt to contact the closest node it was given + require.IsType(t, &StateBootstrapFindCloser[tiny.Key, tiny.Node]{}, state) + st := state.(*StateBootstrapFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, query.QueryID("bootstrap"), st.QueryID) + require.Equal(t, a, st.NodeID) + + // next the bootstrap attempts to contact second nearest node + state = bs.Advance(ctx, &EventBootstrapPoll{}) + require.IsType(t, &StateBootstrapFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StateBootstrapFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, b, st.NodeID) + + // next the bootstrap attempts to contact third nearest node + state = bs.Advance(ctx, &EventBootstrapPoll{}) + require.IsType(t, &StateBootstrapFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StateBootstrapFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, c, st.NodeID) + + // now the bootstrap should be waiting since it is at request capacity + state = bs.Advance(ctx, &EventBootstrapPoll{}) + require.IsType(t, &StateBootstrapWaiting{}, state) + + // notify bootstrap that node was contacted successfully, but no closer nodes + state = bs.Advance(ctx, &EventBootstrapFindCloserResponse[tiny.Key, tiny.Node]{ + NodeID: a, + }) + + // now the bootstrap has capacity to contact fourth nearest node + require.IsType(t, &StateBootstrapFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StateBootstrapFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, d, st.NodeID) + + // notify bootstrap that a node was contacted successfully + state = bs.Advance(ctx, &EventBootstrapFindCloserResponse[tiny.Key, tiny.Node]{ + NodeID: b, + }) + + // bootstrap should respond 
that it is waiting for messages + require.IsType(t, &StateBootstrapWaiting{}, state) + + // notify bootstrap that a node was contacted successfully + state = bs.Advance(ctx, &EventBootstrapFindCloserResponse[tiny.Key, tiny.Node]{ + NodeID: c, + }) + + // bootstrap should respond that it is waiting for last message + require.IsType(t, &StateBootstrapWaiting{}, state) + + // notify bootstrap that the final node was contacted successfully + state = bs.Advance(ctx, &EventBootstrapFindCloserResponse[tiny.Key, tiny.Node]{ + NodeID: d, + }) + + // bootstrap should respond that its query has finished + require.IsType(t, &StateBootstrapFinished{}, state) + + stf := state.(*StateBootstrapFinished) + require.Equal(t, 4, stf.Stats.Requests) + require.Equal(t, 4, stf.Stats.Success) +} diff --git a/v2/coord/routing/include.go b/v2/coord/routing/include.go new file mode 100644 index 00000000..0830a6b1 --- /dev/null +++ b/v2/coord/routing/include.go @@ -0,0 +1,289 @@ +package routing + +import ( + "context" + "fmt" + "time" + + "github.com/benbjohnson/clock" + + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/kaderr" + "github.com/plprobelab/go-kademlia/key" + + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" +) + +type check[K kad.Key[K], N kad.NodeID[K]] struct { + NodeID N + Started time.Time +} + +type Include[K kad.Key[K], N kad.NodeID[K]] struct { + rt kad.RoutingTable[K, N] + + // checks is an index of checks in progress + checks map[string]check[K, N] + + candidates *nodeQueue[K, N] + + // cfg is a copy of the optional configuration supplied to the Include + cfg IncludeConfig +} + +// IncludeConfig specifies optional configuration for an Include +type IncludeConfig struct { + QueueCapacity int // the maximum number of nodes that can be in the candidate queue + Concurrency int // the maximum number of include checks that may be in progress at any one time + Timeout time.Duration // the time to wait before terminating a check that is not making progress + Clock clock.Clock // a clock that may replaced by a mock when testing +} + +// Validate checks the configuration options and returns an error if any have invalid values. +func (cfg *IncludeConfig) Validate() error { + if cfg.Clock == nil { + return &kaderr.ConfigurationError{ + Component: "IncludeConfig", + Err: fmt.Errorf("clock must not be nil"), + } + } + + if cfg.Concurrency < 1 { + return &kaderr.ConfigurationError{ + Component: "IncludeConfig", + Err: fmt.Errorf("concurrency must be greater than zero"), + } + } + + if cfg.Timeout < 1 { + return &kaderr.ConfigurationError{ + Component: "IncludeConfig", + Err: fmt.Errorf("timeout must be greater than zero"), + } + } + + if cfg.QueueCapacity < 1 { + return &kaderr.ConfigurationError{ + Component: "IncludeConfig", + Err: fmt.Errorf("queue size must be greater than zero"), + } + } + + return nil +} + +// DefaultIncludeConfig returns the default configuration options for an Include. 
+// Options may be overridden before passing to NewInclude +func DefaultIncludeConfig() *IncludeConfig { + return &IncludeConfig{ + Clock: clock.New(), // use standard time + Concurrency: 3, + Timeout: time.Minute, + QueueCapacity: 128, + } +} + +func NewInclude[K kad.Key[K], N kad.NodeID[K]](rt kad.RoutingTable[K, N], cfg *IncludeConfig) (*Include[K, N], error) { + if cfg == nil { + cfg = DefaultIncludeConfig() + } else if err := cfg.Validate(); err != nil { + return nil, err + } + + return &Include[K, N]{ + candidates: newNodeQueue[K, N](cfg.QueueCapacity), + cfg: *cfg, + rt: rt, + checks: make(map[string]check[K, N], cfg.Concurrency), + }, nil +} + +// Advance advances the state of the include state machine by attempting to advance its query if running. +func (b *Include[K, N]) Advance(ctx context.Context, ev IncludeEvent) IncludeState { + ctx, span := tele.StartSpan(ctx, "Include.Advance") + defer span.End() + + switch tev := ev.(type) { + + case *EventIncludeAddCandidate[K, N]: + // Ignore if already running a check + _, checking := b.checks[key.HexString(tev.NodeID.Key())] + if checking { + break + } + + // Ignore if node already in routing table + if _, exists := b.rt.GetNode(tev.NodeID.Key()); exists { + break + } + + // TODO: potentially time out a check and make room in the queue + if !b.candidates.HasCapacity() { + return &StateIncludeWaitingFull{} + } + b.candidates.Enqueue(ctx, tev.NodeID) + + case *EventIncludeConnectivityCheckSuccess[K, N]: + ch, ok := b.checks[key.HexString(tev.NodeID.Key())] + if ok { + delete(b.checks, key.HexString(tev.NodeID.Key())) + if b.rt.AddNode(tev.NodeID) { + return &StateIncludeRoutingUpdated[K, N]{ + NodeID: ch.NodeID, + } + } + } + case *EventIncludeConnectivityCheckFailure[K, N]: + delete(b.checks, key.HexString(tev.NodeID.Key())) + + case *EventIncludePoll: + // ignore, nothing to do + default: + panic(fmt.Sprintf("unexpected event: %T", tev)) + } + + if len(b.checks) == b.cfg.Concurrency { + if !b.candidates.HasCapacity() { + return &StateIncludeWaitingFull{} + } + return &StateIncludeWaitingAtCapacity{} + } + + candidate, ok := b.candidates.Dequeue(ctx) + if !ok { + // No candidate in queue + if len(b.checks) > 0 { + return &StateIncludeWaitingWithCapacity{} + } + return &StateIncludeIdle{} + } + + b.checks[key.HexString(candidate.Key())] = check[K, N]{ + NodeID: candidate, + Started: b.cfg.Clock.Now(), + } + + // Ask the node to find itself + return &StateIncludeConnectivityCheck[K, N]{ + NodeID: candidate, + } +} + +// nodeQueue is a bounded queue of unique NodeIDs +type nodeQueue[K kad.Key[K], N kad.NodeID[K]] struct { + capacity int + nodes []N + keys map[string]struct{} +} + +func newNodeQueue[K kad.Key[K], N kad.NodeID[K]](capacity int) *nodeQueue[K, N] { + return &nodeQueue[K, N]{ + capacity: capacity, + nodes: make([]N, 0, capacity), + keys: make(map[string]struct{}, capacity), + } +} + +// Enqueue adds a node to the queue. It returns true if the node was +// added and false otherwise. +func (q *nodeQueue[K, N]) Enqueue(ctx context.Context, id N) bool { + if len(q.nodes) == q.capacity { + return false + } + + if _, exists := q.keys[key.HexString(id.Key())]; exists { + return false + } + + q.nodes = append(q.nodes, id) + q.keys[key.HexString(id.Key())] = struct{}{} + return true +} + +// Dequeue reads an node from the queue. It returns the node and a true value +// if a node was read or nil and false if no node was read. 
+func (q *nodeQueue[K, N]) Dequeue(ctx context.Context) (N, bool) {
+	if len(q.nodes) == 0 {
+		var v N
+		return v, false
+	}
+
+	var id N
+	id, q.nodes = q.nodes[0], q.nodes[1:]
+	delete(q.keys, key.HexString(id.Key()))
+
+	return id, true
+}
+
+func (q *nodeQueue[K, N]) HasCapacity() bool {
+	return len(q.nodes) < q.capacity
+}
+
+// IncludeState is the state of an [Include].
+type IncludeState interface {
+	includeState()
+}
+
+// StateIncludeConnectivityCheck indicates that an [Include] is waiting to send a connectivity check to a node.
+// A find node message should be sent to the node, with the target being the node's key.
+type StateIncludeConnectivityCheck[K kad.Key[K], N kad.NodeID[K]] struct {
+	NodeID N // the node to send the message to
+}
+
+// StateIncludeIdle indicates that an [Include] is not performing any work or waiting for any responses.
+type StateIncludeIdle struct{}
+
+// StateIncludeWaitingAtCapacity indicates that an [Include] is waiting for responses for checks and
+// that the maximum number of concurrent checks has been reached.
+type StateIncludeWaitingAtCapacity struct{}
+
+// StateIncludeWaitingWithCapacity indicates that an [Include] is waiting for responses for checks
+// but has capacity to perform more.
+type StateIncludeWaitingWithCapacity struct{}
+
+// StateIncludeWaitingFull indicates that the include subsystem is waiting for responses for checks and
+// that the maximum number of queued candidates has been reached.
+type StateIncludeWaitingFull struct{}
+
+// StateIncludeRoutingUpdated indicates the routing table has been updated with a new node.
+type StateIncludeRoutingUpdated[K kad.Key[K], N kad.NodeID[K]] struct {
+	NodeID N
+}
+
+// includeState() ensures that only Include states can be assigned to an IncludeState.
+func (*StateIncludeConnectivityCheck[K, N]) includeState() {}
+func (*StateIncludeIdle) includeState()                    {}
+func (*StateIncludeWaitingAtCapacity) includeState()       {}
+func (*StateIncludeWaitingWithCapacity) includeState()     {}
+func (*StateIncludeWaitingFull) includeState()             {}
+func (*StateIncludeRoutingUpdated[K, N]) includeState()    {}
+
+// IncludeEvent is an event intended to advance the state of an [Include].
+type IncludeEvent interface {
+	includeEvent()
+}
+
+// EventIncludePoll is an event that signals an [Include] to perform housekeeping work such as time out queries.
+type EventIncludePoll struct{}
+
+// EventIncludeAddCandidate notifies an [Include] that a node should be added to the candidate list.
+type EventIncludeAddCandidate[K kad.Key[K], N kad.NodeID[K]] struct {
+	NodeID N // the candidate node
+}
+
+// EventIncludeConnectivityCheckSuccess notifies an [Include] that a requested connectivity check has received a successful response.
+type EventIncludeConnectivityCheckSuccess[K kad.Key[K], N kad.NodeID[K]] struct {
+	NodeID N // the node the message was sent to
+}
+
+// EventIncludeConnectivityCheckFailure notifies an [Include] that a requested connectivity check has failed.
+type EventIncludeConnectivityCheckFailure[K kad.Key[K], N kad.NodeID[K]] struct {
+	NodeID N     // the node the message was sent to
+	Error  error // the error that caused the failure, if any
+}
+
+// includeEvent() ensures that only events accepted by an [Include] can be assigned to the [IncludeEvent] interface.
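To make the candidate lifecycle concrete, a hedged sketch of pushing one node through an Include (inc, n, and the reachable flag are assumptions standing in for the coordinator and the network; tiny types as in the tests below):

	st := inc.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{NodeID: n})
	if _, ok := st.(*StateIncludeConnectivityCheck[tiny.Key, tiny.Node]); ok {
		// a check started immediately; report the (faked) outcome
		var ev IncludeEvent
		if reachable {
			ev = &EventIncludeConnectivityCheckSuccess[tiny.Key, tiny.Node]{NodeID: n}
		} else {
			ev = &EventIncludeConnectivityCheckFailure[tiny.Key, tiny.Node]{
				NodeID: n,
				Error:  errors.New("unreachable"),
			}
		}
		if _, ok := inc.Advance(ctx, ev).(*StateIncludeRoutingUpdated[tiny.Key, tiny.Node]); ok {
			// n is now in the routing table
		}
	}

The marker methods for the event set follow: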
+func (*EventIncludePoll) includeEvent() {} +func (*EventIncludeAddCandidate[K, N]) includeEvent() {} +func (*EventIncludeConnectivityCheckSuccess[K, N]) includeEvent() {} +func (*EventIncludeConnectivityCheckFailure[K, N]) includeEvent() {} diff --git a/v2/coord/routing/include_test.go b/v2/coord/routing/include_test.go new file mode 100644 index 00000000..41e80e3a --- /dev/null +++ b/v2/coord/routing/include_test.go @@ -0,0 +1,271 @@ +package routing + +import ( + "context" + "testing" + + "github.com/benbjohnson/clock" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/tiny" + "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-kademlia/routing/simplert" + "github.com/stretchr/testify/require" +) + +func TestIncludeConfigValidate(t *testing.T) { + t.Run("default is valid", func(t *testing.T) { + cfg := DefaultIncludeConfig() + require.NoError(t, cfg.Validate()) + }) + + t.Run("clock is not nil", func(t *testing.T) { + cfg := DefaultIncludeConfig() + cfg.Clock = nil + require.Error(t, cfg.Validate()) + }) + + t.Run("timeout positive", func(t *testing.T) { + cfg := DefaultIncludeConfig() + cfg.Timeout = 0 + require.Error(t, cfg.Validate()) + cfg.Timeout = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("request concurrency positive", func(t *testing.T) { + cfg := DefaultIncludeConfig() + cfg.Concurrency = 0 + require.Error(t, cfg.Validate()) + cfg.Concurrency = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("queue size positive", func(t *testing.T) { + cfg := DefaultIncludeConfig() + cfg.QueueCapacity = 0 + require.Error(t, cfg.Validate()) + cfg.QueueCapacity = -1 + require.Error(t, cfg.Validate()) + }) +} + +func TestIncludeStartsIdle(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultIncludeConfig() + cfg.Clock = clk + + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + + bs, err := NewInclude[tiny.Key, tiny.Node](rt, cfg) + require.NoError(t, err) + + state := bs.Advance(ctx, &EventIncludePoll{}) + require.IsType(t, &StateIncludeIdle{}, state) +} + +func TestIncludeAddCandidateStartsCheckIfCapacity(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultIncludeConfig() + cfg.Clock = clk + cfg.Concurrency = 1 + + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + + p, err := NewInclude[tiny.Key, tiny.Node](rt, cfg) + require.NoError(t, err) + + candidate := tiny.NewNode(tiny.Key(0b00000100)) + + // add a candidate + state := p.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{ + NodeID: candidate, + }) + // the state machine should attempt to send a message + require.IsType(t, &StateIncludeConnectivityCheck[tiny.Key, tiny.Node]{}, state) + + st := state.(*StateIncludeConnectivityCheck[tiny.Key, tiny.Node]) + + // the message should be sent to the candidate node + require.Equal(t, candidate, st.NodeID) + + // the message should be looking for the candidate node + require.Equal(t, candidate, st.NodeID) + + // now the include reports that it is waiting since concurrency is 1 + state = p.Advance(ctx, &EventIncludePoll{}) + require.IsType(t, &StateIncludeWaitingAtCapacity{}, state) +} + +func TestIncludeAddCandidateReportsCapacity(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultIncludeConfig() + cfg.Clock = clk + cfg.Concurrency = 2 + + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + p, err := NewInclude[tiny.Key, tiny.Node](rt, cfg) + require.NoError(t, err) + + candidate 
:= tiny.NewNode(tiny.Key(0b00000100)) + + // add a candidate + state := p.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{ + NodeID: candidate, + }) + require.IsType(t, &StateIncludeConnectivityCheck[tiny.Key, tiny.Node]{}, state) + + // now the state machine reports that it is waiting with capacity since concurrency + // is greater than the number of checks in flight + state = p.Advance(ctx, &EventIncludePoll{}) + require.IsType(t, &StateIncludeWaitingWithCapacity{}, state) +} + +func TestIncludeAddCandidateOverQueueLength(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultIncludeConfig() + cfg.Clock = clk + cfg.QueueCapacity = 2 // only allow two candidates in the queue + cfg.Concurrency = 3 + + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + + p, err := NewInclude[tiny.Key, tiny.Node](rt, cfg) + require.NoError(t, err) + + // add a candidate + state := p.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(0b00000100)), + }) + require.IsType(t, &StateIncludeConnectivityCheck[tiny.Key, tiny.Node]{}, state) + + // include reports that it is waiting and has capacity for more + state = p.Advance(ctx, &EventIncludePoll{}) + require.IsType(t, &StateIncludeWaitingWithCapacity{}, state) + + // add second candidate + state = p.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(0b00000010)), + }) + // sends a message to the candidate + require.IsType(t, &StateIncludeConnectivityCheck[tiny.Key, tiny.Node]{}, state) + + // include reports that it is waiting and has capacity for more + state = p.Advance(ctx, &EventIncludePoll{}) + // sends a message to the candidate + require.IsType(t, &StateIncludeWaitingWithCapacity{}, state) + + // add third candidate + state = p.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(0b00000011)), + }) + // sends a message to the candidate + require.IsType(t, &StateIncludeConnectivityCheck[tiny.Key, tiny.Node]{}, state) + + // include reports that it is waiting at capacity since 3 messages are in flight + state = p.Advance(ctx, &EventIncludePoll{}) + require.IsType(t, &StateIncludeWaitingAtCapacity{}, state) + + // add fourth candidate + state = p.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(0b00000101)), + }) + + // include reports that it is waiting at capacity since 3 messages are already in flight + require.IsType(t, &StateIncludeWaitingAtCapacity{}, state) + + // add fifth candidate + state = p.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(0b00000110)), + }) + + // include reports that it is waiting and the candidate queue is full since it + // is configured to have 3 concurrent checks and 2 queued + require.IsType(t, &StateIncludeWaitingFull{}, state) + + // add sixth candidate + state = p.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(0b00000111)), + }) + + // include reports that it is still waiting and the candidate queue is full since it + // is configured to have 3 concurrent checks and 2 queued + require.IsType(t, &StateIncludeWaitingFull{}, state) +} + +func TestIncludeConnectivityCheckSuccess(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultIncludeConfig() + cfg.Clock = clk + cfg.Concurrency = 2 + + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + + p, err := 
NewInclude[tiny.Key, tiny.Node](rt, cfg) + require.NoError(t, err) + + // add a candidate + state := p.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(0b00000100)), + }) + require.IsType(t, &StateIncludeConnectivityCheck[tiny.Key, tiny.Node]{}, state) + + // notify that node was contacted successfully, with no closer nodes + state = p.Advance(ctx, &EventIncludeConnectivityCheckSuccess[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(0b00000100)), + }) + + // should respond that the routing table was updated + require.IsType(t, &StateIncludeRoutingUpdated[tiny.Key, tiny.Node]{}, state) + + st := state.(*StateIncludeRoutingUpdated[tiny.Key, tiny.Node]) + + // the update is for the correct node + require.Equal(t, tiny.NewNode(tiny.Key(4)), st.NodeID) + + // the routing table should contain the node + foundNode, found := rt.GetNode(tiny.Key(4)) + require.True(t, found) + require.NotNil(t, foundNode) + + require.True(t, key.Equal(foundNode.Key(), tiny.Key(4))) + + // advancing again should reports that it is idle + state = p.Advance(ctx, &EventIncludePoll{}) + require.IsType(t, &StateIncludeIdle{}, state) +} + +func TestIncludeConnectivityCheckFailure(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultIncludeConfig() + cfg.Clock = clk + cfg.Concurrency = 2 + + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + + p, err := NewInclude[tiny.Key, tiny.Node](rt, cfg) + require.NoError(t, err) + + // add a candidate + state := p.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(0b00000100)), + }) + require.IsType(t, &StateIncludeConnectivityCheck[tiny.Key, tiny.Node]{}, state) + + // notify that node was not contacted successfully + state = p.Advance(ctx, &EventIncludeConnectivityCheckFailure[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(0b00000100)), + }) + + // should respond that state machine is idle + require.IsType(t, &StateIncludeIdle{}, state) + + // the routing table should not contain the node + foundNode, found := rt.GetNode(tiny.Key(4)) + require.False(t, found) + require.Zero(t, foundNode) +} diff --git a/v2/coord/routing/probe.go b/v2/coord/routing/probe.go new file mode 100644 index 00000000..fd044036 --- /dev/null +++ b/v2/coord/routing/probe.go @@ -0,0 +1,508 @@ +package routing + +import ( + "container/heap" + "context" + "errors" + "fmt" + "time" + + "github.com/benbjohnson/clock" + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/kaderr" + "github.com/plprobelab/go-kademlia/key" + "go.opentelemetry.io/otel/attribute" + + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" +) + +type RoutingTableCpl[K kad.Key[K], N kad.NodeID[K]] interface { + kad.RoutingTable[K, N] + + // Cpl returns the longest common prefix length the supplied key shares with the table's key. + Cpl(kk K) int + + // CplSize returns the number of nodes in the table whose longest common prefix with the table's key is of length cpl. + CplSize(cpl int) int +} + +// The Probe state machine performs regular connectivity checks for nodes in a routing table. +// +// The state machine is notified of a new entry in the routing table via the [EventProbeAdd] event. This adds the node +// to an internal list and sets a time for a check to be performed, based on the current time plus a configurable +// interval. +// +// Connectivity checks are performed in time order, so older nodes are processed first. 
The connectivity check performed
+// is the same as for the [Include] state machine: ask the node for closest nodes to itself and confirm that the node
+// returns at least one node in the list of closer nodes. The state machine emits the [StateProbeConnectivityCheck]
+// state when it wants to check the status of a node.
+//
+// The state machine expects to be notified either with the [EventProbeConnectivityCheckSuccess] or the
+// [EventProbeConnectivityCheckFailure] events to determine the outcome of the check. If neither is received within a
+// configurable timeout the node is marked as failed.
+//
+// Nodes that receive a successful response have their next check time updated to the current time plus the configured
+// [ProbeConfig.CheckInterval].
+//
+// Nodes that fail a connectivity check, or are timed out, are removed from the routing table and from the list of nodes
+// to check. The state machine emits the [StateProbeNodeFailure] state to notify callers of this event.
+//
+// The state machine accepts an [EventProbePoll] event to check for outstanding work such as initiating a new check or
+// timing out an existing one.
+//
+// The [EventProbeRemove] event may be used to remove a node from the check list and from the routing table.
+//
+// The state machine accepts the [EventProbeNotifyConnectivity] event as a notification that an external system has
+// performed a suitable connectivity check, such as when the node responds to a query. The probe state machine treats
+// these events as if a successful response had been received from a check by advancing the time of the next check.
+type Probe[K kad.Key[K], N kad.NodeID[K]] struct {
+	rt RoutingTableCpl[K, N]
+
+	// nvl is a list of nodes with information about their connectivity checks
+	// TODO: this will be expanded with more general scoring information related to their utility
+	nvl *nodeValueList[K, N]
+
+	// cfg is a copy of the optional configuration supplied to the Probe
+	cfg ProbeConfig
+}
+
+// ProbeConfig specifies optional configuration for a Probe
+type ProbeConfig struct {
+	CheckInterval time.Duration // the minimum time interval between checks for a node
+	Concurrency   int           // the maximum number of probe checks that may be in progress at any one time
+	Timeout       time.Duration // the time to wait before terminating a check that is not making progress
+	Clock         clock.Clock   // a clock that may be replaced by a mock when testing
+}
+
+// Validate checks the configuration options and returns an error if any have invalid values.
+func (cfg *ProbeConfig) Validate() error {
+	if cfg.Clock == nil {
+		return &kaderr.ConfigurationError{
+			Component: "ProbeConfig",
+			Err:       fmt.Errorf("clock must not be nil"),
+		}
+	}
+
+	if cfg.Concurrency < 1 {
+		return &kaderr.ConfigurationError{
+			Component: "ProbeConfig",
+			Err:       fmt.Errorf("concurrency must be greater than zero"),
+		}
+	}
+
+	if cfg.Timeout < 1 {
+		return &kaderr.ConfigurationError{
+			Component: "ProbeConfig",
+			Err:       fmt.Errorf("timeout must be greater than zero"),
+		}
+	}
+
+	if cfg.CheckInterval < 1 {
+		return &kaderr.ConfigurationError{
+			Component: "ProbeConfig",
+			Err:       fmt.Errorf("revisit interval must be greater than zero"),
+		}
+	}
+
+	return nil
+}
+
+// DefaultProbeConfig returns the default configuration options for a Probe.
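In tests, the clock and the check interval are the knobs that usually change. A sketch mirroring the setup used by the tests below (simplert supplies the RoutingTableCpl implementation; tiny types assumed):

	clk := clock.NewMock()
	cfg := DefaultProbeConfig()
	cfg.Clock = clk
	cfg.CheckInterval = 10 * time.Minute // re-check peers quickly under test
	cfg.Concurrency = 2

	rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5)
	p, err := NewProbe[tiny.Key, tiny.Node](rt, cfg)

The defaults themselves: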
+// Options may be overridden before passing to NewProbe
+func DefaultProbeConfig() *ProbeConfig {
+	return &ProbeConfig{
+		Clock:         clock.New(),   // use standard time
+		Concurrency:   3,             // MAGIC
+		Timeout:       time.Minute,   // MAGIC
+		CheckInterval: 6 * time.Hour, // MAGIC
+	}
+}
+
+func NewProbe[K kad.Key[K], N kad.NodeID[K]](rt RoutingTableCpl[K, N], cfg *ProbeConfig) (*Probe[K, N], error) {
+	if cfg == nil {
+		cfg = DefaultProbeConfig()
+	} else if err := cfg.Validate(); err != nil {
+		return nil, err
+	}
+
+	return &Probe[K, N]{
+		cfg: *cfg,
+		rt:  rt,
+		nvl: NewNodeValueList[K, N](),
+	}, nil
+}
+
+// Advance advances the state of the probe state machine.
+func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) ProbeState {
+	_, span := tele.StartSpan(ctx, "Probe.Advance")
+	defer span.End()
+
+	switch tev := ev.(type) {
+	case *EventProbePoll:
+		// ignore, nothing to do
+		span.SetAttributes(tele.AttrEvent("EventProbePoll"))
+	case *EventProbeAdd[K, N]:
+		// check presence in routing table
+		span.SetAttributes(tele.AttrEvent("EventProbeAdd"), attribute.String("nodeid", tev.NodeID.String()))
+		if _, found := p.rt.GetNode(tev.NodeID.Key()); !found {
+			// ignore if not in routing table
+			span.RecordError(errors.New("node not in routing table"))
+			break
+		}
+
+		// add a node to the value list
+		nv := &nodeValue[K, N]{
+			NodeID:       tev.NodeID,
+			NextCheckDue: p.cfg.Clock.Now().Add(p.cfg.CheckInterval),
+			Cpl:          p.rt.Cpl(tev.NodeID.Key()),
+		}
+		// TODO: if node was in ongoing list return a state that can signal the caller to cancel any prior outbound message
+		p.nvl.Put(nv)
+	case *EventProbeRemove[K, N]:
+		span.SetAttributes(tele.AttrEvent("EventProbeRemove"), attribute.String("nodeid", tev.NodeID.String()))
+		p.rt.RemoveKey(tev.NodeID.Key())
+		p.nvl.Remove(tev.NodeID)
+		return &StateProbeNodeFailure[K, N]{
+			NodeID: tev.NodeID,
+		}
+	case *EventProbeConnectivityCheckSuccess[K, N]:
+		span.SetAttributes(tele.AttrEvent("EventProbeConnectivityCheckSuccess"), attribute.String("nodeid", tev.NodeID.String()))
+		nv, found := p.nvl.Get(tev.NodeID)
+		if !found {
+			// ignore message for unknown node, which might have been removed
+			span.RecordError(errors.New("node not in node value list"))
+			break
+		}
+		// update next check time
+		nv.NextCheckDue = p.cfg.Clock.Now().Add(p.cfg.CheckInterval)
+
+		// put into list, which will clear any ongoing check too
+		p.nvl.Put(nv)
+
+	case *EventProbeConnectivityCheckFailure[K, N]:
+		// probe failed, so remove from routing table and from list
+		span.SetAttributes(tele.AttrEvent("EventProbeConnectivityCheckFailure"), attribute.String("nodeid", tev.NodeID.String()))
+		span.RecordError(tev.Error)
+		p.rt.RemoveKey(tev.NodeID.Key())
+		p.nvl.Remove(tev.NodeID)
+		return &StateProbeNodeFailure[K, N]{
+			NodeID: tev.NodeID,
+		}
+	case *EventProbeNotifyConnectivity[K, N]:
+		span.SetAttributes(tele.AttrEvent("EventProbeNotifyConnectivity"), attribute.String("nodeid", tev.NodeID.String()))
+		nv, found := p.nvl.Get(tev.NodeID)
+		if !found {
+			// ignore message for unknown node, which might have been removed
+			break
+		}
+		// update next check time
+		nv.NextCheckDue = p.cfg.Clock.Now().Add(p.cfg.CheckInterval)
+
+		// put into list, which will clear any ongoing check too
+		p.nvl.Put(nv)
+
+	default:
+		panic(fmt.Sprintf("unexpected event: %T", tev))
+	}
+
+	// Check if there is capacity
+	if p.cfg.Concurrency <= p.nvl.OngoingCount() {
+		// see if a check can be timed out to free capacity
+		candidate, found := p.nvl.FindCheckPastDeadline(p.cfg.Clock.Now())
+		if !found {
+			// nothing suitable for time out
+			return &StateProbeWaitingAtCapacity{}
+		}
+
+		// mark the node as failed since it timed out
+		p.rt.RemoveKey(candidate.Key())
+		p.nvl.Remove(candidate)
+		return &StateProbeNodeFailure[K, N]{
+			NodeID: candidate,
+		}
+
+	}
+
+	// there is capacity to start a new check
+	next, ok := p.nvl.PeekNext(p.cfg.Clock.Now())
+	if !ok {
+		if p.nvl.OngoingCount() > 0 {
+			// waiting for a check but nothing else to do
+			return &StateProbeWaitingWithCapacity{}
+		}
+		// nothing happening and nothing to do
+		return &StateProbeIdle{}
+	}
+
+	p.nvl.MarkOngoing(next.NodeID, p.cfg.Clock.Now().Add(p.cfg.Timeout))
+
+	// Ask the node to find itself
+	return &StateProbeConnectivityCheck[K, N]{
+		NodeID: next.NodeID,
+	}
+}
+
+// ProbeState is the state of the [Probe] state machine.
+type ProbeState interface {
+	probeState()
+}
+
+// StateProbeConnectivityCheck indicates that the probe subsystem is waiting to send a connectivity check to a node.
+// A find node message should be sent to the node, with the target being the node's key.
+type StateProbeConnectivityCheck[K kad.Key[K], N kad.NodeID[K]] struct {
+	NodeID N // the node to send the message to
+}
+
+// StateProbeIdle indicates that the probe state machine is not running any checks.
+type StateProbeIdle struct{}
+
+// StateProbeWaitingAtCapacity indicates that the probe state machine is waiting for responses for checks and
+// the maximum number of concurrent checks has been reached.
+type StateProbeWaitingAtCapacity struct{}
+
+// StateProbeWaitingWithCapacity indicates that the probe state machine is waiting for responses for checks
+// but has capacity to perform more.
+type StateProbeWaitingWithCapacity struct{}
+
+// StateProbeNodeFailure indicates that a node has failed a connectivity check and has been removed from the routing table and the probe list.
+type StateProbeNodeFailure[K kad.Key[K], N kad.NodeID[K]] struct {
+	NodeID N
+}
+
+// probeState() ensures that only Probe states can be assigned to the ProbeState interface.
+func (*StateProbeConnectivityCheck[K, N]) probeState() {}
+func (*StateProbeIdle) probeState()                    {}
+func (*StateProbeWaitingAtCapacity) probeState()       {}
+func (*StateProbeWaitingWithCapacity) probeState()     {}
+func (*StateProbeNodeFailure[K, N]) probeState()       {}
+
+// ProbeEvent is an event intended to advance the state of a probe.
+type ProbeEvent interface {
+	probeEvent()
+}
+
+// EventProbePoll is an event that signals the probe that it can perform housekeeping work such as time out queries.
+type EventProbePoll struct{}
+
+// EventProbeAdd notifies a probe that a node should be added to its list of nodes.
+type EventProbeAdd[K kad.Key[K], N kad.NodeID[K]] struct {
+	NodeID N // the node to be probed
+}
+
+// EventProbeRemove notifies a probe that a node should be removed from its list of nodes and the routing table.
+type EventProbeRemove[K kad.Key[K], N kad.NodeID[K]] struct {
+	NodeID N // the node to be removed
+}
+
+// EventProbeConnectivityCheckSuccess notifies a [Probe] that a requested connectivity check has received a successful response.
+type EventProbeConnectivityCheckSuccess[K kad.Key[K], N kad.NodeID[K]] struct {
+	NodeID N // the node the message was sent to
+}
+
+// EventProbeConnectivityCheckFailure notifies a [Probe] that a requested connectivity check has failed.
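As with bootstrap and include, a coordinator turns these events into a loop. A hedged sketch (network I/O elided; startCheck and secondChance are hypothetical helpers, tiny types assumed):

	switch st := p.Advance(ctx, &EventProbePoll{}).(type) {
	case *StateProbeConnectivityCheck[tiny.Key, tiny.Node]:
		// ask st.NodeID for the nodes closest to itself, then feed back
		// EventProbeConnectivityCheckSuccess or ...Failure
		startCheck(st.NodeID) // hypothetical
	case *StateProbeNodeFailure[tiny.Key, tiny.Node]:
		// st.NodeID failed and was removed; offer it to the Include for a retry
		secondChance(st.NodeID) // hypothetical
	default:
		// idle or waiting: nothing runnable until a response or timeout
	}

The failure event also carries the error that triggered it: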
+type EventProbeConnectivityCheckFailure[K kad.Key[K], N kad.NodeID[K]] struct {
+	NodeID N     // the node the message was sent to
+	Error  error // the error that caused the failure, if any
+}
+
+// EventProbeNotifyConnectivity notifies a probe that a node has confirmed connectivity from another source such as a query.
+type EventProbeNotifyConnectivity[K kad.Key[K], N kad.NodeID[K]] struct {
+	NodeID N
+}
+
+// probeEvent() ensures that only events accepted by a [Probe] can be assigned to the [ProbeEvent] interface.
+func (*EventProbePoll) probeEvent()                           {}
+func (*EventProbeAdd[K, N]) probeEvent()                      {}
+func (*EventProbeRemove[K, N]) probeEvent()                   {}
+func (*EventProbeConnectivityCheckSuccess[K, N]) probeEvent() {}
+func (*EventProbeConnectivityCheckFailure[K, N]) probeEvent() {}
+func (*EventProbeNotifyConnectivity[K, N]) probeEvent()       {}
+
+type nodeValue[K kad.Key[K], N kad.NodeID[K]] struct {
+	NodeID        N
+	Cpl           int // the longest common prefix length the node shares with the routing table's key
+	NextCheckDue  time.Time
+	CheckDeadline time.Time
+	Index         int // the index of the item in the ordering
+}
+
+type nodeValueEntry[K kad.Key[K], N kad.NodeID[K]] struct {
+	nv    *nodeValue[K, N]
+	index int // the index of the item in the ordering
+}
+
+type nodeValueList[K kad.Key[K], N kad.NodeID[K]] struct {
+	nodes   map[string]*nodeValueEntry[K, N]
+	pending *nodeValuePendingList[K, N]
+	// ongoing is a list of nodes with ongoing/in-progress probes, loosely ordered earliest to most recent
+	ongoing []N
+}
+
+func NewNodeValueList[K kad.Key[K], N kad.NodeID[K]]() *nodeValueList[K, N] {
+	return &nodeValueList[K, N]{
+		nodes:   make(map[string]*nodeValueEntry[K, N]),
+		ongoing: make([]N, 0),
+		pending: new(nodeValuePendingList[K, N]),
+	}
+}
+
+// Put adds a node value to the list, replacing any existing value.
+// It is added to the pending list and removed from the ongoing list if it was already present there.
+func (l *nodeValueList[K, N]) Put(nv *nodeValue[K, N]) {
+	mk := key.HexString(nv.NodeID.Key())
+	nve, exists := l.nodes[mk]
+	if !exists {
+		nve = &nodeValueEntry[K, N]{
+			nv: nv,
+		}
+	} else {
+		nve.nv = nv
+		heap.Remove(l.pending, nve.index)
+	}
+	heap.Push(l.pending, nve)
+	l.nodes[mk] = nve
+	heap.Fix(l.pending, nve.index)
+	l.removeFromOngoing(nv.NodeID)
+}
+
+func (l *nodeValueList[K, N]) Get(n N) (*nodeValue[K, N], bool) {
+	mk := key.HexString(n.Key())
+	nve, found := l.nodes[mk]
+	if !found {
+		return nil, false
+	}
+	return nve.nv, true
+}
+
+func (l *nodeValueList[K, N]) PendingCount() int {
+	return len(*l.pending)
+}
+
+func (l *nodeValueList[K, N]) OngoingCount() int {
+	return len(l.ongoing)
+}
+
+func (l *nodeValueList[K, N]) NodeCount() int {
+	return len(l.nodes)
+}
+
+// Remove removes a node value from the list, deleting its information.
+// It is removed from the pending list and the ongoing list if it was present in either.
+func (l *nodeValueList[K, N]) Remove(n N) {
+	mk := key.HexString(n.Key())
+	nve, ok := l.nodes[mk]
+	if !ok {
+		return
+	}
+	delete(l.nodes, mk)
+	if nve.index >= 0 {
+		heap.Remove(l.pending, nve.index)
+	}
+	l.removeFromOngoing(n)
+}
+
+// FindCheckPastDeadline looks for the first node in the ongoing list whose deadline is
+// before the supplied timestamp.
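Internally the probe moves each node between the pending heap and the ongoing list. A sketch of that lifecycle (tiny types and a mock clock clk assumed):

	l := NewNodeValueList[tiny.Key, tiny.Node]()
	l.Put(&nodeValue[tiny.Key, tiny.Node]{
		NodeID:       tiny.NewNode(tiny.Key(4)),
		NextCheckDue: clk.Now(), // due immediately
	})
	if nv, ok := l.PeekNext(clk.Now()); ok {
		// move pending -> ongoing, with a deadline for the response
		l.MarkOngoing(nv.NodeID, clk.Now().Add(time.Minute))
	}
	// later: a success calls l.Put(...) again, which also clears the ongoing
	// entry; a failure calls l.Remove(...)

The deadline scan over the ongoing list comes next: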
+func (l *nodeValueList[K, N]) FindCheckPastDeadline(ts time.Time) (N, bool) { + // ongoing is in start time order, oldest first + for _, n := range l.ongoing { + mk := key.HexString(n.Key()) + nve, ok := l.nodes[mk] + if !ok { + // somehow the node doesn't exist so this is an obvious candidate for removal + return n, true + } + if !nve.nv.CheckDeadline.After(ts) { + return n, true + } + } + var v N + return v, false +} + +func (l *nodeValueList[K, N]) removeFromOngoing(n N) { + // ongoing list is expected to be small, so linear search is ok + for i := range l.ongoing { + if key.Equal(n.Key(), l.ongoing[i].Key()) { + if len(l.ongoing) > 1 { + // swap with last entry + l.ongoing[i], l.ongoing[len(l.ongoing)-1] = l.ongoing[len(l.ongoing)-1], l.ongoing[i] + } + // remove last entry + var v N + l.ongoing[len(l.ongoing)-1] = v + l.ongoing = l.ongoing[:len(l.ongoing)-1] + return + } + } +} + +// PeekNext returns the next node that is due a connectivity check without removing it +// from the pending list. +func (l *nodeValueList[K, N]) PeekNext(ts time.Time) (*nodeValue[K, N], bool) { + if len(*l.pending) == 0 { + return nil, false + } + + nve := (*l.pending)[0] + + // Is the check due yet? + if nve.nv.NextCheckDue.After(ts) { + return nil, false + } + + return (*l.pending)[0].nv, true +} + +// MarkOngoing marks a node as having an ongoing connectivity check. +// It has no effect if the node is not already present in the list. +func (l *nodeValueList[K, N]) MarkOngoing(n N, deadline time.Time) { + mk := key.HexString(n.Key()) + nve, ok := l.nodes[mk] + if !ok { + return + } + nve.nv.CheckDeadline = deadline + l.nodes[mk] = nve + heap.Remove(l.pending, nve.index) + l.ongoing = append(l.ongoing, nve.nv.NodeID) +} + +// nodeValuePendingList is a min-heap of NodeValue ordered by NextCheckDue +type nodeValuePendingList[K kad.Key[K], N kad.NodeID[K]] []*nodeValueEntry[K, N] + +func (o nodeValuePendingList[K, N]) Len() int { return len(o) } +func (o nodeValuePendingList[K, N]) Less(i, j int) bool { + // if due times are equal, then sort higher cpls first + if o[i].nv.NextCheckDue.Equal(o[j].nv.NextCheckDue) { + return o[i].nv.Cpl > o[j].nv.Cpl + } + + return o[i].nv.NextCheckDue.Before(o[j].nv.NextCheckDue) +} + +func (o nodeValuePendingList[K, N]) Swap(i, j int) { + o[i], o[j] = o[j], o[i] + o[i].index = i + o[j].index = j +} + +func (o *nodeValuePendingList[K, N]) Push(x any) { + n := len(*o) + v := x.(*nodeValueEntry[K, N]) + v.index = n + *o = append(*o, v) +} + +func (o *nodeValuePendingList[K, N]) Pop() any { + if len(*o) == 0 { + return nil + } + old := *o + n := len(old) + v := old[n-1] + old[n-1] = nil + v.index = -1 + *o = old[0 : n-1] + return v +} diff --git a/v2/coord/routing/probe_test.go b/v2/coord/routing/probe_test.go new file mode 100644 index 00000000..872b2f50 --- /dev/null +++ b/v2/coord/routing/probe_test.go @@ -0,0 +1,841 @@ +package routing + +import ( + "container/heap" + "context" + "testing" + "time" + + "github.com/benbjohnson/clock" + "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-kademlia/routing/simplert" + "github.com/stretchr/testify/require" + + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/tiny" +) + +var _ heap.Interface = (*nodeValuePendingList[tiny.Key, tiny.Node])(nil) + +func TestProbeConfigValidate(t *testing.T) { + t.Run("default is valid", func(t *testing.T) { + cfg := DefaultProbeConfig() + require.NoError(t, cfg.Validate()) + }) + + t.Run("clock is not nil", func(t *testing.T) { + cfg := DefaultProbeConfig() + cfg.Clock = nil + 
require.Error(t, cfg.Validate()) + }) + + t.Run("timeout positive", func(t *testing.T) { + cfg := DefaultProbeConfig() + cfg.Timeout = 0 + require.Error(t, cfg.Validate()) + cfg.Timeout = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("request concurrency positive", func(t *testing.T) { + cfg := DefaultProbeConfig() + cfg.Concurrency = 0 + require.Error(t, cfg.Validate()) + cfg.Concurrency = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("revisit interval positive", func(t *testing.T) { + cfg := DefaultProbeConfig() + cfg.CheckInterval = 0 + require.Error(t, cfg.Validate()) + cfg.CheckInterval = -1 + require.Error(t, cfg.Validate()) + }) +} + +func TestProbeStartsIdle(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultProbeConfig() + cfg.Clock = clk + + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + + bs, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) + require.NoError(t, err) + + state := bs.Advance(ctx, &EventProbePoll{}) + require.IsType(t, &StateProbeIdle{}, state) +} + +func TestProbeAddChecksPresenceInRoutingTable(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + + cfg := DefaultProbeConfig() + cfg.Clock = clk + cfg.CheckInterval = 10 * time.Minute + + // Set concurrency to allow one check to run + cfg.Concurrency = 1 + + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) + require.NoError(t, err) + + // Add node that isn't in routing table + state := sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + }) + require.IsType(t, &StateProbeIdle{}, state) + + // advance time by one revisit interval + clk.Add(cfg.CheckInterval) + + // remains idle since probes aren't run unless node in routing table + state = sm.Advance(ctx, &EventProbePoll{}) + require.IsType(t, &StateProbeIdle{}, state) +} + +func TestProbeAddStartsCheckIfCapacity(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + + cfg := DefaultProbeConfig() + cfg.Clock = clk + cfg.CheckInterval = 10 * time.Minute + + // Set concurrency to allow one check to run + cfg.Concurrency = 1 + + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + rt.AddNode(tiny.NewNode(tiny.Key(4))) + + sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) + require.NoError(t, err) + + // after adding first node the probe should be idle since the + // connectivity check will be scheduled for the future + state := sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + }) + require.IsType(t, &StateProbeIdle{}, state) + + // remains idle + state = sm.Advance(ctx, &EventProbePoll{}) + require.IsType(t, &StateProbeIdle{}, state) + + // advance time by one revisit interval + clk.Add(cfg.CheckInterval) + state = sm.Advance(ctx, &EventProbePoll{}) + require.IsType(t, &StateProbeConnectivityCheck[tiny.Key, tiny.Node]{}, state) + + // the probe state machine should attempt to contact the next node + st := state.(*StateProbeConnectivityCheck[tiny.Key, tiny.Node]) + + // the connectivity check should be for the right node + require.True(t, key.Equal(tiny.Key(4), st.NodeID.Key())) +} + +func TestProbeAddManyStartsChecksIfCapacity(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + + cfg := DefaultProbeConfig() + cfg.Clock = clk + cfg.CheckInterval = 10 * time.Minute + + // Set concurrency lower than the number of nodes + cfg.Concurrency = 2 + + rt := 
simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + rt.AddNode(tiny.NewNode(tiny.Key(4))) + rt.AddNode(tiny.NewNode(tiny.Key(3))) + rt.AddNode(tiny.NewNode(tiny.Key(2))) + + sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) + require.NoError(t, err) + + // after adding first node the probe should be idle since the + // connectivity check will be scheduled for the future + state := sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + }) + require.IsType(t, &StateProbeIdle{}, state) + + // after adding second node the probe should still be idle since the + // connectivity check will be scheduled for the future + state = sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(3)), + }) + require.IsType(t, &StateProbeIdle{}, state) + + // after adding third node the probe should still be idle since the + // connectivity check will be scheduled for the future + state = sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(2)), + }) + require.IsType(t, &StateProbeIdle{}, state) + + // advance time by one revisit interval + clk.Add(cfg.CheckInterval) + + // Poll the state machine, it should now attempt to contact a node + state = sm.Advance(ctx, &EventProbePoll{}) + require.IsType(t, &StateProbeConnectivityCheck[tiny.Key, tiny.Node]{}, state) + + // the connectivity check should be for the right node + st := state.(*StateProbeConnectivityCheck[tiny.Key, tiny.Node]) + require.True(t, key.Equal(tiny.Key(4), st.NodeID.Key())) + + // Poll the state machine, it should now attempt to contact another node + state = sm.Advance(ctx, &EventProbePoll{}) + require.IsType(t, &StateProbeConnectivityCheck[tiny.Key, tiny.Node]{}, state) + + // the connectivity check should be for the right node + st = state.(*StateProbeConnectivityCheck[tiny.Key, tiny.Node]) + require.True(t, key.Equal(tiny.Key(2), st.NodeID.Key())) + + // Poll the state machine, it should now be at capacity + state = sm.Advance(ctx, &EventProbePoll{}) + require.IsType(t, &StateProbeWaitingAtCapacity{}, state) +} + +func TestProbeAddReportsCapacity(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + + cfg := DefaultProbeConfig() + cfg.Clock = clk + cfg.CheckInterval = 10 * time.Minute + + // Set concurrency to allow more than one check to run + cfg.Concurrency = 2 + + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + rt.AddNode(tiny.NewNode(tiny.Key(4))) + + sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) + require.NoError(t, err) + + // after adding first node the probe should be idle since the + // connectivity check will be scheduled for the future + state := sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + }) + require.IsType(t, &StateProbeIdle{}, state) + + // remains idle + state = sm.Advance(ctx, &EventProbePoll{}) + require.IsType(t, &StateProbeIdle{}, state) + + // advance time by one revisit interval + clk.Add(cfg.CheckInterval) + state = sm.Advance(ctx, &EventProbePoll{}) + require.IsType(t, &StateProbeConnectivityCheck[tiny.Key, tiny.Node]{}, state) + + // the probe state machine should attempt to contact the next node + st := state.(*StateProbeConnectivityCheck[tiny.Key, tiny.Node]) + + // the connectivity check should be for the right node + require.True(t, key.Equal(tiny.Key(4), st.NodeID.Key())) + + state = sm.Advance(ctx, &EventProbePoll{}) + require.IsType(t, &StateProbeWaitingWithCapacity{}, state) +} + +func 
TestProbeRemoveDeletesNodeValue(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + + cfg := DefaultProbeConfig() + cfg.Clock = clk + cfg.CheckInterval = 10 * time.Minute + + // Set concurrency to allow more than one check to run + cfg.Concurrency = 2 + + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + rt.AddNode(tiny.NewNode(tiny.Key(4))) + + sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) + require.NoError(t, err) + + // after adding first node the probe should be idle since the + // connectivity check will be scheduled for the future + state := sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + }) + require.IsType(t, &StateProbeIdle{}, state) + + // remove the node + state = sm.Advance(ctx, &EventProbeRemove[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + }) + + // state indicate that node failed + require.IsType(t, &StateProbeNodeFailure[tiny.Key, tiny.Node]{}, state) + + // advance time by one revisit interval + clk.Add(cfg.CheckInterval) + + // state remains idle since there are no nodes to probe + state = sm.Advance(ctx, &EventProbePoll{}) + require.IsType(t, &StateProbeIdle{}, state) +} + +func TestNodeValueList(t *testing.T) { + t.Run("put new", func(t *testing.T) { + t.Parallel() + + clk := clock.NewMock() + l := NewNodeValueList[tiny.Key, tiny.Node]() + nv := &nodeValue[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + NextCheckDue: clk.Now(), + } + + l.Put(nv) + + got, found := l.Get(tiny.NewNode(tiny.Key(4))) + require.True(t, found) + require.True(t, key.Equal(got.NodeID.Key(), tiny.Key(4))) + }) + + t.Run("put replace before", func(t *testing.T) { + t.Parallel() + + clk := clock.NewMock() + l := NewNodeValueList[tiny.Key, tiny.Node]() + nv1 := &nodeValue[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + NextCheckDue: clk.Now(), + } + + l.Put(nv1) + + nv2 := &nodeValue[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + NextCheckDue: clk.Now().Add(-time.Minute), + } + l.Put(nv2) + + got, found := l.Get(tiny.NewNode(tiny.Key(4))) + require.True(t, found) + require.True(t, key.Equal(got.NodeID.Key(), tiny.Key(4))) + require.Equal(t, nv2.NextCheckDue, got.NextCheckDue) + }) + + t.Run("put replace after", func(t *testing.T) { + t.Parallel() + + clk := clock.NewMock() + l := NewNodeValueList[tiny.Key, tiny.Node]() + nv1 := &nodeValue[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + NextCheckDue: clk.Now(), + } + + l.Put(nv1) + + nv2 := &nodeValue[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + NextCheckDue: clk.Now().Add(time.Minute), + } + l.Put(nv2) + + got, found := l.Get(tiny.NewNode(tiny.Key(4))) + require.True(t, found) + require.True(t, key.Equal(got.NodeID.Key(), tiny.Key(4))) + require.Equal(t, nv2.NextCheckDue, got.NextCheckDue) + }) + + t.Run("remove existing", func(t *testing.T) { + t.Parallel() + + clk := clock.NewMock() + l := NewNodeValueList[tiny.Key, tiny.Node]() + nv := &nodeValue[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + NextCheckDue: clk.Now(), + } + + l.Put(nv) + + require.Equal(t, 1, l.PendingCount()) + require.Equal(t, 1, l.NodeCount()) + + _, found := l.Get(tiny.NewNode(tiny.Key(4))) + require.True(t, found) + + l.Remove(tiny.NewNode(tiny.Key(4))) + _, found = l.Get(tiny.NewNode(tiny.Key(4))) + require.False(t, found) + + require.Equal(t, 0, l.PendingCount()) + require.Equal(t, 0, l.NodeCount()) + }) + + t.Run("remove not-existing", func(t *testing.T) { + t.Parallel() + + clk := 
clock.NewMock() + l := NewNodeValueList[tiny.Key, tiny.Node]() + nv := &nodeValue[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + NextCheckDue: clk.Now(), + } + + l.Put(nv) + + l.Remove(tiny.NewNode(tiny.Key(5))) + _, found := l.Get(tiny.NewNode(tiny.Key(4))) + require.True(t, found) + }) + + t.Run("next empty list", func(t *testing.T) { + t.Parallel() + + clk := clock.NewMock() + l := NewNodeValueList[tiny.Key, tiny.Node]() + got, found := l.PeekNext(clk.Now()) + require.False(t, found) + require.Nil(t, got) + }) + + t.Run("next one entry", func(t *testing.T) { + t.Parallel() + + clk := clock.NewMock() + l := NewNodeValueList[tiny.Key, tiny.Node]() + nv := &nodeValue[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + NextCheckDue: clk.Now(), + } + l.Put(nv) + + got, found := l.PeekNext(clk.Now()) + require.True(t, found) + require.True(t, key.Equal(got.NodeID.Key(), tiny.Key(4))) + }) + + t.Run("next sorts by next check due", func(t *testing.T) { + t.Parallel() + + clk := clock.NewMock() + l := NewNodeValueList[tiny.Key, tiny.Node]() + nv1 := &nodeValue[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(5)), + NextCheckDue: clk.Now().Add(-time.Minute), + } + l.Put(nv1) + nv2 := &nodeValue[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + NextCheckDue: clk.Now().Add(-2 * time.Minute), + } + l.Put(nv2) + + got, found := l.PeekNext(clk.Now()) + require.True(t, found) + require.True(t, key.Equal(got.NodeID.Key(), nv2.NodeID.Key())) + + nv2.NextCheckDue = clk.Now() + l.Put(nv2) + + got, found = l.PeekNext(clk.Now()) + require.True(t, found) + require.True(t, key.Equal(got.NodeID.Key(), nv1.NodeID.Key())) + }) + + t.Run("next sorts by cpl descending after time", func(t *testing.T) { + t.Parallel() + + clk := clock.NewMock() + l := NewNodeValueList[tiny.Key, tiny.Node]() + nv1 := &nodeValue[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(5)), + Cpl: 1, + NextCheckDue: clk.Now().Add(-time.Minute), + } + l.Put(nv1) + nv2 := &nodeValue[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + Cpl: 2, + NextCheckDue: clk.Now().Add(-time.Minute), + } + l.Put(nv2) + + got, found := l.PeekNext(clk.Now()) + require.True(t, found) + require.True(t, key.Equal(got.NodeID.Key(), nv2.NodeID.Key())) + + nv2.NextCheckDue = clk.Now() + l.Put(nv2) + + got, found = l.PeekNext(clk.Now()) + require.True(t, found) + require.True(t, key.Equal(got.NodeID.Key(), nv1.NodeID.Key())) + }) + + t.Run("next not due", func(t *testing.T) { + t.Parallel() + + clk := clock.NewMock() + l := NewNodeValueList[tiny.Key, tiny.Node]() + nv1 := &nodeValue[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(5)), + NextCheckDue: clk.Now().Add(time.Minute), + } + l.Put(nv1) + nv2 := &nodeValue[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + NextCheckDue: clk.Now().Add(2 * time.Minute), + } + l.Put(nv2) + + got, found := l.PeekNext(clk.Now()) + require.False(t, found) + require.Nil(t, got) + }) + + t.Run("mark ongoing", func(t *testing.T) { + t.Parallel() + + clk := clock.NewMock() + l := NewNodeValueList[tiny.Key, tiny.Node]() + nv1 := &nodeValue[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(5)), + NextCheckDue: clk.Now().Add(time.Minute), + } + l.Put(nv1) + require.Equal(t, 1, l.PendingCount()) + require.Equal(t, 0, l.OngoingCount()) + require.Equal(t, 1, l.NodeCount()) + + l.MarkOngoing(tiny.NewNode(tiny.Key(5)), clk.Now().Add(time.Minute)) + require.Equal(t, 0, l.PendingCount()) + require.Equal(t, 1, l.OngoingCount()) + require.Equal(t, 1, l.NodeCount()) + }) + + 
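// marking a node as ongoing removes it from the pending heap, so the node
+	// returned by PeekNext changes, as the next subtest shows
+	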
t.Run("mark ongoing changes next", func(t *testing.T) { + t.Parallel() + + clk := clock.NewMock() + l := NewNodeValueList[tiny.Key, tiny.Node]() + nv1 := &nodeValue[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(5)), + NextCheckDue: clk.Now().Add(-2 * time.Minute), + } + l.Put(nv1) + + nv2 := &nodeValue[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + NextCheckDue: clk.Now().Add(-1 * time.Minute), + } + l.Put(nv2) + + require.Equal(t, 2, l.PendingCount()) + require.Equal(t, 0, l.OngoingCount()) + require.Equal(t, 2, l.NodeCount()) + + // nv1 is the next node due + got, found := l.PeekNext(clk.Now()) + require.True(t, found) + require.True(t, key.Equal(got.NodeID.Key(), nv1.NodeID.Key())) + + l.MarkOngoing(nv1.NodeID, clk.Now().Add(time.Minute)) + require.Equal(t, 1, l.PendingCount()) + require.Equal(t, 1, l.OngoingCount()) + require.Equal(t, 2, l.NodeCount()) + + // nv2 is now the next node due + got, found = l.PeekNext(clk.Now()) + require.True(t, found) + require.True(t, key.Equal(got.NodeID.Key(), nv2.NodeID.Key())) + }) + + t.Run("put removes from ongoing", func(t *testing.T) { + t.Parallel() + + clk := clock.NewMock() + l := NewNodeValueList[tiny.Key, tiny.Node]() + nv1 := &nodeValue[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + NextCheckDue: clk.Now(), + } + l.Put(nv1) + + require.Equal(t, 1, l.PendingCount()) + require.Equal(t, 0, l.OngoingCount()) + require.Equal(t, 1, l.NodeCount()) + + l.MarkOngoing(nv1.NodeID, clk.Now().Add(time.Minute)) + + require.Equal(t, 0, l.PendingCount()) + require.Equal(t, 1, l.OngoingCount()) + require.Equal(t, 1, l.NodeCount()) + + l.Put(nv1) + + require.Equal(t, 1, l.PendingCount()) + require.Equal(t, 0, l.OngoingCount()) + require.Equal(t, 1, l.NodeCount()) + }) +} + +func TestProbeConnectivityCheckSuccess(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + + cfg := DefaultProbeConfig() + cfg.Clock = clk + cfg.CheckInterval = 10 * time.Minute + + // Set concurrency to allow more than one check to run + cfg.Concurrency = 2 + + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + rt.AddNode(tiny.NewNode(tiny.Key(4))) + + sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) + require.NoError(t, err) + + // after adding first node the probe should be idle since the + // connectivity check will be scheduled for the future + state := sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ + NodeID: tiny.NewNode(tiny.Key(4)), + }) + require.IsType(t, &StateProbeIdle{}, state) + + // advance time by one revisit interval + clk.Add(cfg.CheckInterval) + state = sm.Advance(ctx, &EventProbePoll{}) + require.IsType(t, &StateProbeConnectivityCheck[tiny.Key, tiny.Node]{}, state) + + // the probe state machine should attempt to contact the next node + st := state.(*StateProbeConnectivityCheck[tiny.Key, tiny.Node]) + + // notify that node was contacted successfully, with no closer nodes + state = sm.Advance(ctx, &EventProbeConnectivityCheckSuccess[tiny.Key, tiny.Node]{ + NodeID: st.NodeID, + }) + + // node remains in routing table + _, found := rt.GetNode(tiny.Key(4)) + require.True(t, found) + + // state machine now idle + require.IsType(t, &StateProbeIdle{}, state) + + // advance time by another revisit interval + clk.Add(cfg.CheckInterval) + + // the probe state machine should attempt to contact node again, now it is time + state = sm.Advance(ctx, &EventProbePoll{}) + require.IsType(t, &StateProbeConnectivityCheck[tiny.Key, tiny.Node]{}, state) + + // the connectivity check should be for the 
right node
+	require.True(t, key.Equal(tiny.Key(4), st.NodeID.Key()))
+
+	state = sm.Advance(ctx, &EventProbePoll{})
+	require.IsType(t, &StateProbeWaitingWithCapacity{}, state)
+}
+
+func TestProbeConnectivityCheckFailure(t *testing.T) {
+	ctx := context.Background()
+	clk := clock.NewMock()
+
+	cfg := DefaultProbeConfig()
+	cfg.Clock = clk
+	cfg.CheckInterval = 10 * time.Minute
+
+	// Set concurrency to allow more than one check to run
+	cfg.Concurrency = 2
+
+	rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5)
+	rt.AddNode(tiny.NewNode(tiny.Key(4)))
+
+	sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg)
+	require.NoError(t, err)
+
+	// after adding first node the probe should be idle since the
+	// connectivity check will be scheduled for the future
+	state := sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{
+		NodeID: tiny.NewNode(tiny.Key(4)),
+	})
+	require.IsType(t, &StateProbeIdle{}, state)
+
+	// advance time by one revisit interval
+	clk.Add(cfg.CheckInterval)
+	state = sm.Advance(ctx, &EventProbePoll{})
+	require.IsType(t, &StateProbeConnectivityCheck[tiny.Key, tiny.Node]{}, state)
+
+	// the probe state machine should attempt to contact the next node
+	st := state.(*StateProbeConnectivityCheck[tiny.Key, tiny.Node])
+
+	// notify that the node could not be contacted
+	state = sm.Advance(ctx, &EventProbeConnectivityCheckFailure[tiny.Key, tiny.Node]{
+		NodeID: st.NodeID,
+	})
+
+	// state machine announces node failure
+	require.IsType(t, &StateProbeNodeFailure[tiny.Key, tiny.Node]{}, state)
+	stf := state.(*StateProbeNodeFailure[tiny.Key, tiny.Node])
+
+	// the failure should be for the right node
+	require.True(t, key.Equal(tiny.Key(4), stf.NodeID.Key()))
+
+	// node has been removed from routing table
+	_, found := rt.GetNode(tiny.Key(4))
+	require.False(t, found)
+
+	// advance time by another revisit interval
+	clk.Add(cfg.CheckInterval)
+
+	// state machine still idle since node was removed
+	state = sm.Advance(ctx, &EventProbePoll{})
+	require.IsType(t, &StateProbeIdle{}, state)
+}
+
+func TestProbeNotifyConnectivity(t *testing.T) {
+	ctx := context.Background()
+	clk := clock.NewMock()
+
+	cfg := DefaultProbeConfig()
+	cfg.Clock = clk
+	cfg.CheckInterval = 10 * time.Minute
+	cfg.Concurrency = 2
+
+	rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5)
+	rt.AddNode(tiny.NewNode(tiny.Key(4)))
+	rt.AddNode(tiny.NewNode(tiny.Key(3)))
+
+	sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg)
+	require.NoError(t, err)
+
+	// after adding first node the probe should be idle since the
+	// connectivity check will be scheduled for the future (t0+10)
+	state := sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{
+		NodeID: tiny.NewNode(tiny.Key(4)),
+	})
+
+	// not time for a check yet
+	require.IsType(t, &StateProbeIdle{}, state)
+
+	// advance time by less than the revisit interval
+	// time is now (t0+2)
+	clk.Add(2 * time.Minute)
+
+	// add a second node, which will be second in the probe list since its
+	// next check is due later (t0+2+10=t0+12)
+	state = sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{
+		NodeID: tiny.NewNode(tiny.Key(3)),
+	})
+
+	// still not time for a check
+	require.IsType(t, &StateProbeIdle{}, state)
+
+	// advance time past the first node's check time but before the second node's
+	// time is now (t0+2+9=t0+11)
+	clk.Add(9 * time.Minute)
+
+	// notify that the node with key 4 was connected to successfully by another process
+	
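// (a connectivity notification reschedules the check without the probe dialing the node)
+	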
// this will delay the next check until t0+11+10 = t0+21
+	state = sm.Advance(ctx, &EventProbeNotifyConnectivity[tiny.Key, tiny.Node]{
+		NodeID: tiny.NewNode(tiny.Key(4)),
+	})
+
+	// still not time for a check
+	require.IsType(t, &StateProbeIdle{}, state)
+
+	// advance time past second node's check time
+	// time is now (t0+2+9+4=t0+15)
+	clk.Add(4 * time.Minute)
+
+	// Poll the state machine, it should now attempt to contact a node
+	state = sm.Advance(ctx, &EventProbePoll{})
+	require.IsType(t, &StateProbeConnectivityCheck[tiny.Key, tiny.Node]{}, state)
+
+	// the connectivity check should be for the right node, which is the one
+	// that did not get a connectivity notification
+	st := state.(*StateProbeConnectivityCheck[tiny.Key, tiny.Node])
+	require.True(t, key.Equal(tiny.Key(3), st.NodeID.Key()))
+
+	// Poll the state machine, it should now be waiting for a response while still having capacity
+	state = sm.Advance(ctx, &EventProbePoll{})
+	require.IsType(t, &StateProbeWaitingWithCapacity{}, state)
+}
+
+func TestProbeTimeout(t *testing.T) {
+	ctx := context.Background()
+	clk := clock.NewMock()
+
+	cfg := DefaultProbeConfig()
+	cfg.Clock = clk
+	cfg.CheckInterval = 10 * time.Minute
+	cfg.Timeout = 3 * time.Minute
+	cfg.Concurrency = 1 // one probe at a time, timeouts will be used to free capacity if there are more requests
+
+	rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5)
+	rt.AddNode(tiny.NewNode(tiny.Key(4)))
+	rt.AddNode(tiny.NewNode(tiny.Key(3)))
+
+	sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg)
+	require.NoError(t, err)
+
+	// add a node
+	state := sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{
+		NodeID: tiny.NewNode(tiny.Key(4)),
+	})
+
+	// not time for a check yet
+	require.IsType(t, &StateProbeIdle{}, state)
+
+	// advance time a little
+	clk.Add(time.Minute)
+
+	// add another node
+	state = sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{
+		NodeID: tiny.NewNode(tiny.Key(3)),
+	})
+
+	// not time for a check yet
+	require.IsType(t, &StateProbeIdle{}, state)
+
+	// advance time by check interval
+	clk.Add(cfg.CheckInterval)
+
+	// poll state machine
+	state = sm.Advance(ctx, &EventProbePoll{})
+
+	// the connectivity check should start
+	require.IsType(t, &StateProbeConnectivityCheck[tiny.Key, tiny.Node]{}, state)
+	stm := state.(*StateProbeConnectivityCheck[tiny.Key, tiny.Node])
+	require.True(t, key.Equal(tiny.Key(4), stm.NodeID.Key()))
+
+	// Poll the state machine, it should now be waiting for a response with no spare capacity
+	state = sm.Advance(ctx, &EventProbePoll{})
+	require.IsType(t, &StateProbeWaitingAtCapacity{}, state)
+
+	// advance time past the timeout
+	clk.Add(cfg.Timeout)
+
+	// state machine announces node failure
+	state = sm.Advance(ctx, &EventProbePoll{})
+	require.IsType(t, &StateProbeNodeFailure[tiny.Key, tiny.Node]{}, state)
+	stf := state.(*StateProbeNodeFailure[tiny.Key, tiny.Node])
+
+	// the failure should be for the right node
+	require.True(t, key.Equal(tiny.Key(4), stf.NodeID.Key()))
+
+	// node has been removed from routing table
+	_, found := rt.GetNode(tiny.Key(4))
+	require.False(t, found)
+
+	// state machine starts a check for the next node now that there is capacity
+	state = sm.Advance(ctx, &EventProbePoll{})
+	require.IsType(t, &StateProbeConnectivityCheck[tiny.Key, tiny.Node]{}, state)
+	stm = state.(*StateProbeConnectivityCheck[tiny.Key, tiny.Node])
+	require.True(t, key.Equal(tiny.Key(3), stm.NodeID.Key()))
+}
diff --git a/v2/coord/routing_test.go b/v2/coord/routing_test.go
index b3375b57..7c1e87fd 100644
--- a/v2/coord/routing_test.go
+++ 
b/v2/coord/routing_test.go @@ -9,14 +9,13 @@ import ( "github.com/benbjohnson/clock" "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/network/address" - "github.com/plprobelab/go-kademlia/query" - "github.com/plprobelab/go-kademlia/routing" "github.com/stretchr/testify/require" "golang.org/x/exp/slog" "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/query" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/routing" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p-kad-dht/v2/pb" @@ -29,7 +28,7 @@ func TestRoutingStartBootstrapSendsEvent(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - self := nodes[0].NodeInfo.ID + self := kadt.PeerID(nodes[0].NodeInfo.ID) // records the event passed to bootstrap bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) @@ -52,10 +51,8 @@ func TestRoutingStartBootstrapSendsEvent(t *testing.T) { routingBehaviour.Notify(ctx, ev) // the event that should be passed to the bootstrap state machine - expected := &routing.EventBootstrapStart[KadKey, ma.Multiaddr]{ - ProtocolID: ev.ProtocolID, - Message: ev.Message, - KnownClosestNodes: SliceOfPeerIDToSliceOfNodeID(ev.SeedNodes), + expected := &routing.EventBootstrapStart[kadt.Key, kadt.PeerID]{ + KnownClosestNodes: SliceOfPeerIDToSliceOfKadPeerID(ev.SeedNodes), } require.Equal(t, expected, bootstrap.Received) } @@ -67,7 +64,7 @@ func TestRoutingBootstrapGetClosestNodesSuccess(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - self := nodes[0].NodeInfo.ID + self := kadt.PeerID(nodes[0].NodeInfo.ID) // records the event passed to bootstrap bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) @@ -86,11 +83,11 @@ func TestRoutingBootstrapGetClosestNodesSuccess(t *testing.T) { routingBehaviour.Notify(ctx, ev) // bootstrap should receive message response event - require.IsType(t, &routing.EventBootstrapMessageResponse[KadKey, ma.Multiaddr]{}, bootstrap.Received) + require.IsType(t, &routing.EventBootstrapFindCloserResponse[kadt.Key, kadt.PeerID]{}, bootstrap.Received) - rev := bootstrap.Received.(*routing.EventBootstrapMessageResponse[KadKey, ma.Multiaddr]) - require.Equal(t, nodes[1].NodeInfo.ID, NodeIDToPeerID(rev.NodeID)) - require.Equal(t, ev.CloserNodes, SliceOfNodeInfoToSliceOfAddrInfo(rev.Response.CloserNodes())) + rev := bootstrap.Received.(*routing.EventBootstrapFindCloserResponse[kadt.Key, kadt.PeerID]) + require.Equal(t, nodes[1].NodeInfo.ID, peer.ID(rev.NodeID)) + require.Equal(t, SliceOfAddrInfoToSliceOfKadPeerID(ev.CloserNodes), rev.CloserNodes) } func TestRoutingBootstrapGetClosestNodesFailure(t *testing.T) { @@ -100,7 +97,7 @@ func TestRoutingBootstrapGetClosestNodesFailure(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - self := nodes[0].NodeInfo.ID + self := kadt.PeerID(nodes[0].NodeInfo.ID) // records the event passed to bootstrap bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) @@ -120,10 +117,10 @@ func TestRoutingBootstrapGetClosestNodesFailure(t *testing.T) { routingBehaviour.Notify(ctx, ev) // bootstrap should receive message response event - require.IsType(t, &routing.EventBootstrapMessageFailure[KadKey]{}, 
bootstrap.Received) + require.IsType(t, &routing.EventBootstrapFindCloserFailure[kadt.Key, kadt.PeerID]{}, bootstrap.Received) - rev := bootstrap.Received.(*routing.EventBootstrapMessageFailure[KadKey]) - require.Equal(t, nodes[1].NodeInfo.ID, NodeIDToPeerID(rev.NodeID)) + rev := bootstrap.Received.(*routing.EventBootstrapFindCloserFailure[kadt.Key, kadt.PeerID]) + require.Equal(t, nodes[1].NodeInfo.ID, peer.ID(rev.NodeID)) require.Equal(t, failure, rev.Error) } @@ -134,7 +131,7 @@ func TestRoutingAddNodeInfoSendsEvent(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - self := nodes[0].NodeInfo.ID + self := kadt.PeerID(nodes[0].NodeInfo.ID) // records the event passed to include include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) @@ -151,8 +148,8 @@ func TestRoutingAddNodeInfoSendsEvent(t *testing.T) { routingBehaviour.Notify(ctx, ev) // the event that should be passed to the include state machine - expected := &routing.EventIncludeAddCandidate[KadKey, ma.Multiaddr]{ - NodeInfo: kadt.AddrInfo{Info: ev.NodeInfo}, + expected := &routing.EventIncludeAddCandidate[kadt.Key, kadt.PeerID]{ + NodeID: kadt.PeerID(ev.NodeInfo.ID), } require.Equal(t, expected, include.Received) } @@ -164,7 +161,7 @@ func TestRoutingIncludeGetClosestNodesSuccess(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - self := nodes[0].NodeInfo.ID + self := kadt.PeerID(nodes[0].NodeInfo.ID) // records the event passed to include include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) @@ -184,11 +181,10 @@ func TestRoutingIncludeGetClosestNodesSuccess(t *testing.T) { routingBehaviour.Notify(ctx, ev) // include should receive message response event - require.IsType(t, &routing.EventIncludeMessageResponse[KadKey, ma.Multiaddr]{}, include.Received) + require.IsType(t, &routing.EventIncludeConnectivityCheckSuccess[kadt.Key, kadt.PeerID]{}, include.Received) - rev := include.Received.(*routing.EventIncludeMessageResponse[KadKey, ma.Multiaddr]) - require.Equal(t, nodes[1].NodeInfo, NodeInfoToAddrInfo(rev.NodeInfo)) - require.Equal(t, ev.CloserNodes, SliceOfNodeInfoToSliceOfAddrInfo(rev.Response.CloserNodes())) + rev := include.Received.(*routing.EventIncludeConnectivityCheckSuccess[kadt.Key, kadt.PeerID]) + require.Equal(t, nodes[1].NodeInfo.ID, peer.ID(rev.NodeID)) } func TestRoutingIncludeGetClosestNodesFailure(t *testing.T) { @@ -198,7 +194,7 @@ func TestRoutingIncludeGetClosestNodesFailure(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - self := nodes[0].NodeInfo.ID + self := kadt.PeerID(nodes[0].NodeInfo.ID) // records the event passed to include include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) @@ -219,10 +215,10 @@ func TestRoutingIncludeGetClosestNodesFailure(t *testing.T) { routingBehaviour.Notify(ctx, ev) // include should receive message response event - require.IsType(t, &routing.EventIncludeMessageFailure[KadKey, ma.Multiaddr]{}, include.Received) + require.IsType(t, &routing.EventIncludeConnectivityCheckFailure[kadt.Key, kadt.PeerID]{}, include.Received) - rev := include.Received.(*routing.EventIncludeMessageFailure[KadKey, ma.Multiaddr]) - require.Equal(t, nodes[1].NodeInfo, NodeInfoToAddrInfo(rev.NodeInfo)) + rev := include.Received.(*routing.EventIncludeConnectivityCheckFailure[kadt.Key, kadt.PeerID]) + require.Equal(t, nodes[1].NodeInfo.ID, peer.ID(rev.NodeID)) 
require.Equal(t, failure, rev.Error) } @@ -233,18 +229,18 @@ func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - self := nodes[0].NodeInfo.ID + self := kadt.PeerID(nodes[0].NodeInfo.ID) rt := nodes[0].RoutingTable includeCfg := routing.DefaultIncludeConfig() includeCfg.Clock = clk - include, err := routing.NewInclude[KadKey, ma.Multiaddr](rt, includeCfg) + include, err := routing.NewInclude[kadt.Key, kadt.PeerID](rt, includeCfg) require.NoError(t, err) probeCfg := routing.DefaultProbeConfig() probeCfg.Clock = clk probeCfg.CheckInterval = 5 * time.Minute - probe, err := routing.NewProbe[KadKey, ma.Multiaddr](rt, probeCfg) + probe, err := routing.NewProbe[kadt.Key](rt, probeCfg) require.NoError(t, err) // ensure bootstrap is always idle diff --git a/v2/dht.go b/v2/dht.go index 5191349f..db0e0b63 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -13,12 +13,11 @@ import ( "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" - "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" - "github.com/plprobelab/go-kademlia/routing" "golang.org/x/exp/slog" "github.com/libp2p/go-libp2p-kad-dht/v2/coord" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/routing" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) @@ -42,7 +41,7 @@ type DHT struct { // rt holds a reference to the routing table implementation. This can be // configured via the Config struct. - rt routing.RoutingTableCpl[key.Key256, kad.NodeID[key.Key256]] + rt routing.RoutingTableCpl[kadt.Key, kadt.PeerID] // backends backends map[string]Backend @@ -155,7 +154,7 @@ func New(h host.Host, cfg *Config) (*DHT, error) { coordCfg.MeterProvider = cfg.MeterProvider coordCfg.TracerProvider = cfg.TracerProvider - d.kad, err = coord.NewCoordinator(d.host.ID(), &Router{host: h}, d.rt, coordCfg) + d.kad, err = coord.NewCoordinator(kadt.PeerID(d.host.ID()), &Router{host: h}, d.rt, coordCfg) if err != nil { return nil, fmt.Errorf("new coordinator: %w", err) } @@ -309,12 +308,17 @@ func (d *DHT) AddAddresses(ctx context.Context, ais []peer.AddrInfo, ttl time.Du ctx, span := d.tele.Tracer.Start(ctx, "DHT.AddAddresses") defer span.End() - return d.kad.AddNodes(ctx, ais, ttl) + ps := d.host.Peerstore() + for _, ai := range ais { + ps.AddAddrs(ai.ID, ai.Addrs, ttl) + } + + return d.kad.AddNodes(ctx, ais) } -// newSHA256Key returns a [key.Key256] that conforms to the [kad.Key] interface by -// SHA256 hashing the given bytes and wrapping them in a [key.Key256]. -func newSHA256Key(data []byte) key.Key256 { +// newSHA256Key returns a [kadt.KadKey] that conforms to the [kad.Key] interface by +// SHA256 hashing the given bytes and wrapping them in a [kadt.KadKey]. +func newSHA256Key(data []byte) kadt.Key { h := sha256.Sum256(data) return key.NewKey256(h[:]) } diff --git a/v2/handlers.go b/v2/handlers.go index 5339c3fa..5243a79d 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -10,7 +10,6 @@ import ( record "github.com/libp2p/go-libp2p-record" recpb "github.com/libp2p/go-libp2p-record/pb" "github.com/libp2p/go-libp2p/core/peer" - "github.com/plprobelab/go-kademlia/key" "go.opentelemetry.io/otel/attribute" otel "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" @@ -233,7 +232,7 @@ func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Me // closerPeers returns the closest peers to the given target key this host knows // about. 
It doesn't return 1) itself 2) the peer that asked for closer peers. -func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256) []*pb.Message_Peer { +func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target kadt.Key) []*pb.Message_Peer { _, span := d.tele.Tracer.Start(ctx, "DHT.closerPeers", otel.WithAttributes(attribute.String("remote", remote.String()), attribute.String("target", target.HexString()))) defer span.End() @@ -245,7 +244,7 @@ func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256 // pre-allocated the result set slice. filtered := make([]*pb.Message_Peer, 0, len(peers)) for _, p := range peers { - pid := peer.ID(p.(kadt.PeerID)) // TODO: type cast + pid := peer.ID(p) // TODO: type cast // check for own peer ID if pid == d.host.ID() { diff --git a/v2/kadt/kadt.go b/v2/kadt/kadt.go index d71ecc39..9de3e6e9 100644 --- a/v2/kadt/kadt.go +++ b/v2/kadt/kadt.go @@ -13,18 +13,22 @@ import ( "github.com/plprobelab/go-kademlia/key" ) +// Key is a type alias for the type of key that's used with this DHT +// implementation. +type Key = key.Key256 + // PeerID is a type alias for [peer.ID] that implements the [kad.NodeID] // interface. This means we can use PeerID for any operation that interfaces // with go-kademlia. type PeerID peer.ID // assertion that PeerID implements the kad.NodeID interface -var _ kad.NodeID[key.Key256] = PeerID("") +var _ kad.NodeID[Key] = PeerID("") -// Key returns the Kademlia [key.Key256] of PeerID. The amino DHT operates on +// Key returns the Kademlia [KadKey] of PeerID. The amino DHT operates on // SHA256 hashes of, in this case, peer.IDs. This means this Key method takes // the [peer.ID], hashes it and constructs a 256-bit key. -func (p PeerID) Key() key.Key256 { +func (p PeerID) Key() Key { h := sha256.Sum256([]byte(p)) return key.NewKey256(h[:]) } @@ -46,10 +50,10 @@ type AddrInfo struct { } // assertion that AddrInfo implements the [kad.NodeInfo] interface -var _ kad.NodeInfo[key.Key256, ma.Multiaddr] = (*AddrInfo)(nil) +var _ kad.NodeInfo[Key, ma.Multiaddr] = (*AddrInfo)(nil) // ID returns the [kad.NodeID] of this peer's information struct. -func (ai AddrInfo) ID() kad.NodeID[key.Key256] { +func (ai AddrInfo) ID() kad.NodeID[Key] { return PeerID(ai.Info.ID) } diff --git a/v2/pb/msg.aux.go b/v2/pb/msg.aux.go index 14b7f6d0..b0bf4ef0 100644 --- a/v2/pb/msg.aux.go +++ b/v2/pb/msg.aux.go @@ -15,14 +15,14 @@ import ( // this file contains auxiliary methods to augment the protobuf generated types. // It is used to let these types conform to interfaces or add convenience methods. 
-var _ kad.Request[key.Key256, ma.Multiaddr] = (*Message)(nil) +var _ kad.Request[kadt.Key, ma.Multiaddr] = (*Message)(nil) -func (m *Message) Target() key.Key256 { +func (m *Message) Target() kadt.Key { b := sha256.Sum256(m.Key) return key.NewKey256(b[:]) } -func (m *Message) EmptyResponse() kad.Response[key.Key256, ma.Multiaddr] { +func (m *Message) EmptyResponse() kad.Response[kadt.Key, ma.Multiaddr] { return &Message{ Type: m.Type, Key: m.Key, @@ -90,12 +90,12 @@ func (m *Message) CloserPeersAddrInfos() []peer.AddrInfo { return addrInfos } -func (m *Message) CloserNodes() []kad.NodeInfo[key.Key256, ma.Multiaddr] { +func (m *Message) CloserNodes() []kad.NodeInfo[kadt.Key, ma.Multiaddr] { if m == nil { return nil } - infos := make([]kad.NodeInfo[key.Key256, ma.Multiaddr], 0, len(m.CloserPeers)) + infos := make([]kad.NodeInfo[kadt.Key, ma.Multiaddr], 0, len(m.CloserPeers)) for _, p := range m.CloserPeers { infos = append(infos, &kadt.AddrInfo{Info: peer.AddrInfo{ ID: peer.ID(p.Id), diff --git a/v2/router.go b/v2/router.go index 8c40471d..f18f2892 100644 --- a/v2/router.go +++ b/v2/router.go @@ -5,7 +5,6 @@ import ( "fmt" "time" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" @@ -13,11 +12,12 @@ import ( "github.com/libp2p/go-msgio" "github.com/libp2p/go-msgio/pbio" "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/network/address" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) @@ -112,7 +112,7 @@ func (r *Router) GetNodeInfo(ctx context.Context, id peer.ID) (peer.AddrInfo, er return r.host.Peerstore().PeerInfo(id), nil } -func (r *Router) GetClosestNodes(ctx context.Context, to peer.AddrInfo, target key.Key256) ([]peer.AddrInfo, error) { +func (r *Router) GetClosestNodes(ctx context.Context, to peer.AddrInfo, target kadt.Key) ([]peer.AddrInfo, error) { resp, err := r.SendMessage(ctx, to, address.ProtocolID(ProtocolIPFS), FindKeyRequest(target)) if err != nil { return nil, err @@ -121,7 +121,7 @@ func (r *Router) GetClosestNodes(ctx context.Context, to peer.AddrInfo, target k return resp.CloserPeersAddrInfos(), nil } -func FindKeyRequest(k key.Key256) *pb.Message { +func FindKeyRequest(k kadt.Key) *pb.Message { marshalledKey, _ := k.MarshalBinary() return &pb.Message{ Type: pb.Message_FIND_NODE, diff --git a/v2/tele/tele.go b/v2/tele/tele.go index 5ee01666..7163f3d0 100644 --- a/v2/tele/tele.go +++ b/v2/tele/tele.go @@ -3,9 +3,11 @@ package tele import ( "context" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" motel "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/trace" ) // ctxKey is an unexported type alias for the value of a context key. This is @@ -84,6 +86,11 @@ func AttrKey(val string) attribute.KeyValue { return attribute.String("key", val) } +// AttrEvent creates an attribute that records the name of an event +func AttrEvent(val string) attribute.KeyValue { + return attribute.String("event", val) +} + // WithAttributes is a function that attaches the provided attributes to the // given context. The given attributes will overwrite any already existing ones. 
func WithAttributes(ctx context.Context, attrs ...attribute.KeyValue) context.Context { @@ -115,3 +122,8 @@ func FromContext(ctx context.Context, attrs ...attribute.KeyValue) attribute.Set return attribute.NewSet(append(set.ToSlice(), attrs...)...) } + +// StartSpan creates a span and a [context.Context] containing the newly-created span. +func StartSpan(ctx context.Context, name string) (context.Context, trace.Span) { + return otel.Tracer(TracerName).Start(ctx, name) +} From 97e4e0253b10e98da21e31d68f907e2ecfc3a098 Mon Sep 17 00:00:00 2001 From: Ian Davis <18375+iand@users.noreply.github.com> Date: Sat, 16 Sep 2023 13:41:18 +0100 Subject: [PATCH 44/64] v2: upgrade build to include go1.21 (#890) * v2: upgrade to go1.21 * Add uci config * Use newer uci actions * Use v2 working directory in actions * Set go-version input in actions * Set go-version input in actions * Use go 1.20.8 in actions * Use go 1.21.1 and relative working directory * Try default working directory on job * Remove uci.yaml which is not supported yet * Try default working directory on job * Try default working directory as input * Restore uci.yaml * Restore uci.yaml * Use modified go-check * Use modified go-test * Fix go-test * Fix go-test * Fix go-test * Restore libp2p 0.30.0 * go mod tidy * Remove nil error return from DefaultConfig * use mock clock for IPNS record generation (#894) * Use MapDatastore for provider backend tests instead of leveldb (#896) * revert some merge residuals * style: minor coding clean up (#898) * remove superfluous type conversion * add tiny example test * unexport type conversion helpers * Target go language version 1.20 and add 1.20.8 to build matrix * Target go language version 1.20 and add 1.20.8 to build matrix * WIP --------- Co-authored-by: Dennis Trautwein --- .github/uci.yaml | 4 + .github/workflows/go-check.yml | 56 ++++++++++-- .github/workflows/go-test.yml | 95 ++++++++++++++------ .github/workflows/release-check.yml | 18 ++-- .github/workflows/releaser.yml | 14 ++- .github/workflows/tagpush.yml | 14 ++- v2/backend_provider_test.go | 32 +++---- v2/coord/conversion.go | 24 ++--- v2/coord/internal/tiny/node_test.go | 17 ++++ v2/coord/query.go | 6 +- v2/coord/query/iter.go | 2 +- v2/coord/query/iter_test.go | 16 ++-- v2/coord/query/pool_test.go | 26 +++--- v2/coord/query/query_test.go | 130 ++++++++++++++-------------- v2/coord/routing.go | 18 ++-- v2/coord/routing/bootstrap_test.go | 20 ++--- v2/coord/routing/include_test.go | 38 ++++---- v2/coord/routing/probe_test.go | 128 +++++++++++++-------------- v2/coord/routing_test.go | 4 +- v2/go.mod | 26 +++--- v2/go.sum | 54 ++++++------ v2/handlers_test.go | 63 +++++++++----- 22 files changed, 468 insertions(+), 337 deletions(-) create mode 100644 .github/uci.yaml create mode 100644 v2/coord/internal/tiny/node_test.go diff --git a/.github/uci.yaml b/.github/uci.yaml new file mode 100644 index 00000000..9dbcd4b5 --- /dev/null +++ b/.github/uci.yaml @@ -0,0 +1,4 @@ +files: # Configure what Unified CI templates should be used for your repository; defaults to primary language default fileset + - .github/workflows/go-check.yml + - .github/workflows/go-test.yml +force: true # Configure whether Unified CI should overwrite existing workflows; defaults to false diff --git a/.github/workflows/go-check.yml b/.github/workflows/go-check.yml index cc65ce68..135bb143 100644 --- a/.github/workflows/go-check.yml +++ b/.github/workflows/go-check.yml @@ -1,9 +1,23 @@ -# File managed by web3-bot. DO NOT EDIT. 
-# See https://github.com/protocol/.github/ for details. - -on: [push, pull_request] name: Go Checks +defaults: + run: + working-directory: ./v2 + + +on: + pull_request: + push: + branches: ["main"] + workflow_dispatch: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} + cancel-in-progress: true + jobs: unit: runs-on: ubuntu-latest @@ -13,15 +27,37 @@ jobs: with: submodules: recursive - id: config - uses: protocol/.github/.github/actions/read-config@master - - uses: actions/setup-go@v3 + uses: pl-strflt/uci/.github/actions/read-config@main + - id: go-mod + uses: pl-strflt/uci/.github/actions/read-go-mod@main + - id: go + uses: actions/setup-go@v4 with: - go-version: 1.20.x + go-version: '1.20.8' + cache: false - name: Run repo-specific setup uses: ./.github/actions/go-check-setup if: hashFiles('./.github/actions/go-check-setup') != '' - name: Install staticcheck - run: go install honnef.co/go/tools/cmd/staticcheck@4970552d932f48b71485287748246cf3237cebdf # 2023.1 (v0.4.0) + env: + STATICCHECK_VERSIONS: | + { + "1.21": "9e12e6014d3b0a854950490051ad1338fc6badd1", + "1.20": "9e12e6014d3b0a854950490051ad1338fc6badd1", + "1.19": "376210a89477dedbe6fdc4484b233998650d7b3c", + "1.18": "376210a89477dedbe6fdc4484b233998650d7b3c", + "1.17": "c8caa92bad8c27ae734c6725b8a04932d54a147b", + "1.16": "4dc1992c9bb4310ba1e98b30c8d7d46444891d3b", + "1.15": "5b7de96f09104e2be384aa93a7c821eb5e77378b", + "1.14": "5b7de96f09104e2be384aa93a7c821eb5e77378b", + "1.13": "afd67930eec2a9ed3e9b19f684d17a062285f16a" + } + GO_VERSION: ${{ steps.go.outputs.go-version }} + GO111MODULE: on + run: | + version="$(jq -nr 'env.STATICCHECK_VERSIONS | fromjson | .[env.GO_VERSION | sub("\\.[^.]+$"; "")] // "latest"')" + echo "Installing staticcheck@$version" + go install honnef.co/go/tools/cmd/staticcheck@$version || go get honnef.co/go/tools/cmd/staticcheck@$version - name: Check that go.mod is tidy uses: protocol/multiple-go-modules@v1.2 with: @@ -32,6 +68,7 @@ jobs: exit 1 fi git diff --exit-code -- go.sum go.mod + working-directory: ./v2 - name: gofmt if: success() || failure() # run this step even if the previous one failed run: | @@ -45,6 +82,7 @@ jobs: uses: protocol/multiple-go-modules@v1.2 with: run: go vet ./... + working-directory: ./v2 - name: staticcheck if: success() || failure() # run this step even if the previous one failed uses: protocol/multiple-go-modules@v1.2 @@ -52,6 +90,7 @@ jobs: run: | set -o pipefail staticcheck ./... | sed -e 's@\(.*\)\.go@./\1.go@g' + working-directory: ./v2 - name: go generate uses: protocol/multiple-go-modules@v1.2 if: (success() || failure()) && fromJSON(steps.config.outputs.json).gogenerate == true @@ -65,3 +104,4 @@ jobs: git status --short exit 1 fi + working-directory: ./v2 diff --git a/.github/workflows/go-test.yml b/.github/workflows/go-test.yml index c5cb3efc..579da117 100644 --- a/.github/workflows/go-test.yml +++ b/.github/workflows/go-test.yml @@ -1,52 +1,87 @@ -# File managed by web3-bot. DO NOT EDIT. -# See https://github.com/protocol/.github/ for details. 
- -on: [push, pull_request] name: Go Test +on: + pull_request: + push: + branches: ["main"] + workflow_dispatch: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} + cancel-in-progress: true + jobs: unit: strategy: fail-fast: false matrix: os: [ "ubuntu", "windows", "macos" ] - go: ["1.19.x","1.20.x"] + go: ["1.20.8", "1.21.1"] env: - COVERAGES: "" + GOTESTFLAGS: -cover -coverprofile=module-coverage.txt -coverpkg=./... + GO386FLAGS: '' + GORACEFLAGS: '' runs-on: ${{ fromJSON(vars[format('UCI_GO_TEST_RUNNER_{0}', matrix.os)] || format('"{0}-latest"', matrix.os)) }} name: ${{ matrix.os }} (go ${{ matrix.go }}) steps: + - name: Use msys2 on windows + if: matrix.os == 'windows' + # The executable for msys2 is also called bash.cmd + # https://github.com/actions/virtual-environments/blob/main/images/win/Windows2019-Readme.md#shells + # If we prepend its location to the PATH + # subsequent 'shell: bash' steps will use msys2 instead of gitbash + run: echo "C:/msys64/usr/bin" >> $GITHUB_PATH - uses: actions/checkout@v3 with: submodules: recursive + # Update to v4 is blocked by https://github.com/actions/setup-go/pull/411 + # - uses: actions/setup-go@v4 + - uses: actions/setup-go@v3 + with: + go-version: ${{ steps.go.outputs.version }} + # cache: false - id: config - uses: protocol/.github/.github/actions/read-config@master + uses: pl-strflt/uci/.github/actions/read-config@main + - id: go-mod + uses: pl-strflt/uci/.github/actions/read-go-mod@main + - id: go + uses: actions/setup-go@v4 + with: + go-version: ${{ steps.go.outputs.version }} + cache: false + - if: toJSON(fromJSON(steps.config.outputs.json).shuffle) != 'false' + run: | + echo "GOTESTFLAGS=-shuffle=on $GOTESTFLAGS" >> $GITHUB_ENV + echo "GO386FLAGS=-shuffle=on $GO386FLAGS" >> $GITHUB_ENV + echo "GORACEFLAGS=-shuffle=on $GORACEFLAGS" >> $GITHUB_ENV + - if: toJSON(fromJSON(steps.config.outputs.json).verbose) != 'false' + run: | + echo "GOTESTFLAGS=-v $GOTESTFLAGS" >> $GITHUB_ENV + echo "GO386FLAGS=-v $GO386FLAGS" >> $GITHUB_ENV + echo "GORACEFLAGS=-v $GORACEFLAGS" >> $GITHUB_ENV + # Update to v4 is blocked by https://github.com/actions/setup-go/pull/411 + # - uses: actions/setup-go@v4 - uses: actions/setup-go@v3 with: - go-version: ${{ matrix.go }} + go-version: ${{ steps.go.outputs.version }} - name: Go information run: | go version go env - - name: Use msys2 on windows - if: matrix.os == 'windows' - shell: bash - # The executable for msys2 is also called bash.cmd - # https://github.com/actions/virtual-environments/blob/main/images/win/Windows2019-Readme.md#shells - # If we prepend its location to the PATH - # subsequent 'shell: bash' steps will use msys2 instead of gitbash - run: echo "C:/msys64/usr/bin" >> $GITHUB_PATH - name: Run repo-specific setup uses: ./.github/actions/go-test-setup if: hashFiles('./.github/actions/go-test-setup') != '' - name: Run tests if: contains(fromJSON(steps.config.outputs.json).skipOSes, matrix.os) == false uses: protocol/multiple-go-modules@v1.2 + env: + GOFLAGS: ${{ format('{0} {1}', env.GOTESTFLAGS, env.GOFLAGS) }} with: - # Use -coverpkg=./..., so that we include cross-package coverage. - # If package ./A imports ./B, and ./A's tests also cover ./B, - # this means ./B's coverage will be significantly higher than 0%. - run: go test -v -shuffle=on -coverprofile=module-coverage.txt -coverpkg=./... ./... + run: go test ./... 
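+        # the module under test lives in ./v2, hence the explicit working directory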
+ working-directory: ./v2 - name: Run tests (32 bit) # can't run 32 bit tests on OSX. if: matrix.os != 'macos' && @@ -55,22 +90,28 @@ jobs: uses: protocol/multiple-go-modules@v1.2 env: GOARCH: 386 + GOFLAGS: ${{ format('{0} {1}', env.GO386FLAGS, env.GOFLAGS) }} with: run: | export "PATH=$PATH_386:$PATH" - go test -v -shuffle=on ./... + go test ./... + working-directory: ./v2 - name: Run tests with race detector # speed things up. Windows and OSX VMs are slow if: matrix.os == 'ubuntu' && + fromJSON(steps.config.outputs.json).skipRace != true && contains(fromJSON(steps.config.outputs.json).skipOSes, matrix.os) == false uses: protocol/multiple-go-modules@v1.2 + env: + GOFLAGS: ${{ format('{0} {1}', env.GORACEFLAGS, env.GOFLAGS) }} with: - run: go test -v -race ./... + run: go test -race ./... + working-directory: ./v2 - name: Collect coverage files - shell: bash - run: echo "COVERAGES=$(find . -type f -name 'module-coverage.txt' | tr -s '\n' ',' | sed 's/,$//')" >> $GITHUB_ENV + id: coverages + run: echo "files=$(find . -type f -name 'module-coverage.txt' | tr -s '\n' ',' | sed 's/,$//')" >> $GITHUB_OUTPUT - name: Upload coverage to Codecov - uses: codecov/codecov-action@d9f34f8cd5cb3b3eb79b3e4b5dae3a16df499a70 # v3.1.1 + uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # v3.1.4 with: - files: '${{ env.COVERAGES }}' - env_vars: OS=${{ matrix.os }}, GO=${{ matrix.go }} + files: ${{ steps.coverages.outputs.files }} + env_vars: OS=${{ matrix.os }}, GO=${{ steps.go.outputs.version }} diff --git a/.github/workflows/release-check.yml b/.github/workflows/release-check.yml index e2408e37..bda61600 100644 --- a/.github/workflows/release-check.yml +++ b/.github/workflows/release-check.yml @@ -1,13 +1,19 @@ -# File managed by web3-bot. DO NOT EDIT. -# See https://github.com/protocol/.github/ for details. - name: Release Checker + on: pull_request_target: paths: [ 'version.json' ] + types: [ opened, synchronize, reopened, labeled, unlabeled ] + workflow_dispatch: + +permissions: + contents: write + pull-requests: write + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true jobs: release-check: - uses: protocol/.github/.github/workflows/release-check.yml@master - with: - go-version: 1.20.x + uses: pl-strflt/uci/.github/workflows/release-check.yml@v0.0 diff --git a/.github/workflows/releaser.yml b/.github/workflows/releaser.yml index cdccbf87..dd8081b9 100644 --- a/.github/workflows/releaser.yml +++ b/.github/workflows/releaser.yml @@ -1,11 +1,17 @@ -# File managed by web3-bot. DO NOT EDIT. -# See https://github.com/protocol/.github/ for details. - name: Releaser + on: push: paths: [ 'version.json' ] + workflow_dispatch: + +permissions: + contents: write + +concurrency: + group: ${{ github.workflow }}-${{ github.sha }} + cancel-in-progress: true jobs: releaser: - uses: protocol/.github/.github/workflows/releaser.yml@master + uses: pl-strflt/uci/.github/workflows/releaser.yml@v0.0 diff --git a/.github/workflows/tagpush.yml b/.github/workflows/tagpush.yml index d8499618..59de8cb9 100644 --- a/.github/workflows/tagpush.yml +++ b/.github/workflows/tagpush.yml @@ -1,12 +1,18 @@ -# File managed by web3-bot. DO NOT EDIT. -# See https://github.com/protocol/.github/ for details. 
- name: Tag Push Checker + on: push: tags: - v* +permissions: + contents: read + issues: write + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: releaser: - uses: protocol/.github/.github/workflows/tagpush.yml@master + uses: pl-strflt/uci/.github/workflows/tagpush.yml@v0.0 diff --git a/v2/backend_provider_test.go b/v2/backend_provider_test.go index b87cf488..10407e54 100644 --- a/v2/backend_provider_test.go +++ b/v2/backend_provider_test.go @@ -9,6 +9,7 @@ import ( "github.com/benbjohnson/clock" ds "github.com/ipfs/go-datastore" + syncds "github.com/ipfs/go-datastore/sync" "github.com/libp2p/go-libp2p" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -21,9 +22,7 @@ func newBackendProvider(t testing.TB, cfg *ProvidersBackendConfig) *ProvidersBac h, err := libp2p.New(libp2p.NoListenAddrs) require.NoError(t, err) - dstore, err := InMemoryDatastore() - require.NoError(t, err) - + dstore := syncds.MutexWrap(ds.NewMapDatastore()) t.Cleanup(func() { if err = dstore.Close(); err != nil { t.Logf("closing datastore: %s", err) @@ -41,17 +40,19 @@ func newBackendProvider(t testing.TB, cfg *ProvidersBackendConfig) *ProvidersBac } func TestProvidersBackend_GarbageCollection(t *testing.T) { - mockClock := clock.NewMock() + clk := clock.NewMock() + cfg, err := DefaultProviderBackendConfig() require.NoError(t, err) - cfg.clk = mockClock + cfg.clk = clk cfg.Logger = devnull b := newBackendProvider(t, cfg) // start the garbage collection process b.StartGarbageCollection() + t.Cleanup(func() { b.StopGarbageCollection() }) // write random record to datastore and peerstore ctx := context.Background() @@ -59,7 +60,7 @@ func TestProvidersBackend_GarbageCollection(t *testing.T) { // write to datastore dsKey := newDatastoreKey(namespaceProviders, "random-key", string(p.ID)) - rec := expiryRecord{expiry: mockClock.Now()} + rec := expiryRecord{expiry: clk.Now()} err = b.datastore.Put(ctx, dsKey, rec.MarshalBinary()) require.NoError(t, err) @@ -67,28 +68,19 @@ func TestProvidersBackend_GarbageCollection(t *testing.T) { b.addrBook.AddAddrs(p.ID, p.Addrs, time.Hour) // advance clock half the validity time and check if record is still there - mockClock.Add(cfg.ProvideValidity / 2) - - // sync autobatching datastore to have all put/deletes visible - err = b.datastore.Sync(ctx, ds.NewKey("")) - require.NoError(t, err) + clk.Add(cfg.ProvideValidity / 2) // we expect the record to still be there after half the ProvideValidity _, err = b.datastore.Get(ctx, dsKey) require.NoError(t, err) // advance clock another time and check if the record was GC'd now - mockClock.Add(cfg.ProvideValidity + cfg.GCInterval) - - // sync autobatching datastore to have all put/deletes visible - err = b.datastore.Sync(ctx, ds.NewKey("")) - require.NoError(t, err) + clk.Add(cfg.ProvideValidity + cfg.GCInterval) // we expect the record to be GC'd now - _, err = b.datastore.Get(ctx, dsKey) - require.ErrorIs(t, err, ds.ErrNotFound) - - b.StopGarbageCollection() + val, err := b.datastore.Get(ctx, dsKey) + assert.ErrorIs(t, err, ds.ErrNotFound) + assert.Nil(t, val) } func TestProvidersBackend_GarbageCollection_lifecycle_thread_safe(t *testing.T) { diff --git a/v2/coord/conversion.go b/v2/coord/conversion.go index 2cdc3f2e..d605507b 100644 --- a/v2/coord/conversion.go +++ b/v2/coord/conversion.go @@ -7,22 +7,22 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) -// KadPeerIDToAddrInfo converts a kad.NodeID to a peer.AddrInfo with no addresses. 
+// kadPeerIDToAddrInfo converts a kad.NodeID to a peer.AddrInfo with no addresses. // This function will panic if id's underlying type is not kadt.PeerID -func KadPeerIDToAddrInfo(id kad.NodeID[kadt.Key]) peer.AddrInfo { +func kadPeerIDToAddrInfo(id kad.NodeID[kadt.Key]) peer.AddrInfo { peerID := id.(kadt.PeerID) return peer.AddrInfo{ ID: peer.ID(peerID), } } -// AddrInfoToKadPeerID converts a peer.AddrInfo to a kad.NodeID. -func AddrInfoToKadPeerID(ai peer.AddrInfo) kadt.PeerID { - return kadt.PeerID(ai.ID) +// addrInfoToKadPeerID converts a peer.AddrInfo to a kad.NodeID. +func addrInfoToKadPeerID(addrInfo peer.AddrInfo) kadt.PeerID { + return kadt.PeerID(addrInfo.ID) } -// SliceOfPeerIDToSliceOfKadPeerID converts a slice of peer.ID to a slice of kadt.PeerID -func SliceOfPeerIDToSliceOfKadPeerID(peers []peer.ID) []kadt.PeerID { +// sliceOfPeerIDToSliceOfKadPeerID converts a slice of peer.ID to a slice of kadt.PeerID +func sliceOfPeerIDToSliceOfKadPeerID(peers []peer.ID) []kadt.PeerID { nodes := make([]kadt.PeerID, len(peers)) for i := range peers { nodes[i] = kadt.PeerID(peers[i]) @@ -30,10 +30,10 @@ func SliceOfPeerIDToSliceOfKadPeerID(peers []peer.ID) []kadt.PeerID { return nodes } -func SliceOfAddrInfoToSliceOfKadPeerID(ais []peer.AddrInfo) []kadt.PeerID { - nodes := make([]kadt.PeerID, len(ais)) - for i := range ais { - nodes[i] = kadt.PeerID(ais[i].ID) +func sliceOfAddrInfoToSliceOfKadPeerID(addrInfos []peer.AddrInfo) []kadt.PeerID { + peers := make([]kadt.PeerID, len(addrInfos)) + for i := range addrInfos { + peers[i] = kadt.PeerID(addrInfos[i].ID) } - return nodes + return peers } diff --git a/v2/coord/internal/tiny/node_test.go b/v2/coord/internal/tiny/node_test.go new file mode 100644 index 00000000..a6e175e5 --- /dev/null +++ b/v2/coord/internal/tiny/node_test.go @@ -0,0 +1,17 @@ +package tiny + +import ( + "fmt" +) + +func ExampleNode_String() { + n := Node{key: 0b11111111} + fmt.Println(n.String()) + + n = Node{key: 0b01010101} + fmt.Println(n.String()) + + // Output: + // ff + // 55 +} diff --git a/v2/coord/query.go b/v2/coord/query.go index adbce4b1..8bdfbd53 100644 --- a/v2/coord/query.go +++ b/v2/coord/query.go @@ -48,7 +48,7 @@ func (p *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { cmd = &query.EventPoolAddQuery[kadt.Key, kadt.PeerID]{ QueryID: ev.QueryID, Target: ev.Target, - KnownClosestNodes: SliceOfPeerIDToSliceOfKadPeerID(ev.KnownClosestNodes), + KnownClosestNodes: sliceOfPeerIDToSliceOfKadPeerID(ev.KnownClosestNodes), } if ev.Notify != nil { p.waiters[ev.QueryID] = ev.Notify @@ -78,7 +78,7 @@ func (p *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { cmd = &query.EventPoolFindCloserResponse[kadt.Key, kadt.PeerID]{ NodeID: kadt.PeerID(ev.To.ID), QueryID: ev.QueryID, - CloserNodes: SliceOfAddrInfoToSliceOfKadPeerID(ev.CloserNodes), + CloserNodes: sliceOfAddrInfoToSliceOfKadPeerID(ev.CloserNodes), } case *EventGetCloserNodesFailure: cmd = &query.EventPoolFindCloserFailure[kadt.Key, kadt.PeerID]{ @@ -151,7 +151,7 @@ func (p *PooledQueryBehaviour) advancePool(ctx context.Context, ev query.PoolEve case *query.StatePoolFindCloser[kadt.Key, kadt.PeerID]: return &EventOutboundGetCloserNodes{ QueryID: st.QueryID, - To: KadPeerIDToAddrInfo(st.NodeID), + To: kadPeerIDToAddrInfo(st.NodeID), Target: st.Target, Notify: p, }, true diff --git a/v2/coord/query/iter.go b/v2/coord/query/iter.go index 52960dea..0cf2bbed 100644 --- a/v2/coord/query/iter.go +++ b/v2/coord/query/iter.go @@ -17,7 +17,7 @@ type NodeIter[K kad.Key[K], N 
kad.NodeID[K]] interface { Find(K) (*NodeStatus[K, N], bool) // Each applies fn to each entry in the iterator in order. Each stops and returns true if fn returns true. - // Otherwise Each returns false when there are no further entries. + // Otherwise, Each returns false when there are no further entries. Each(ctx context.Context, fn func(context.Context, *NodeStatus[K, N]) bool) bool } diff --git a/v2/coord/query/iter_test.go b/v2/coord/query/iter_test.go index 0985182a..d5d02de9 100644 --- a/v2/coord/query/iter_test.go +++ b/v2/coord/query/iter_test.go @@ -17,10 +17,10 @@ var ( func TestClosestNodesIter(t *testing.T) { target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 - b := tiny.NewNode(tiny.Key(0b00001000)) // 8 - c := tiny.NewNode(tiny.Key(0b00010000)) // 16 - d := tiny.NewNode(tiny.Key(0b00100000)) // 32 + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00001000) // 8 + c := tiny.NewNode(0b00010000) // 16 + d := tiny.NewNode(0b00100000) // 32 // ensure the order of the known nodes require.True(t, target.Xor(a.Key()).Compare(target.Xor(b.Key())) == -1) @@ -48,10 +48,10 @@ func TestClosestNodesIter(t *testing.T) { } func TestSequentialIter(t *testing.T) { - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 - b := tiny.NewNode(tiny.Key(0b00001000)) // 8 - c := tiny.NewNode(tiny.Key(0b00010000)) // 16 - d := tiny.NewNode(tiny.Key(0b00100000)) // 32 + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00001000) // 8 + c := tiny.NewNode(0b00010000) // 16 + d := tiny.NewNode(0b00100000) // 32 iter := NewSequentialIter[tiny.Key, tiny.Node]() diff --git a/v2/coord/query/pool_test.go b/v2/coord/query/pool_test.go index 0deaa5a5..2f6ab26f 100644 --- a/v2/coord/query/pool_test.go +++ b/v2/coord/query/pool_test.go @@ -70,7 +70,7 @@ func TestPoolStartsIdle(t *testing.T) { cfg := DefaultPoolConfig() cfg.Clock = clk - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) p, err := NewPool[tiny.Key](self, cfg) require.NoError(t, err) @@ -84,7 +84,7 @@ func TestPoolStopWhenNoQueries(t *testing.T) { cfg := DefaultPoolConfig() cfg.Clock = clk - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) p, err := NewPool[tiny.Key](self, cfg) require.NoError(t, err) @@ -98,12 +98,12 @@ func TestPoolAddQueryStartsIfCapacity(t *testing.T) { cfg := DefaultPoolConfig() cfg.Clock = clk - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) p, err := NewPool[tiny.Key](self, cfg) require.NoError(t, err) target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + a := tiny.NewNode(0b00000100) // 4 queryID := QueryID("test") @@ -138,12 +138,12 @@ func TestPoolMessageResponse(t *testing.T) { cfg := DefaultPoolConfig() cfg.Clock = clk - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) p, err := NewPool[tiny.Key](self, cfg) require.NoError(t, err) target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + a := tiny.NewNode(0b00000100) // 4 queryID := QueryID("test") @@ -182,15 +182,15 @@ func TestPoolPrefersRunningQueriesOverNewOnes(t *testing.T) { cfg.Clock = clk cfg.Concurrency = 2 // allow two queries to run concurrently - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) p, err := NewPool[tiny.Key](self, cfg) require.NoError(t, err) target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 - b := tiny.NewNode(tiny.Key(0b00001000)) // 8 - c := tiny.NewNode(tiny.Key(0b00010000)) // 16 - d := tiny.NewNode(tiny.Key(0b00100000)) // 32 + a := tiny.NewNode(0b00000100) // 4 + b := 
tiny.NewNode(0b00001000) // 8 + c := tiny.NewNode(0b00010000) // 16 + d := tiny.NewNode(0b00100000) // 32 // Add the first query queryID1 := QueryID("1") @@ -267,12 +267,12 @@ func TestPoolRespectsConcurrency(t *testing.T) { cfg.Concurrency = 2 // allow two queries to run concurrently cfg.QueryConcurrency = 1 // allow each query to have a single request in flight - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) p, err := NewPool[tiny.Key](self, cfg) require.NoError(t, err) target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + a := tiny.NewNode(0b00000100) // 4 // Add the first query queryID1 := QueryID("1") diff --git a/v2/coord/query/query_test.go b/v2/coord/query/query_test.go index 6565d3e5..49564dcd 100644 --- a/v2/coord/query/query_test.go +++ b/v2/coord/query/query_test.go @@ -53,7 +53,7 @@ func TestQueryMessagesNode(t *testing.T) { ctx := context.Background() target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + a := tiny.NewNode(0b00000100) // 4 // one known node to start with knownNodes := []tiny.Node{a} @@ -67,7 +67,7 @@ func TestQueryMessagesNode(t *testing.T) { queryID := QueryID("test") - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) @@ -96,8 +96,8 @@ func TestQueryMessagesNearest(t *testing.T) { ctx := context.Background() target := tiny.Key(0b00000011) - far := tiny.NewNode(tiny.Key(0b11011011)) - near := tiny.NewNode(tiny.Key(0b00000110)) + far := tiny.NewNode(0b11011011) + near := tiny.NewNode(0b00000110) // ensure near is nearer to target than far is require.Less(t, target.Xor(near.Key()), target.Xor(far.Key())) @@ -116,7 +116,7 @@ func TestQueryMessagesNearest(t *testing.T) { queryID := QueryID("test") - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) @@ -133,7 +133,7 @@ func TestQueryCancelFinishesQuery(t *testing.T) { ctx := context.Background() target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + a := tiny.NewNode(0b00000100) // 4 // one known node to start with knownNodes := []tiny.Node{a} @@ -147,7 +147,7 @@ func TestQueryCancelFinishesQuery(t *testing.T) { queryID := QueryID("test") - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) @@ -190,7 +190,7 @@ func TestQueryNoClosest(t *testing.T) { queryID := QueryID("test") - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) @@ -217,9 +217,9 @@ func TestQueryWaitsAtCapacity(t *testing.T) { ctx := context.Background() target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 - b := tiny.NewNode(tiny.Key(0b00001000)) // 8 - c := tiny.NewNode(tiny.Key(0b00010000)) // 16 + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00001000) // 8 + c := tiny.NewNode(0b00010000) // 16 // one known node to start with knownNodes := []tiny.Node{a, b, c} @@ -234,7 +234,7 @@ func TestQueryWaitsAtCapacity(t *testing.T) { queryID := QueryID("test") - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) @@ -264,10 +264,10 @@ 
func TestQueryTimedOutNodeMakesCapacity(t *testing.T) { ctx := context.Background() target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 - b := tiny.NewNode(tiny.Key(0b00001000)) // 8 - c := tiny.NewNode(tiny.Key(0b00010000)) // 16 - d := tiny.NewNode(tiny.Key(0b00100000)) // 32 + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00001000) // 8 + c := tiny.NewNode(0b00010000) // 16 + d := tiny.NewNode(0b00100000) // 32 // ensure the order of the known nodes require.True(t, target.Xor(a.Key()).Compare(target.Xor(b.Key())) == -1) @@ -288,7 +288,7 @@ func TestQueryTimedOutNodeMakesCapacity(t *testing.T) { queryID := QueryID("test") - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) @@ -370,10 +370,10 @@ func TestQueryMessageResponseMakesCapacity(t *testing.T) { ctx := context.Background() target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 - b := tiny.NewNode(tiny.Key(0b00001000)) // 8 - c := tiny.NewNode(tiny.Key(0b00010000)) // 16 - d := tiny.NewNode(tiny.Key(0b00100000)) // 32 + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00001000) // 8 + c := tiny.NewNode(0b00010000) // 16 + d := tiny.NewNode(0b00100000) // 32 // ensure the order of the known nodes require.True(t, target.Xor(a.Key()).Compare(target.Xor(b.Key())) == -1) @@ -393,7 +393,7 @@ func TestQueryMessageResponseMakesCapacity(t *testing.T) { queryID := QueryID("test") - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) @@ -454,10 +454,10 @@ func TestQueryCloserNodesAreAddedToIteration(t *testing.T) { ctx := context.Background() target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 - b := tiny.NewNode(tiny.Key(0b00001000)) // 8 - c := tiny.NewNode(tiny.Key(0b00010000)) // 16 - d := tiny.NewNode(tiny.Key(0b00100000)) // 32 + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00001000) // 8 + c := tiny.NewNode(0b00010000) // 16 + d := tiny.NewNode(0b00100000) // 32 // ensure the order of the known nodes require.True(t, target.Xor(a.Key()).Compare(target.Xor(b.Key())) == -1) @@ -477,7 +477,7 @@ func TestQueryCloserNodesAreAddedToIteration(t *testing.T) { queryID := QueryID("test") - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) @@ -510,10 +510,10 @@ func TestQueryCloserNodesIgnoresDuplicates(t *testing.T) { ctx := context.Background() target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 - b := tiny.NewNode(tiny.Key(0b00001000)) // 8 - c := tiny.NewNode(tiny.Key(0b00010000)) // 16 - d := tiny.NewNode(tiny.Key(0b00100000)) // 32 + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00001000) // 8 + c := tiny.NewNode(0b00010000) // 16 + d := tiny.NewNode(0b00100000) // 32 // ensure the order of the known nodes require.True(t, target.Xor(a.Key()).Compare(target.Xor(b.Key())) == -1) @@ -533,7 +533,7 @@ func TestQueryCloserNodesIgnoresDuplicates(t *testing.T) { queryID := QueryID("test") - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) @@ -572,7 +572,7 @@ func TestQueryCancelFinishesIteration(t *testing.T) { ctx := 
context.Background() target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + a := tiny.NewNode(0b00000100) // 4 // one known node to start with knownNodes := []tiny.Node{a} @@ -587,7 +587,7 @@ func TestQueryCancelFinishesIteration(t *testing.T) { queryID := QueryID("test") - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) @@ -609,8 +609,8 @@ func TestQueryFinishedIgnoresLaterEvents(t *testing.T) { ctx := context.Background() target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 - b := tiny.NewNode(tiny.Key(0b00001000)) // 8 + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00001000) // 8 // one known node to start with knownNodes := []tiny.Node{b} @@ -625,7 +625,7 @@ func TestQueryFinishedIgnoresLaterEvents(t *testing.T) { queryID := QueryID("test") - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) @@ -665,9 +665,9 @@ func TestQueryWithCloserIterIgnoresMessagesFromUnknownNodes(t *testing.T) { ctx := context.Background() target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 - b := tiny.NewNode(tiny.Key(0b00001000)) // 8 - c := tiny.NewNode(tiny.Key(0b00010000)) // 16 + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00001000) // 8 + c := tiny.NewNode(0b00010000) // 16 // one known node to start with knownNodes := []tiny.Node{c} @@ -682,7 +682,7 @@ func TestQueryWithCloserIterIgnoresMessagesFromUnknownNodes(t *testing.T) { queryID := QueryID("test") - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) @@ -715,10 +715,10 @@ func TestQueryWithCloserIterFinishesWhenNumResultsReached(t *testing.T) { ctx := context.Background() target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 - b := tiny.NewNode(tiny.Key(0b00001000)) // 8 - c := tiny.NewNode(tiny.Key(0b00010000)) // 16 - d := tiny.NewNode(tiny.Key(0b00100000)) // 32 + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00001000) // 8 + c := tiny.NewNode(0b00010000) // 16 + d := tiny.NewNode(0b00100000) // 32 // one known node to start with knownNodes := []tiny.Node{a, b, c, d} @@ -734,7 +734,7 @@ func TestQueryWithCloserIterFinishesWhenNumResultsReached(t *testing.T) { queryID := QueryID("test") - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) @@ -773,9 +773,9 @@ func TestQueryWithCloserIterContinuesUntilNumResultsReached(t *testing.T) { ctx := context.Background() target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 - b := tiny.NewNode(tiny.Key(0b00001000)) // 8 - c := tiny.NewNode(tiny.Key(0b00010000)) // 16 + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00001000) // 8 + c := tiny.NewNode(0b00010000) // 16 // one known node to start with, the furthest knownNodes := []tiny.Node{c} @@ -791,7 +791,7 @@ func TestQueryWithCloserIterContinuesUntilNumResultsReached(t *testing.T) { queryID := QueryID("test") - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) @@ -843,10 +843,10 @@ func
TestQueryNotContactedMakesCapacity(t *testing.T) { ctx := context.Background() target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 - b := tiny.NewNode(tiny.Key(0b00001000)) // 8 - c := tiny.NewNode(tiny.Key(0b00010000)) // 16 - d := tiny.NewNode(tiny.Key(0b00100000)) // 32 + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00001000) // 8 + c := tiny.NewNode(0b00010000) // 16 + d := tiny.NewNode(0b00100000) // 32 // ensure the order of the known nodes require.True(t, target.Xor(a.Key()).Compare(target.Xor(b.Key())) == -1) @@ -863,7 +863,7 @@ func TestQueryNotContactedMakesCapacity(t *testing.T) { queryID := QueryID("test") - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) @@ -904,9 +904,9 @@ func TestQueryAllNotContactedFinishes(t *testing.T) { ctx := context.Background() target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 - b := tiny.NewNode(tiny.Key(0b00001000)) // 8 - c := tiny.NewNode(tiny.Key(0b00010000)) // 16 + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00001000) // 8 + c := tiny.NewNode(0b00010000) // 16 // knownNodes are in "random" order knownNodes := []tiny.Node{a, b, c} @@ -921,7 +921,7 @@ func TestQueryAllNotContactedFinishes(t *testing.T) { queryID := QueryID("test") - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) @@ -963,9 +963,9 @@ func TestQueryAllContactedFinishes(t *testing.T) { ctx := context.Background() target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 - b := tiny.NewNode(tiny.Key(0b00001000)) // 8 - c := tiny.NewNode(tiny.Key(0b00010000)) // 16 + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00001000) // 8 + c := tiny.NewNode(0b00010000) // 16 knownNodes := []tiny.Node{a, b, c} @@ -980,7 +980,7 @@ func TestQueryAllContactedFinishes(t *testing.T) { queryID := QueryID("test") - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) @@ -1023,8 +1023,8 @@ func TestQueryNeverMessagesSelf(t *testing.T) { ctx := context.Background() target := tiny.Key(0b00000001) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 - b := tiny.NewNode(tiny.Key(0b00001000)) // 8 + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00001000) // 8 // one known node to start with knownNodes := []tiny.Node{b} diff --git a/v2/coord/routing.go b/v2/coord/routing.go index 94c80907..95268cbe 100644 --- a/v2/coord/routing.go +++ b/v2/coord/routing.go @@ -66,7 +66,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { case *EventStartBootstrap: span.SetAttributes(attribute.String("event", "EventStartBootstrap")) cmd := &routing.EventBootstrapStart[kadt.Key, kadt.PeerID]{ - KnownClosestNodes: SliceOfPeerIDToSliceOfKadPeerID(ev.SeedNodes), + KnownClosestNodes: sliceOfPeerIDToSliceOfKadPeerID(ev.SeedNodes), } // attempt to advance the bootstrap next, ok := r.advanceBootstrap(ctx, cmd) @@ -93,7 +93,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { case *EventRoutingUpdated: span.SetAttributes(attribute.String("event", "EventRoutingUpdated")) cmd := &routing.EventProbeAdd[kadt.Key, kadt.PeerID]{ - NodeID: AddrInfoToKadPeerID(ev.NodeInfo), + NodeID: 
addrInfoToKadPeerID(ev.NodeInfo), } // attempt to advance the probe state machine next, ok := r.advanceProbe(ctx, cmd) @@ -113,7 +113,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { } cmd := &routing.EventBootstrapFindCloserResponse[kadt.Key, kadt.PeerID]{ NodeID: kadt.PeerID(ev.To.ID), - CloserNodes: SliceOfAddrInfoToSliceOfKadPeerID(ev.CloserNodes), + CloserNodes: sliceOfAddrInfoToSliceOfKadPeerID(ev.CloserNodes), } // attempt to advance the bootstrap next, ok := r.advanceBootstrap(ctx, cmd) @@ -273,7 +273,7 @@ func (r *RoutingBehaviour) advanceBootstrap(ctx context.Context, ev routing.Boot case *routing.StateBootstrapFindCloser[kadt.Key, kadt.PeerID]: return &EventOutboundGetCloserNodes{ QueryID: "bootstrap", - To: KadPeerIDToAddrInfo(st.NodeID), + To: kadPeerIDToAddrInfo(st.NodeID), Target: st.Target, Notify: r, }, true @@ -304,7 +304,7 @@ func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.Includ // include wants to send a find node message to a node return &EventOutboundGetCloserNodes{ QueryID: "include", - To: KadPeerIDToAddrInfo(st.NodeID), + To: kadPeerIDToAddrInfo(st.NodeID), Target: st.NodeID.Key(), Notify: r, }, true @@ -314,13 +314,13 @@ func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.Includ // notify other routing state machines that there is a new node in the routing table r.notify(ctx, &EventRoutingUpdated{ - NodeInfo: KadPeerIDToAddrInfo(st.NodeID), + NodeInfo: kadPeerIDToAddrInfo(st.NodeID), }) // return the event to notify outwards too span.SetAttributes(attribute.String("out_event", "EventRoutingUpdated")) return &EventRoutingUpdated{ - NodeInfo: KadPeerIDToAddrInfo(st.NodeID), + NodeInfo: kadPeerIDToAddrInfo(st.NodeID), }, true case *routing.StateIncludeWaitingAtCapacity: // nothing to do except wait for message response or timeout @@ -346,7 +346,7 @@ func (r *RoutingBehaviour) advanceProbe(ctx context.Context, ev routing.ProbeEve // probe wants to send a find node message to a node return &EventOutboundGetCloserNodes{ QueryID: "probe", - To: KadPeerIDToAddrInfo(st.NodeID), + To: kadPeerIDToAddrInfo(st.NodeID), Target: st.NodeID.Key(), Notify: r, }, true @@ -354,7 +354,7 @@ func (r *RoutingBehaviour) advanceProbe(ctx context.Context, ev routing.ProbeEve // a node has failed a connectivity check and has been removed from the routing table and the probe list // add the node to the inclusion list for a second chance r.notify(ctx, &EventAddAddrInfo{ - NodeInfo: KadPeerIDToAddrInfo(st.NodeID), + NodeInfo: kadPeerIDToAddrInfo(st.NodeID), }) case *routing.StateProbeWaitingAtCapacity: // the probe state machine is waiting for responses for checks and the maximum number of concurrent checks has been reached.
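The bulk of the mechanical churn in the query, bootstrap, include, and probe tests in this patch replaces tiny.NewNode(tiny.Key(x)) with tiny.NewNode(x). That compiles because NewNode's parameter appears to be the tiny key type itself, so an untyped integer constant such as 0b00000100 converts implicitly and the explicit tiny.Key(...) wrapping is redundant. A minimal sketch of the fixture types under that assumption (the real definitions live in v2/coord/internal/tiny and go-kademlia and may differ, e.g. in key width):

    package tiny

    import "fmt"

    // Key is a compact 8-bit Kademlia key used only by tests (assumed width).
    type Key uint8

    // Node is a minimal test node identified by its Key.
    type Node struct{ key Key }

    // NewNode takes the key type directly; untyped constants such as
    // 0b00000100 convert implicitly, so call sites need no tiny.Key(...) cast.
    func NewNode(k Key) Node { return Node{key: k} }

    // String renders the key as lowercase hex, matching the
    // ExampleNode_String output added above ("ff" and "55").
    func (n Node) String() string { return fmt.Sprintf("%x", n.key) }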
diff --git a/v2/coord/routing/bootstrap_test.go b/v2/coord/routing/bootstrap_test.go index f66ecec0..df1364df 100644 --- a/v2/coord/routing/bootstrap_test.go +++ b/v2/coord/routing/bootstrap_test.go @@ -55,7 +55,7 @@ func TestBootstrapStartsIdle(t *testing.T) { cfg := DefaultBootstrapConfig[tiny.Key]() cfg.Clock = clk - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) bs, err := NewBootstrap[tiny.Key](self, cfg) require.NoError(t, err) @@ -69,11 +69,11 @@ func TestBootstrapStart(t *testing.T) { cfg := DefaultBootstrapConfig[tiny.Key]() cfg.Clock = clk - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) bs, err := NewBootstrap[tiny.Key](self, cfg) require.NoError(t, err) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + a := tiny.NewNode(0b00000100) // 4 // start the bootstrap state := bs.Advance(ctx, &EventBootstrapStart[tiny.Key, tiny.Node]{ @@ -104,11 +104,11 @@ func TestBootstrapMessageResponse(t *testing.T) { cfg := DefaultBootstrapConfig[tiny.Key]() cfg.Clock = clk - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) bs, err := NewBootstrap[tiny.Key](self, cfg) require.NoError(t, err) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 + a := tiny.NewNode(0b00000100) // 4 // start the bootstrap state := bs.Advance(ctx, &EventBootstrapStart[tiny.Key, tiny.Node]{ @@ -141,14 +141,14 @@ func TestBootstrapProgress(t *testing.T) { cfg.Clock = clk cfg.RequestConcurrency = 3 // 1 less than the 4 nodes to be visited - self := tiny.NewNode(tiny.Key(0)) + self := tiny.NewNode(0) bs, err := NewBootstrap[tiny.Key](self, cfg) require.NoError(t, err) - a := tiny.NewNode(tiny.Key(0b00000100)) // 4 - b := tiny.NewNode(tiny.Key(0b00001000)) // 8 - c := tiny.NewNode(tiny.Key(0b00010000)) // 16 - d := tiny.NewNode(tiny.Key(0b00100000)) // 32 + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00001000) // 8 + c := tiny.NewNode(0b00010000) // 16 + d := tiny.NewNode(0b00100000) // 32 // ensure the order of the known nodes require.True(t, self.Key().Xor(a.Key()).Compare(self.Key().Xor(b.Key())) == -1) diff --git a/v2/coord/routing/include_test.go b/v2/coord/routing/include_test.go index 41e80e3a..a788e8d5 100644 --- a/v2/coord/routing/include_test.go +++ b/v2/coord/routing/include_test.go @@ -54,7 +54,7 @@ func TestIncludeStartsIdle(t *testing.T) { cfg := DefaultIncludeConfig() cfg.Clock = clk - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) bs, err := NewInclude[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) @@ -70,12 +70,12 @@ func TestIncludeAddCandidateStartsCheckIfCapacity(t *testing.T) { cfg.Clock = clk cfg.Concurrency = 1 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) p, err := NewInclude[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) - candidate := tiny.NewNode(tiny.Key(0b00000100)) + candidate := tiny.NewNode(0b00000100) // add a candidate state := p.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{ @@ -104,11 +104,11 @@ func TestIncludeAddCandidateReportsCapacity(t *testing.T) { cfg.Clock = clk cfg.Concurrency = 2 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) p, err := NewInclude[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) - candidate := tiny.NewNode(tiny.Key(0b00000100)) + candidate := tiny.NewNode(0b00000100) // add a candidate state := p.Advance(ctx, 
&EventIncludeAddCandidate[tiny.Key, tiny.Node]{ @@ -130,14 +130,14 @@ func TestIncludeAddCandidateOverQueueLength(t *testing.T) { cfg.QueueCapacity = 2 // only allow two candidates in the queue cfg.Concurrency = 3 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) p, err := NewInclude[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) // add a candidate state := p.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(0b00000100)), + NodeID: tiny.NewNode(0b00000100), }) require.IsType(t, &StateIncludeConnectivityCheck[tiny.Key, tiny.Node]{}, state) @@ -147,7 +147,7 @@ func TestIncludeAddCandidateOverQueueLength(t *testing.T) { // add second candidate state = p.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(0b00000010)), + NodeID: tiny.NewNode(0b00000010), }) // sends a message to the candidate require.IsType(t, &StateIncludeConnectivityCheck[tiny.Key, tiny.Node]{}, state) @@ -159,7 +159,7 @@ func TestIncludeAddCandidateOverQueueLength(t *testing.T) { // add third candidate state = p.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(0b00000011)), + NodeID: tiny.NewNode(0b00000011), }) // sends a message to the candidate require.IsType(t, &StateIncludeConnectivityCheck[tiny.Key, tiny.Node]{}, state) @@ -170,7 +170,7 @@ func TestIncludeAddCandidateOverQueueLength(t *testing.T) { // add fourth candidate state = p.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(0b00000101)), + NodeID: tiny.NewNode(0b00000101), }) // include reports that it is waiting at capacity since 3 messages are already in flight @@ -178,7 +178,7 @@ func TestIncludeAddCandidateOverQueueLength(t *testing.T) { // add fifth candidate state = p.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(0b00000110)), + NodeID: tiny.NewNode(0b00000110), }) // include reports that it is waiting and the candidate queue is full since it @@ -187,7 +187,7 @@ func TestIncludeAddCandidateOverQueueLength(t *testing.T) { // add sixth candidate state = p.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(0b00000111)), + NodeID: tiny.NewNode(0b00000111), }) // include reports that it is still waiting and the candidate queue is full since it @@ -202,20 +202,20 @@ func TestIncludeConnectivityCheckSuccess(t *testing.T) { cfg.Clock = clk cfg.Concurrency = 2 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) p, err := NewInclude[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) // add a candidate state := p.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(0b00000100)), + NodeID: tiny.NewNode(0b00000100), }) require.IsType(t, &StateIncludeConnectivityCheck[tiny.Key, tiny.Node]{}, state) // notify that node was contacted successfully, with no closer nodes state = p.Advance(ctx, &EventIncludeConnectivityCheckSuccess[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(0b00000100)), + NodeID: tiny.NewNode(0b00000100), }) // should respond that the routing table was updated @@ -224,7 +224,7 @@ func TestIncludeConnectivityCheckSuccess(t *testing.T) { st := state.(*StateIncludeRoutingUpdated[tiny.Key, tiny.Node]) // the update is for the correct node - require.Equal(t, 
tiny.NewNode(tiny.Key(4)), st.NodeID) + require.Equal(t, tiny.NewNode(4), st.NodeID) // the routing table should contain the node foundNode, found := rt.GetNode(tiny.Key(4)) @@ -245,20 +245,20 @@ func TestIncludeConnectivityCheckFailure(t *testing.T) { cfg.Clock = clk cfg.Concurrency = 2 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) p, err := NewInclude[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) // add a candidate state := p.Advance(ctx, &EventIncludeAddCandidate[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(0b00000100)), + NodeID: tiny.NewNode(0b00000100), }) require.IsType(t, &StateIncludeConnectivityCheck[tiny.Key, tiny.Node]{}, state) // notify that node was not contacted successfully state = p.Advance(ctx, &EventIncludeConnectivityCheckFailure[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(0b00000100)), + NodeID: tiny.NewNode(0b00000100), }) // should respond that state machine is idle diff --git a/v2/coord/routing/probe_test.go b/v2/coord/routing/probe_test.go index 872b2f50..d4106c51 100644 --- a/v2/coord/routing/probe_test.go +++ b/v2/coord/routing/probe_test.go @@ -59,7 +59,7 @@ func TestProbeStartsIdle(t *testing.T) { cfg := DefaultProbeConfig() cfg.Clock = clk - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) bs, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) @@ -79,13 +79,13 @@ func TestProbeAddChecksPresenceInRoutingTable(t *testing.T) { // Set concurrency to allow one check to run cfg.Concurrency = 1 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) // Add node that isn't in routing table state := sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), }) require.IsType(t, &StateProbeIdle{}, state) @@ -108,8 +108,8 @@ func TestProbeAddStartsCheckIfCapacity(t *testing.T) { // Set concurrency to allow one check to run cfg.Concurrency = 1 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) - rt.AddNode(tiny.NewNode(tiny.Key(4))) + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt.AddNode(tiny.NewNode(4)) sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) @@ -117,7 +117,7 @@ func TestProbeAddStartsCheckIfCapacity(t *testing.T) { // after adding first node the probe should be idle since the // connectivity check will be scheduled for the future state := sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), }) require.IsType(t, &StateProbeIdle{}, state) @@ -148,10 +148,10 @@ func TestProbeAddManyStartsChecksIfCapacity(t *testing.T) { // Set concurrency lower than the number of nodes cfg.Concurrency = 2 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) - rt.AddNode(tiny.NewNode(tiny.Key(4))) - rt.AddNode(tiny.NewNode(tiny.Key(3))) - rt.AddNode(tiny.NewNode(tiny.Key(2))) + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt.AddNode(tiny.NewNode(4)) + rt.AddNode(tiny.NewNode(3)) + rt.AddNode(tiny.NewNode(2)) sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) @@ -159,21 +159,21 @@ func TestProbeAddManyStartsChecksIfCapacity(t *testing.T) { // after adding 
first node the probe should be idle since the // connectivity check will be scheduled for the future state := sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), }) require.IsType(t, &StateProbeIdle{}, state) // after adding second node the probe should still be idle since the // connectivity check will be scheduled for the future state = sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(3)), + NodeID: tiny.NewNode(3), }) require.IsType(t, &StateProbeIdle{}, state) // after adding third node the probe should still be idle since the // connectivity check will be scheduled for the future state = sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(2)), + NodeID: tiny.NewNode(2), }) require.IsType(t, &StateProbeIdle{}, state) @@ -212,8 +212,8 @@ func TestProbeAddReportsCapacity(t *testing.T) { // Set concurrency to allow more than one check to run cfg.Concurrency = 2 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) - rt.AddNode(tiny.NewNode(tiny.Key(4))) + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt.AddNode(tiny.NewNode(4)) sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) @@ -221,7 +221,7 @@ func TestProbeAddReportsCapacity(t *testing.T) { // after adding first node the probe should be idle since the // connectivity check will be scheduled for the future state := sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), }) require.IsType(t, &StateProbeIdle{}, state) @@ -255,8 +255,8 @@ func TestProbeRemoveDeletesNodeValue(t *testing.T) { // Set concurrency to allow more than one check to run cfg.Concurrency = 2 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) - rt.AddNode(tiny.NewNode(tiny.Key(4))) + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt.AddNode(tiny.NewNode(4)) sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) @@ -264,13 +264,13 @@ func TestProbeRemoveDeletesNodeValue(t *testing.T) { // after adding first node the probe should be idle since the // connectivity check will be scheduled for the future state := sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), }) require.IsType(t, &StateProbeIdle{}, state) // remove the node state = sm.Advance(ctx, &EventProbeRemove[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), }) // state indicate that node failed @@ -291,13 +291,13 @@ func TestNodeValueList(t *testing.T) { clk := clock.NewMock() l := NewNodeValueList[tiny.Key, tiny.Node]() nv := &nodeValue[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), NextCheckDue: clk.Now(), } l.Put(nv) - got, found := l.Get(tiny.NewNode(tiny.Key(4))) + got, found := l.Get(tiny.NewNode(4)) require.True(t, found) require.True(t, key.Equal(got.NodeID.Key(), tiny.Key(4))) }) @@ -308,19 +308,19 @@ func TestNodeValueList(t *testing.T) { clk := clock.NewMock() l := NewNodeValueList[tiny.Key, tiny.Node]() nv1 := &nodeValue[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), NextCheckDue: clk.Now(), } l.Put(nv1) nv2 := &nodeValue[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), NextCheckDue: clk.Now().Add(-time.Minute), } l.Put(nv2) - got, found := l.Get(tiny.NewNode(tiny.Key(4))) + 
got, found := l.Get(tiny.NewNode(4)) require.True(t, found) require.True(t, key.Equal(got.NodeID.Key(), tiny.Key(4))) require.Equal(t, nv2.NextCheckDue, got.NextCheckDue) @@ -332,19 +332,19 @@ func TestNodeValueList(t *testing.T) { clk := clock.NewMock() l := NewNodeValueList[tiny.Key, tiny.Node]() nv1 := &nodeValue[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), NextCheckDue: clk.Now(), } l.Put(nv1) nv2 := &nodeValue[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), NextCheckDue: clk.Now().Add(time.Minute), } l.Put(nv2) - got, found := l.Get(tiny.NewNode(tiny.Key(4))) + got, found := l.Get(tiny.NewNode(4)) require.True(t, found) require.True(t, key.Equal(got.NodeID.Key(), tiny.Key(4))) require.Equal(t, nv2.NextCheckDue, got.NextCheckDue) @@ -356,7 +356,7 @@ func TestNodeValueList(t *testing.T) { clk := clock.NewMock() l := NewNodeValueList[tiny.Key, tiny.Node]() nv := &nodeValue[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), NextCheckDue: clk.Now(), } @@ -365,11 +365,11 @@ func TestNodeValueList(t *testing.T) { require.Equal(t, 1, l.PendingCount()) require.Equal(t, 1, l.NodeCount()) - _, found := l.Get(tiny.NewNode(tiny.Key(4))) + _, found := l.Get(tiny.NewNode(4)) require.True(t, found) - l.Remove(tiny.NewNode(tiny.Key(4))) - _, found = l.Get(tiny.NewNode(tiny.Key(4))) + l.Remove(tiny.NewNode(4)) + _, found = l.Get(tiny.NewNode(4)) require.False(t, found) require.Equal(t, 0, l.PendingCount()) @@ -382,14 +382,14 @@ func TestNodeValueList(t *testing.T) { clk := clock.NewMock() l := NewNodeValueList[tiny.Key, tiny.Node]() nv := &nodeValue[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), NextCheckDue: clk.Now(), } l.Put(nv) - l.Remove(tiny.NewNode(tiny.Key(5))) - _, found := l.Get(tiny.NewNode(tiny.Key(4))) + l.Remove(tiny.NewNode(5)) + _, found := l.Get(tiny.NewNode(4)) require.True(t, found) }) @@ -409,7 +409,7 @@ func TestNodeValueList(t *testing.T) { clk := clock.NewMock() l := NewNodeValueList[tiny.Key, tiny.Node]() nv := &nodeValue[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), NextCheckDue: clk.Now(), } l.Put(nv) @@ -425,12 +425,12 @@ func TestNodeValueList(t *testing.T) { clk := clock.NewMock() l := NewNodeValueList[tiny.Key, tiny.Node]() nv1 := &nodeValue[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(5)), + NodeID: tiny.NewNode(5), NextCheckDue: clk.Now().Add(-time.Minute), } l.Put(nv1) nv2 := &nodeValue[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), NextCheckDue: clk.Now().Add(-2 * time.Minute), } l.Put(nv2) @@ -453,13 +453,13 @@ func TestNodeValueList(t *testing.T) { clk := clock.NewMock() l := NewNodeValueList[tiny.Key, tiny.Node]() nv1 := &nodeValue[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(5)), + NodeID: tiny.NewNode(5), Cpl: 1, NextCheckDue: clk.Now().Add(-time.Minute), } l.Put(nv1) nv2 := &nodeValue[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), Cpl: 2, NextCheckDue: clk.Now().Add(-time.Minute), } @@ -483,12 +483,12 @@ func TestNodeValueList(t *testing.T) { clk := clock.NewMock() l := NewNodeValueList[tiny.Key, tiny.Node]() nv1 := &nodeValue[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(5)), + NodeID: tiny.NewNode(5), NextCheckDue: clk.Now().Add(time.Minute), } l.Put(nv1) nv2 := &nodeValue[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), 
NextCheckDue: clk.Now().Add(2 * time.Minute), } l.Put(nv2) @@ -504,7 +504,7 @@ func TestNodeValueList(t *testing.T) { clk := clock.NewMock() l := NewNodeValueList[tiny.Key, tiny.Node]() nv1 := &nodeValue[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(5)), + NodeID: tiny.NewNode(5), NextCheckDue: clk.Now().Add(time.Minute), } l.Put(nv1) @@ -512,7 +512,7 @@ func TestNodeValueList(t *testing.T) { require.Equal(t, 0, l.OngoingCount()) require.Equal(t, 1, l.NodeCount()) - l.MarkOngoing(tiny.NewNode(tiny.Key(5)), clk.Now().Add(time.Minute)) + l.MarkOngoing(tiny.NewNode(5), clk.Now().Add(time.Minute)) require.Equal(t, 0, l.PendingCount()) require.Equal(t, 1, l.OngoingCount()) require.Equal(t, 1, l.NodeCount()) @@ -524,13 +524,13 @@ func TestNodeValueList(t *testing.T) { clk := clock.NewMock() l := NewNodeValueList[tiny.Key, tiny.Node]() nv1 := &nodeValue[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(5)), + NodeID: tiny.NewNode(5), NextCheckDue: clk.Now().Add(-2 * time.Minute), } l.Put(nv1) nv2 := &nodeValue[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), NextCheckDue: clk.Now().Add(-1 * time.Minute), } l.Put(nv2) @@ -561,7 +561,7 @@ func TestNodeValueList(t *testing.T) { clk := clock.NewMock() l := NewNodeValueList[tiny.Key, tiny.Node]() nv1 := &nodeValue[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), NextCheckDue: clk.Now(), } l.Put(nv1) @@ -595,8 +595,8 @@ func TestProbeConnectivityCheckSuccess(t *testing.T) { // Set concurrency to allow more than one check to run cfg.Concurrency = 2 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) - rt.AddNode(tiny.NewNode(tiny.Key(4))) + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt.AddNode(tiny.NewNode(4)) sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) @@ -604,7 +604,7 @@ func TestProbeConnectivityCheckSuccess(t *testing.T) { // after adding first node the probe should be idle since the // connectivity check will be scheduled for the future state := sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), }) require.IsType(t, &StateProbeIdle{}, state) @@ -653,8 +653,8 @@ func TestProbeConnectivityCheckFailure(t *testing.T) { // Set concurrency to allow more than one check to run cfg.Concurrency = 2 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) - rt.AddNode(tiny.NewNode(tiny.Key(4))) + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt.AddNode(tiny.NewNode(4)) sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) @@ -662,7 +662,7 @@ func TestProbeConnectivityCheckFailure(t *testing.T) { // after adding first node the probe should be idle since the // connectivity check will be scheduled for the future state := sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), }) require.IsType(t, &StateProbeIdle{}, state) @@ -707,9 +707,9 @@ func TestProbeNotifyConnectivity(t *testing.T) { cfg.CheckInterval = 10 * time.Minute cfg.Concurrency = 2 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) - rt.AddNode(tiny.NewNode(tiny.Key(4))) - rt.AddNode(tiny.NewNode(tiny.Key(3))) + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt.AddNode(tiny.NewNode(4)) + rt.AddNode(tiny.NewNode(3)) sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) @@ -717,7 +717,7 @@ func 
TestProbeNotifyConnectivity(t *testing.T) { // after adding first node the probe should be idle since the // connectivity check will be scheduled for the future (t0+10) state := sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), }) // not time for a check yet @@ -730,7 +730,7 @@ func TestProbeNotifyConnectivity(t *testing.T) { // add a second node, which will be second in the probe list since its // time of next check will be later (t0+2+10=t0+12) state = sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(3)), + NodeID: tiny.NewNode(3), }) // still not time for a check @@ -743,7 +743,7 @@ func TestProbeNotifyConnectivity(t *testing.T) { // notify that the node with key 4 was connected to successfully by another process // this will delay the time for the next check to t0+11+10=t0+21 state = sm.Advance(ctx, &EventProbeNotifyConnectivity[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), }) // still not time for a check @@ -777,16 +777,16 @@ func TestProbeTimeout(t *testing.T) { cfg.Timeout = 3 * time.Minute cfg.Concurrency = 1 // one probe at a time, timeouts will be used to free capacity if there are more requests - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(tiny.Key(128)), 5) - rt.AddNode(tiny.NewNode(tiny.Key(4))) - rt.AddNode(tiny.NewNode(tiny.Key(3))) + rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt.AddNode(tiny.NewNode(4)) + rt.AddNode(tiny.NewNode(3)) sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) // add a node state := sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(4)), + NodeID: tiny.NewNode(4), }) // not time for a check yet @@ -797,7 +797,7 @@ func TestProbeTimeout(t *testing.T) { // add another node state = sm.Advance(ctx, &EventProbeAdd[tiny.Key, tiny.Node]{ - NodeID: tiny.NewNode(tiny.Key(3)), + NodeID: tiny.NewNode(3), }) // not time for a check yet diff --git a/v2/coord/routing_test.go b/v2/coord/routing_test.go index 7c1e87fd..ded02c3b 100644 --- a/v2/coord/routing_test.go +++ b/v2/coord/routing_test.go @@ -52,7 +52,7 @@ func TestRoutingStartBootstrapSendsEvent(t *testing.T) { // the event that should be passed to the bootstrap state machine expected := &routing.EventBootstrapStart[kadt.Key, kadt.PeerID]{ - KnownClosestNodes: SliceOfPeerIDToSliceOfKadPeerID(ev.SeedNodes), + KnownClosestNodes: sliceOfPeerIDToSliceOfKadPeerID(ev.SeedNodes), } require.Equal(t, expected, bootstrap.Received) } @@ -87,7 +87,7 @@ func TestRoutingBootstrapGetClosestNodesSuccess(t *testing.T) { rev := bootstrap.Received.(*routing.EventBootstrapFindCloserResponse[kadt.Key, kadt.PeerID]) require.Equal(t, nodes[1].NodeInfo.ID, peer.ID(rev.NodeID)) - require.Equal(t, SliceOfAddrInfoToSliceOfKadPeerID(ev.CloserNodes), rev.CloserNodes) + require.Equal(t, sliceOfAddrInfoToSliceOfKadPeerID(ev.CloserNodes), rev.CloserNodes) } func TestRoutingBootstrapGetClosestNodesFailure(t *testing.T) { diff --git a/v2/go.mod b/v2/go.mod index 1473c15b..3e11da4d 100644 --- a/v2/go.mod +++ b/v2/go.mod @@ -15,7 +15,7 @@ require ( github.com/libp2p/go-msgio v0.3.0 github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-multiaddr v0.11.0 - github.com/plprobelab/go-kademlia v0.0.0-20230901130940-286ab4ceca60 + github.com/plprobelab/go-kademlia v0.0.0-20230911085009-18d957853c57 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.17.0 go.opentelemetry.io/otel/exporters/jaeger
v1.16.0 @@ -24,7 +24,7 @@ require ( go.opentelemetry.io/otel/sdk/metric v0.40.0 go.opentelemetry.io/otel/trace v1.17.0 go.uber.org/zap/exp v0.1.0 - golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 google.golang.org/protobuf v1.31.0 ) @@ -49,10 +49,10 @@ require ( github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect + github.com/google/pprof v0.0.0-20230907193218-d3ddc7976beb // indirect github.com/google/uuid v1.3.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect - github.com/huin/goupnp v1.2.0 // indirect + github.com/huin/goupnp v1.3.0 // indirect github.com/ipld/go-ipld-prime v0.21.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect @@ -85,7 +85,7 @@ require ( github.com/multiformats/go-multistream v0.4.1 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/nxadm/tail v1.4.8 // indirect - github.com/onsi/ginkgo/v2 v2.11.0 // indirect + github.com/onsi/ginkgo/v2 v2.12.0 // indirect github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pkg/errors v0.9.1 // indirect @@ -94,10 +94,10 @@ require ( github.com/prometheus/client_golang v1.16.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.11.0 // indirect + github.com/prometheus/procfs v0.11.1 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/qtls-go1-20 v0.3.2 // indirect - github.com/quic-go/quic-go v0.37.6 // indirect + github.com/quic-go/qtls-go1-20 v0.3.4 // indirect + github.com/quic-go/quic-go v0.38.1 // indirect github.com/quic-go/webtransport-go v0.5.3 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect @@ -106,13 +106,13 @@ require ( go.uber.org/fx v1.20.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.25.0 // indirect - golang.org/x/crypto v0.12.0 // indirect + golang.org/x/crypto v0.13.0 // indirect golang.org/x/mod v0.12.0 // indirect - golang.org/x/net v0.14.0 // indirect + golang.org/x/net v0.15.0 // indirect golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.11.0 // indirect - golang.org/x/text v0.12.0 // indirect - golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect + golang.org/x/sys v0.12.0 // indirect + golang.org/x/text v0.13.0 // indirect + golang.org/x/tools v0.13.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect ) diff --git a/v2/go.sum b/v2/go.sum index bc586fb5..df21f778 100644 --- a/v2/go.sum +++ b/v2/go.sum @@ -98,8 +98,8 @@ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBBos92HalKpaGKHrp+3Uo6yTodo= -github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b/go.mod 
h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20230907193218-d3ddc7976beb h1:LCMfzVg3sflxTs4UvuP4D8CkoZnfHLe2qzqgDn/4OHs= +github.com/google/pprof v0.0.0-20230907193218-d3ddc7976beb/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= @@ -115,8 +115,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpg github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= -github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/ipfs/boxo v0.12.0 h1:AXHg/1ONZdRQHQLgG5JHsSC3XoE4DjCAMgK+asZvUcQ= github.com/ipfs/boxo v0.12.0/go.mod h1:xAnfiU6PtxWCnRqu7dcXQ10bB5/kvI1kXRotuGqGBhg= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= @@ -246,10 +246,10 @@ github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= -github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= +github.com/onsi/ginkgo/v2 v2.12.0 h1:UIVDowFPwpg6yMUpPjGkYvf06K3RAiJXUhCxEwQVHRI= +github.com/onsi/ginkgo/v2 v2.12.0/go.mod h1:ZNEzXISYlqpb8S36iN71ifqLi3vVD1rVJGvWRCJOUpQ= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= +github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -259,8 +259,8 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhM github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/plprobelab/go-kademlia v0.0.0-20230901130940-286ab4ceca60 h1:fgo8NhFeL+p7atahZNtvo1BfWClUNRvAjzC2ikEwvsY= -github.com/plprobelab/go-kademlia v0.0.0-20230901130940-286ab4ceca60/go.mod h1:OMu6Kyh5AetV3uLRVSZlp6WcwrZUn3nyRFaRuJxVWJQ= +github.com/plprobelab/go-kademlia v0.0.0-20230911085009-18d957853c57 h1:9qB1pIoeis/hdhTxVcQLrFYJhGVRJIJESLP/kCud5HE= +github.com/plprobelab/go-kademlia v0.0.0-20230911085009-18d957853c57/go.mod h1:9mz9/8plJj9HWiQmB6JkBNHY30AXzy9LrJ++sCvWqFQ= 
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= @@ -275,14 +275,14 @@ github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7q github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk= -github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/qtls-go1-20 v0.3.2 h1:rRgN3WfnKbyik4dBV8A6girlJVxGand/d+jVKbQq5GI= -github.com/quic-go/qtls-go1-20 v0.3.2/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= -github.com/quic-go/quic-go v0.37.6 h1:2IIUmQzT5YNxAiaPGjs++Z4hGOtIR0q79uS5qE9ccfY= -github.com/quic-go/quic-go v0.37.6/go.mod h1:YsbH1r4mSHPJcLF4k4zruUkLBqctEMBDR6VPvcYjIsU= +github.com/quic-go/qtls-go1-20 v0.3.4 h1:MfFAPULvst4yoMgY9QmtpYmfij/em7O8UUi+bNVm7Cg= +github.com/quic-go/qtls-go1-20 v0.3.4/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= +github.com/quic-go/quic-go v0.38.1 h1:M36YWA5dEhEeT+slOu/SwMEucbYd0YFidxG3KlGPZaE= +github.com/quic-go/quic-go v0.38.1/go.mod h1:ijnZM7JsFIkp4cRyjxJNIzdSfCLmUMg9wdyhGmg+SN4= github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU= github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= @@ -388,11 +388,11 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= -golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -421,8 +421,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -457,14 +457,14 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -484,8 +484,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= -golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod 
h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/v2/handlers_test.go b/v2/handlers_test.go index 115b8bc1..e80e7192 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -12,8 +12,7 @@ import ( "testing" "time" - "google.golang.org/protobuf/proto" - + "github.com/benbjohnson/clock" "github.com/ipfs/boxo/ipns" "github.com/ipfs/boxo/path" ds "github.com/ipfs/go-datastore" @@ -25,6 +24,7 @@ import ( ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p-kad-dht/v2/pb" @@ -430,12 +430,12 @@ func BenchmarkDHT_handlePing(b *testing.B) { } } -func newPutIPNSRequest(t testing.TB, priv crypto.PrivKey, seq uint64, eol time.Time, ttl time.Duration) *pb.Message { +func newPutIPNSRequest(t testing.TB, clk clock.Clock, priv crypto.PrivKey, seq uint64, ttl time.Duration) *pb.Message { t.Helper() testPath := path.Path("/ipfs/bafkqac3jobxhgidsn5rww4yk") - rec, err := ipns.NewRecord(priv, testPath, seq, eol, ttl) + rec, err := ipns.NewRecord(priv, testPath, seq, clk.Now().Add(ttl), ttl) require.NoError(t, err) remote, err := peer.IDFromPublicKey(priv.GetPublic()) @@ -467,7 +467,7 @@ func BenchmarkDHT_handlePutValue_unique_peers(b *testing.B) { for i := 0; i < b.N; i++ { remote, priv := newIdentity(b) peers[i] = remote - reqs[i] = newPutIPNSRequest(b, priv, uint64(i), time.Now().Add(time.Hour), time.Hour) + reqs[i] = newPutIPNSRequest(b, d.cfg.Clock, priv, uint64(i), time.Hour) } ctx := context.Background() @@ -489,7 +489,7 @@ func BenchmarkDHT_handlePutValue_single_peer(b *testing.B) { remote, priv := newIdentity(b) reqs := make([]*pb.Message, b.N) for i := 0; i < b.N; i++ { - reqs[i] = newPutIPNSRequest(b, priv, uint64(i), time.Now().Add(time.Hour), time.Hour) + reqs[i] = newPutIPNSRequest(b, d.cfg.Clock, priv, uint64(i), time.Hour) } ctx := context.Background() @@ -508,19 +508,28 @@ func TestDHT_handlePutValue_happy_path_ipns_record(t *testing.T) { ctx := context.Background() // init new DHT - d := newTestDHT(t) + clk := clock.NewMock() + clk.Set(time.Now()) // needed because record validators don't use mock clocks + + cfg := DefaultConfig() + cfg.Clock = clk + + d := newTestDHTWithConfig(t, cfg) // generate new identity for the peer that issues the request remote, priv := newIdentity(t) // expired record - req := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) + req := newPutIPNSRequest(t, clk, priv, 0, time.Hour) ns, suffix, err := record.SplitKey(string(req.Key)) require.NoError(t, err) _, err = d.backends[ns].Fetch(ctx, suffix) require.ErrorIs(t, err, ds.ErrNotFound) + // advance the clock a bit so that TimeReceived values will be definitely different + clk.Add(time.Minute) + cloned := proto.Clone(req).(*pb.Message) _, err = d.handlePutValue(ctx, remote, cloned) require.NoError(t, err) @@ -586,7 +595,7 @@ func TestDHT_handlePutValue_bad_ipns_record(t *testing.T) { remote, priv := newIdentity(t) // expired 
record - req := newPutIPNSRequest(t, priv, 10, time.Now().Add(-time.Hour), -time.Hour) + req := newPutIPNSRequest(t, d.cfg.Clock, priv, 10, -time.Hour) resp, err := d.handlePutValue(context.Background(), remote, req) assert.Error(t, err) @@ -599,8 +608,8 @@ func TestDHT_handlePutValue_worse_ipns_record_after_first_put(t *testing.T) { remote, priv := newIdentity(t) - goodReq := newPutIPNSRequest(t, priv, 10, time.Now().Add(time.Hour), time.Hour) - worseReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) + goodReq := newPutIPNSRequest(t, d.cfg.Clock, priv, 10, time.Hour) + worseReq := newPutIPNSRequest(t, d.cfg.Clock, priv, 0, time.Hour) for i, req := range []*pb.Message{goodReq, worseReq} { resp, err := d.handlePutValue(context.Background(), remote, req) @@ -628,8 +637,8 @@ func TestDHT_handlePutValue_probe_race_condition(t *testing.T) { for i := 0; i < 100; i++ { - req1 := newPutIPNSRequest(t, priv, uint64(2*i), time.Now().Add(time.Hour), time.Hour) - req2 := newPutIPNSRequest(t, priv, uint64(2*i+1), time.Now().Add(time.Hour), time.Hour) + req1 := newPutIPNSRequest(t, d.cfg.Clock, priv, uint64(2*i), time.Hour) + req2 := newPutIPNSRequest(t, d.cfg.Clock, priv, uint64(2*i+1), time.Hour) var wg sync.WaitGroup wg.Add(1) @@ -671,7 +680,7 @@ func TestDHT_handlePutValue_overwrites_corrupt_stored_ipns_record(t *testing.T) remote, priv := newIdentity(t) - req := newPutIPNSRequest(t, priv, 10, time.Now().Add(time.Hour), time.Hour) + req := newPutIPNSRequest(t, d.cfg.Clock, priv, 10, time.Hour) dsKey := newDatastoreKey(namespaceIPNS, string(remote)) // string(remote) is the key suffix @@ -761,7 +770,7 @@ type atomicPutValidator struct{} var _ record.Validator = (*atomicPutValidator)(nil) -func (v atomicPutValidator) Validate(key string, value []byte) error { +func (v atomicPutValidator) Validate(_ string, value []byte) error { if bytes.Equal(value, []byte("expired")) { return errors.New("expired") } @@ -800,7 +809,7 @@ func TestDHT_handlePutValue_moved_from_v1_atomic_operation(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - ds, err := InMemoryDatastore() + dstore, err := InMemoryDatastore() require.NoError(t, err) cfg, err := DefaultRecordBackendConfig() @@ -810,7 +819,7 @@ func TestDHT_handlePutValue_moved_from_v1_atomic_operation(t *testing.T) { cfg: cfg, log: devnull, namespace: "test", - datastore: ds, + datastore: dstore, validator: atomicPutValidator{}, } @@ -876,7 +885,7 @@ func BenchmarkDHT_handleGetValue(b *testing.B) { for i := 0; i < b.N; i++ { pid, priv := newIdentity(b) - putReq := newPutIPNSRequest(b, priv, 0, time.Now().Add(time.Hour), time.Hour) + putReq := newPutIPNSRequest(b, d.cfg.Clock, priv, 0, time.Hour) data, err := putReq.Record.Marshal() require.NoError(b, err) @@ -913,7 +922,7 @@ func TestDHT_handleGetValue_happy_path_ipns_record(t *testing.T) { remote, priv := newIdentity(t) - putReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour) + putReq := newPutIPNSRequest(t, d.cfg.Clock, priv, 0, time.Hour) rbe, err := typedBackend[*RecordBackend](d, namespaceIPNS) require.NoError(t, err) @@ -1004,13 +1013,19 @@ func TestDHT_handleGetValue_corrupt_record_in_datastore(t *testing.T) { } func TestDHT_handleGetValue_ipns_max_age_exceeded_in_datastore(t *testing.T) { - d := newTestDHT(t) + clk := clock.NewMock() + clk.Set(time.Now()) // needed because record validators don't use mock clocks + + cfg := DefaultConfig() + cfg.Clock = clk + + d := newTestDHTWithConfig(t, cfg) fillRoutingTable(t, d, 250) 
remote, priv := newIdentity(t)
 
-	putReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(time.Hour), time.Hour)
+	putReq := newPutIPNSRequest(t, clk, priv, 0, time.Hour)
 
 	rbe, err := typedBackend[*RecordBackend](d, namespaceIPNS)
 	require.NoError(t, err)
@@ -1028,6 +1043,10 @@ func TestDHT_handleGetValue_ipns_max_age_exceeded_in_datastore(t *testing.T) {
 		Key: putReq.GetKey(),
 	}
 
+	// The following line is actually not necessary because we set the
+	// MaxRecordAge to 0. However, this fixes a time granularity bug on Windows.
+	clk.Add(time.Minute)
+
 	rbe.cfg.MaxRecordAge = 0
 
 	resp, err := d.handleGetValue(context.Background(), remote, req)
@@ -1056,7 +1075,7 @@ func TestDHT_handleGetValue_does_not_validate_stored_record(t *testing.T) {
 	remote, priv := newIdentity(t)
 
 	// generate expired record (doesn't pass validation)
-	putReq := newPutIPNSRequest(t, priv, 0, time.Now().Add(-time.Hour), -time.Hour)
+	putReq := newPutIPNSRequest(t, d.cfg.Clock, priv, 0, -time.Hour)
 
 	data, err := putReq.Record.Marshal()
 	require.NoError(t, err)

From 1d355051930b9ba09044f264f42ad39416c2ad5a Mon Sep 17 00:00:00 2001
From: Ian Davis <18375+iand@users.noreply.github.com>
Date: Mon, 18 Sep 2023 12:58:01 +0100
Subject: [PATCH 45/64] Test query interactions with routing table (#887)

* Test query interactions with routing table
* v2: upgrade to go1.21
* Add uci config
* Use newer uci actions
* Pass clock to coordinator
* Use v2 working directory in actions
* Set go-version input in actions
* Set go-version input in actions
* Use go 1.20.8 in actions
* Use go 1.21.1 and relative working directory
* Try default working directory on job
* Remove uci.yaml which is not supported yet
* Try default working directory on job
* Try default working directory as input
* Restore uci.yaml
* Restore uci.yaml
* Use modified go-check
* Use modified go-test
* Fix go-test
* Fix go-test
* Fix go-test
* Update go-kademlia
* Add more tracing
* Use go-kademlia trie fix
---
 v2/coord/coordinator.go        |  33 +++++++
 v2/coord/event.go              |  38 +++++++-
 v2/coord/query.go              |   5 ++
 v2/coord/query/query.go        |  10 +++
 v2/coord/routing.go            |  45 +++++++++-
 v2/coord/routing/bootstrap.go  |  14 +++
 v2/coord/routing/include.go    |  10 ++-
 v2/coord/routing/probe.go      |   6 ++
 v2/dht.go                      |   1 +
 v2/go.mod                      |   2 +-
 v2/go.sum                      |   4 +-
 v2/handlers.go                 |   3 +
 v2/handlers_test.go            |   4 +-
 v2/internal/kadtest/tracing.go |  49 +++++++++--
 v2/notifee.go                  |  10 +++
 v2/notifee_test.go             |  21 +++++
 v2/query_test.go               | 155 +++++++++++++++++++++++++++++++++
 v2/router.go                   |   2 +-
 v2/routing.go                  |   2 -
 v2/tele/tele.go                |   5 ++
 20 files changed, 400 insertions(+), 19 deletions(-)

diff --git a/v2/coord/coordinator.go b/v2/coord/coordinator.go
index 9641c165..9c1e8a70 100644
--- a/v2/coord/coordinator.go
+++ b/v2/coord/coordinator.go
@@ -308,6 +308,8 @@ func (c *Coordinator) dispatchEvent(ctx context.Context, ev BehaviourEvent) {
 // GetNode retrieves the node associated with the given node id from the DHT's local routing table.
 // If the node isn't found in the table, it returns ErrNodeNotFound.
 func (c *Coordinator) GetNode(ctx context.Context, id peer.ID) (Node, error) {
+	ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.GetNode")
+	defer span.End()
 	if _, exists := c.rt.GetNode(kadt.PeerID(id).Key()); !exists {
 		return nil, ErrNodeNotFound
 	}
@@ -321,6 +323,8 @@ func (c *Coordinator) GetNode(ctx context.Context, id peer.ID) (Node, error) {

 // GetClosestNodes requests the n closest nodes to the key from the node's local routing table.
func (c *Coordinator) GetClosestNodes(ctx context.Context, k kadt.Key, n int) ([]Node, error) {
+	ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.GetClosestNodes")
+	defer span.End()
 	closest := c.rt.NearestNodes(k, n)
 	nodes := make([]Node, 0, len(closest))
 	for _, id := range closest {
@@ -462,3 +466,32 @@ func (c *Coordinator) Bootstrap(ctx context.Context, seeds []peer.ID) error {

 	return nil
 }
+
+// NotifyConnectivity notifies the coordinator that a peer has passed a connectivity check,
+// which means it is connected and supports finding closer nodes.
+func (c *Coordinator) NotifyConnectivity(ctx context.Context, id peer.ID) error {
+	ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.NotifyConnectivity")
+	defer span.End()
+
+	ai := peer.AddrInfo{
+		ID: id,
+	}
+	c.routingBehaviour.Notify(ctx, &EventNotifyConnectivity{
+		NodeInfo: ai,
+	})
+
+	return nil
+}
+
+// NotifyNonConnectivity notifies the coordinator that a peer has failed a connectivity check,
+// which means it is not connected and/or it doesn't support finding closer nodes.
+func (c *Coordinator) NotifyNonConnectivity(ctx context.Context, id peer.ID) error {
+	ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.NotifyNonConnectivity")
+	defer span.End()
+
+	c.routingBehaviour.Notify(ctx, &EventNotifyNonConnectivity{
+		NodeID: id,
+	})
+
+	return nil
+}
diff --git a/v2/coord/event.go b/v2/coord/event.go
index 2355e259..69a9d5d7 100644
--- a/v2/coord/event.go
+++ b/v2/coord/event.go
@@ -86,6 +86,8 @@ type EventStopQuery struct {
 func (*EventStopQuery) behaviourEvent() {}
 func (*EventStopQuery) queryCommand() {}

+// EventAddAddrInfo notifies the routing behaviour of a potential new peer or of additional addresses for
+// an existing peer.
 type EventAddAddrInfo struct {
 	NodeInfo peer.AddrInfo
 }
@@ -93,9 +95,11 @@ type EventAddAddrInfo struct {
 func (*EventAddAddrInfo) behaviourEvent() {}
 func (*EventAddAddrInfo) routingCommand() {}

+// EventGetCloserNodesSuccess notifies a behaviour that a GetCloserNodes request, initiated by an
+// [EventOutboundGetCloserNodes] event, has produced a successful response.
 type EventGetCloserNodesSuccess struct {
 	QueryID     query.QueryID
-	To          peer.AddrInfo
+	To          peer.AddrInfo // To is the peer address that the GetCloserNodes request was sent to.
 	Target      kadt.Key
 	CloserNodes []peer.AddrInfo
 }
@@ -103,9 +107,11 @@ type EventGetCloserNodesSuccess struct {
 func (*EventGetCloserNodesSuccess) behaviourEvent() {}
 func (*EventGetCloserNodesSuccess) nodeHandlerResponse() {}

+// EventGetCloserNodesFailure notifies a behaviour that a GetCloserNodes request, initiated by an
+// [EventOutboundGetCloserNodes] event, has failed to produce a valid response.
 type EventGetCloserNodesFailure struct {
 	QueryID     query.QueryID
-	To          peer.AddrInfo
+	To          peer.AddrInfo // To is the peer address that the GetCloserNodes request was sent to.
 	Target      kadt.Key
 	Err         error
 }
@@ -141,6 +147,14 @@ type EventRoutingUpdated struct {
 func (*EventRoutingUpdated) behaviourEvent() {}
 func (*EventRoutingUpdated) routingNotification() {}

+// EventRoutingRemoved is emitted by the coordinator when a node has been removed from the routing table.
+type EventRoutingRemoved struct {
+	NodeID peer.ID
+}
+
+func (*EventRoutingRemoved) behaviourEvent() {}
+func (*EventRoutingRemoved) routingNotification() {}
+
 // EventBootstrapFinished is emitted by the coordinator when a bootstrap has finished, either through
 // running to completion or by being canceled.
type EventBootstrapFinished struct {
@@ -149,3 +163,23 @@ type EventBootstrapFinished struct {

 func (*EventBootstrapFinished) behaviourEvent() {}
 func (*EventBootstrapFinished) routingNotification() {}
+
+// EventNotifyConnectivity notifies a behaviour that a peer's connectivity and support for finding closer nodes
+// has been confirmed, such as from a successful query response or an inbound query. This should not be used for
+// general connections to the host but only when it is confirmed that the peer responds to requests for closer
+// nodes.
+type EventNotifyConnectivity struct {
+	NodeInfo peer.AddrInfo
+}
+
+func (*EventNotifyConnectivity) behaviourEvent() {}
+func (*EventNotifyConnectivity) routingNotification() {}
+
+// EventNotifyNonConnectivity notifies a behaviour that a peer is known to not have connectivity and/or to not
+// support finding closer nodes.
+type EventNotifyNonConnectivity struct {
+	NodeID peer.ID
+}
+
+func (*EventNotifyNonConnectivity) behaviourEvent() {}
+func (*EventNotifyNonConnectivity) routingCommand() {}
diff --git a/v2/coord/query.go b/v2/coord/query.go
index 8bdfbd53..6857fc6d 100644
--- a/v2/coord/query.go
+++ b/v2/coord/query.go
@@ -81,6 +81,11 @@ func (p *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) {
 			CloserNodes: sliceOfAddrInfoToSliceOfKadPeerID(ev.CloserNodes),
 		}
 	case *EventGetCloserNodesFailure:
+		// queue an event that will notify the routing behaviour of a failed node
+		p.pending = append(p.pending, &EventNotifyNonConnectivity{
+			ev.To.ID,
+		})
+
 		cmd = &query.EventPoolFindCloserFailure[kadt.Key, kadt.PeerID]{
 			NodeID:  kadt.PeerID(ev.To.ID),
 			QueryID: ev.QueryID,
diff --git a/v2/coord/query/query.go b/v2/coord/query/query.go
index e5009a04..5982448d 100644
--- a/v2/coord/query/query.go
+++ b/v2/coord/query/query.go
@@ -130,14 +130,18 @@ func (q *Query[K, N]) Advance(ctx context.Context, ev QueryEvent) QueryState {
 	switch tev := ev.(type) {
 	case *EventQueryCancel:
+		span.SetAttributes(tele.AttrEvent("EventQueryCancel"))
 		q.markFinished()
 		return &StateQueryFinished{
 			QueryID: q.id,
 			Stats:   q.stats,
 		}
 	case *EventQueryFindCloserResponse[K, N]:
+		span.SetAttributes(tele.AttrEvent("EventQueryFindCloserResponse"))
 		q.onMessageResponse(ctx, tev.NodeID, tev.CloserNodes)
 	case *EventQueryFindCloserFailure[K, N]:
+		span.SetAttributes(tele.AttrEvent("EventQueryFindCloserFailure"))
+		span.RecordError(tev.Error)
 		q.onMessageFailure(ctx, tev.NodeID)
 	case nil:
 		// TEMPORARY: no event to process
@@ -170,6 +174,7 @@ func (q *Query[K, N]) Advance(ctx context.Context, ev QueryEvent) QueryState {
 			q.inFlight--
 			q.stats.Failure++
 		} else if atCapacity() {
+			span.SetAttributes(tele.AttrOutEvent("StateQueryWaitingAtCapacity")) // this is the query's tracing span
 			returnState = &StateQueryWaitingAtCapacity{
 				QueryID: q.id,
 				Stats:   q.stats,
@@ -186,6 +191,7 @@
 		// If it has contacted at least NumResults nodes successfully then the iteration is done.
if !progressing && successes >= q.cfg.NumResults {
 			q.markFinished()
+			span.SetAttributes(tele.AttrOutEvent("StateQueryFinished")) // this is the query's tracing span
 			returnState = &StateQueryFinished{
 				QueryID: q.id,
 				Stats:   q.stats,
@@ -202,6 +208,7 @@
 		if q.stats.Start.IsZero() {
 			q.stats.Start = q.cfg.Clock.Now()
 		}
+		span.SetAttributes(tele.AttrOutEvent("StateQueryFindCloser")) // this is the query's tracing span
 		returnState = &StateQueryFindCloser[K, N]{
 			NodeID:  ni.NodeID,
 			QueryID: q.id,
@@ -211,6 +218,7 @@
 		return true
 	}

+	span.SetAttributes(tele.AttrOutEvent("StateQueryWaitingAtCapacity")) // this is the query's tracing span
 	returnState = &StateQueryWaitingAtCapacity{
 		QueryID: q.id,
 		Stats:   q.stats,
@@ -233,6 +241,7 @@
 	if q.inFlight > 0 {
 		// The iterator is still waiting for results and not at capacity
+		span.SetAttributes(tele.AttrOutEvent("StateQueryWaitingWithCapacity"))
 		return &StateQueryWaitingWithCapacity{
 			QueryID: q.id,
 			Stats:   q.stats,
@@ -242,6 +251,7 @@
 	// The iterator is finished because all available nodes have been contacted
 	// and the iterator is not waiting for any more results.
 	q.markFinished()
+	span.SetAttributes(tele.AttrOutEvent("StateQueryFinished"))
 	return &StateQueryFinished{
 		QueryID: q.id,
 		Stats:   q.stats,
diff --git a/v2/coord/routing.go b/v2/coord/routing.go
index 95268cbe..f9edbe3f 100644
--- a/v2/coord/routing.go
+++ b/v2/coord/routing.go
@@ -91,7 +91,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) {
 		}

 	case *EventRoutingUpdated:
-		span.SetAttributes(attribute.String("event", "EventRoutingUpdated"))
+		span.SetAttributes(attribute.String("event", "EventRoutingUpdated"), attribute.String("nodeid", ev.NodeInfo.ID.String()))
 		cmd := &routing.EventProbeAdd[kadt.Key, kadt.PeerID]{
 			NodeID: addrInfoToKadPeerID(ev.NodeInfo),
 		}
@@ -201,6 +201,41 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) {
 		default:
 			panic(fmt.Sprintf("unexpected query id: %s", ev.QueryID))
 		}
+	case *EventNotifyConnectivity:
+		span.SetAttributes(attribute.String("event", "EventNotifyConnectivity"), attribute.String("nodeid", ev.NodeInfo.ID.String()))
+		// ignore self
+		if ev.NodeInfo.ID == peer.ID(r.self) {
+			break
+		}
+		// tell the include state machine in case this is a new peer that could be added to the routing table
+		cmd := &routing.EventIncludeAddCandidate[kadt.Key, kadt.PeerID]{
+			NodeID: kadt.PeerID(ev.NodeInfo.ID),
+		}
+		next, ok := r.advanceInclude(ctx, cmd)
+		if ok {
+			r.pending = append(r.pending, next)
+		}
+
+		// tell the probe state machine in case there are connectivity checks that could be satisfied
+		cmdProbe := &routing.EventProbeNotifyConnectivity[kadt.Key, kadt.PeerID]{
+			NodeID: kadt.PeerID(ev.NodeInfo.ID),
+		}
+		nextProbe, ok := r.advanceProbe(ctx, cmdProbe)
+		if ok {
+			r.pending = append(r.pending, nextProbe)
+		}
+	case *EventNotifyNonConnectivity:
+		span.SetAttributes(attribute.String("event", "EventNotifyNonConnectivity"), attribute.String("nodeid", ev.NodeID.String()))
+
+		// tell the probe state machine to remove the node from the routing table and probe list
+		cmdProbe := &routing.EventProbeRemove[kadt.Key, kadt.PeerID]{
+			NodeID: kadt.PeerID(ev.NodeID),
+		}
+		nextProbe, ok := r.advanceProbe(ctx, cmdProbe)
+		if
ok { + r.pending = append(r.pending, nextProbe) + } + default: panic(fmt.Sprintf("unexpected dht event: %T", ev)) } @@ -351,7 +386,13 @@ func (r *RoutingBehaviour) advanceProbe(ctx context.Context, ev routing.ProbeEve Notify: r, }, true case *routing.StateProbeNodeFailure[kadt.Key, kadt.PeerID]: - // a node has failed a connectivity check been removed from the routing table and the probe list + // a node has failed a connectivity check and been removed from the routing table and the probe list + + // emit an EventRoutingRemoved event to notify clients that the node has been removed + r.pending = append(r.pending, &EventRoutingRemoved{ + NodeID: peer.ID(st.NodeID), + }) + // add the node to the inclusion list for a second chance r.notify(ctx, &EventAddAddrInfo{ NodeInfo: kadPeerIDToAddrInfo(st.NodeID), diff --git a/v2/coord/routing/bootstrap.go b/v2/coord/routing/bootstrap.go index 2c674b00..683683a7 100644 --- a/v2/coord/routing/bootstrap.go +++ b/v2/coord/routing/bootstrap.go @@ -8,6 +8,7 @@ import ( "github.com/benbjohnson/clock" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/kaderr" + "go.opentelemetry.io/otel/attribute" "github.com/libp2p/go-libp2p-kad-dht/v2/coord/query" "github.com/libp2p/go-libp2p-kad-dht/v2/tele" @@ -96,6 +97,7 @@ func (b *Bootstrap[K, N]) Advance(ctx context.Context, ev BootstrapEvent) Bootst switch tev := ev.(type) { case *EventBootstrapStart[K, N]: + span.SetAttributes(tele.AttrEvent("EventBootstrapStart")) // TODO: ignore start event if query is already in progress iter := query.NewClosestNodesIter[K, N](b.self.Key()) @@ -116,17 +118,21 @@ func (b *Bootstrap[K, N]) Advance(ctx context.Context, ev BootstrapEvent) Bootst return b.advanceQuery(ctx, nil) case *EventBootstrapFindCloserResponse[K, N]: + span.SetAttributes(tele.AttrEvent("EventBootstrapFindCloserResponse")) return b.advanceQuery(ctx, &query.EventQueryFindCloserResponse[K, N]{ NodeID: tev.NodeID, CloserNodes: tev.CloserNodes, }) case *EventBootstrapFindCloserFailure[K, N]: + span.SetAttributes(tele.AttrEvent("EventBootstrapFindCloserFailure")) + span.RecordError(tev.Error) return b.advanceQuery(ctx, &query.EventQueryFindCloserFailure[K, N]{ NodeID: tev.NodeID, Error: tev.Error, }) case *EventBootstrapPoll: + span.SetAttributes(tele.AttrEvent("EventBootstrapPoll")) // ignore, nothing to do default: panic(fmt.Sprintf("unexpected event: %T", tev)) @@ -140,9 +146,12 @@ func (b *Bootstrap[K, N]) Advance(ctx context.Context, ev BootstrapEvent) Bootst } func (b *Bootstrap[K, N]) advanceQuery(ctx context.Context, qev query.QueryEvent) BootstrapState { + ctx, span := tele.StartSpan(ctx, "Bootstrap.advanceQuery") + defer span.End() state := b.qry.Advance(ctx, qev) switch st := state.(type) { case *query.StateQueryFindCloser[K, N]: + span.SetAttributes(attribute.String("out_state", "StateQueryFindCloser")) return &StateBootstrapFindCloser[K, N]{ QueryID: st.QueryID, Stats: st.Stats, @@ -150,26 +159,31 @@ func (b *Bootstrap[K, N]) advanceQuery(ctx context.Context, qev query.QueryEvent Target: st.Target, } case *query.StateQueryFinished: + span.SetAttributes(attribute.String("out_state", "StateBootstrapFinished")) return &StateBootstrapFinished{ Stats: st.Stats, } case *query.StateQueryWaitingAtCapacity: elapsed := b.cfg.Clock.Since(st.Stats.Start) if elapsed > b.cfg.Timeout { + span.SetAttributes(attribute.String("out_state", "StateBootstrapTimeout")) return &StateBootstrapTimeout{ Stats: st.Stats, } } + span.SetAttributes(attribute.String("out_state", "StateBootstrapWaiting")) return 
&StateBootstrapWaiting{ Stats: st.Stats, } case *query.StateQueryWaitingWithCapacity: elapsed := b.cfg.Clock.Since(st.Stats.Start) if elapsed > b.cfg.Timeout { + span.SetAttributes(attribute.String("out_state", "StateBootstrapTimeout")) return &StateBootstrapTimeout{ Stats: st.Stats, } } + span.SetAttributes(attribute.String("out_state", "StateBootstrapWaiting")) return &StateBootstrapWaiting{ Stats: st.Stats, } diff --git a/v2/coord/routing/include.go b/v2/coord/routing/include.go index 0830a6b1..749fe931 100644 --- a/v2/coord/routing/include.go +++ b/v2/coord/routing/include.go @@ -106,6 +106,7 @@ func (b *Include[K, N]) Advance(ctx context.Context, ev IncludeEvent) IncludeSta switch tev := ev.(type) { case *EventIncludeAddCandidate[K, N]: + span.SetAttributes(tele.AttrEvent("EventIncludeAddCandidate")) // Ignore if already running a check _, checking := b.checks[key.HexString(tev.NodeID.Key())] if checking { @@ -124,20 +125,25 @@ func (b *Include[K, N]) Advance(ctx context.Context, ev IncludeEvent) IncludeSta b.candidates.Enqueue(ctx, tev.NodeID) case *EventIncludeConnectivityCheckSuccess[K, N]: + span.SetAttributes(tele.AttrEvent("EventIncludeConnectivityCheckSuccess")) ch, ok := b.checks[key.HexString(tev.NodeID.Key())] if ok { delete(b.checks, key.HexString(tev.NodeID.Key())) if b.rt.AddNode(tev.NodeID) { + span.SetAttributes(tele.AttrOutEvent("StateIncludeRoutingUpdated")) return &StateIncludeRoutingUpdated[K, N]{ NodeID: ch.NodeID, } } } case *EventIncludeConnectivityCheckFailure[K, N]: + span.SetAttributes(tele.AttrEvent("EventIncludeConnectivityCheckFailure")) + span.RecordError(tev.Error) delete(b.checks, key.HexString(tev.NodeID.Key())) case *EventIncludePoll: - // ignore, nothing to do + span.SetAttributes(tele.AttrEvent("EventIncludePoll")) + // ignore, nothing to do default: panic(fmt.Sprintf("unexpected event: %T", tev)) } @@ -153,6 +159,7 @@ func (b *Include[K, N]) Advance(ctx context.Context, ev IncludeEvent) IncludeSta if !ok { // No candidate in queue if len(b.checks) > 0 { + span.SetAttributes(tele.AttrOutEvent("StateIncludeWaitingWithCapacity")) return &StateIncludeWaitingWithCapacity{} } return &StateIncludeIdle{} @@ -164,6 +171,7 @@ func (b *Include[K, N]) Advance(ctx context.Context, ev IncludeEvent) IncludeSta } // Ask the node to find itself + span.SetAttributes(tele.AttrOutEvent("StateIncludeConnectivityCheck")) return &StateIncludeConnectivityCheck[K, N]{ NodeID: candidate, } diff --git a/v2/coord/routing/probe.go b/v2/coord/routing/probe.go index fd044036..b8f78c60 100644 --- a/v2/coord/routing/probe.go +++ b/v2/coord/routing/probe.go @@ -160,11 +160,13 @@ func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) ProbeState { p.nvl.Put(nv) case *EventProbeRemove[K, N]: span.SetAttributes(tele.AttrEvent("EventProbeRemove"), attribute.String("nodeid", tev.NodeID.String())) + p.rt.RemoveKey(tev.NodeID.Key()) p.nvl.Remove(tev.NodeID) return &StateProbeNodeFailure[K, N]{ NodeID: tev.NodeID, } + case *EventProbeConnectivityCheckSuccess[K, N]: span.SetAttributes(tele.AttrEvent("EventProbeMessageResponse"), attribute.String("nodeid", tev.NodeID.String())) nv, found := p.nvl.Get(tev.NodeID) @@ -183,6 +185,7 @@ func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) ProbeState { // probe failed, so remove from routing table and from list span.SetAttributes(tele.AttrEvent("EventProbeMessageFailure"), attribute.String("nodeid", tev.NodeID.String())) span.RecordError(tev.Error) + p.rt.RemoveKey(tev.NodeID.Key()) p.nvl.Remove(tev.NodeID) return 
&StateProbeNodeFailure[K, N]{ @@ -211,6 +214,7 @@ func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) ProbeState { candidate, found := p.nvl.FindCheckPastDeadline(p.cfg.Clock.Now()) if !found { // nothing suitable for time out + span.SetAttributes(tele.AttrOutEvent("StateProbeWaitingAtCapacity")) return &StateProbeWaitingAtCapacity{} } @@ -228,6 +232,7 @@ func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) ProbeState { if !ok { if p.nvl.OngoingCount() > 0 { // waiting for a check but nothing else to do + span.SetAttributes(tele.AttrOutEvent("StateProbeWaitingWithCapacity")) return &StateProbeWaitingWithCapacity{} } // nothing happening and nothing to do @@ -237,6 +242,7 @@ func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) ProbeState { p.nvl.MarkOngoing(next.NodeID, p.cfg.Clock.Now().Add(p.cfg.Timeout)) // Ask the node to find itself + span.SetAttributes(tele.AttrOutEvent("StateProbeConnectivityCheck")) return &StateProbeConnectivityCheck[K, N]{ NodeID: next.NodeID, } diff --git a/v2/dht.go b/v2/dht.go index db0e0b63..06086d81 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -151,6 +151,7 @@ func New(h host.Host, cfg *Config) (*DHT, error) { // instantiate a new Kademlia DHT coordinator. coordCfg := coord.DefaultCoordinatorConfig() + coordCfg.Clock = cfg.Clock coordCfg.MeterProvider = cfg.MeterProvider coordCfg.TracerProvider = cfg.TracerProvider diff --git a/v2/go.mod b/v2/go.mod index 3e11da4d..5a0aacd8 100644 --- a/v2/go.mod +++ b/v2/go.mod @@ -15,7 +15,7 @@ require ( github.com/libp2p/go-msgio v0.3.0 github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-multiaddr v0.11.0 - github.com/plprobelab/go-kademlia v0.0.0-20230911085009-18d957853c57 + github.com/plprobelab/go-kademlia v0.0.0-20230913171354-443ec1f56080 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.17.0 go.opentelemetry.io/otel/exporters/jaeger v1.16.0 diff --git a/v2/go.sum b/v2/go.sum index df21f778..f2feab1a 100644 --- a/v2/go.sum +++ b/v2/go.sum @@ -259,8 +259,8 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhM github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/plprobelab/go-kademlia v0.0.0-20230911085009-18d957853c57 h1:9qB1pIoeis/hdhTxVcQLrFYJhGVRJIJESLP/kCud5HE= -github.com/plprobelab/go-kademlia v0.0.0-20230911085009-18d957853c57/go.mod h1:9mz9/8plJj9HWiQmB6JkBNHY30AXzy9LrJ++sCvWqFQ= +github.com/plprobelab/go-kademlia v0.0.0-20230913171354-443ec1f56080 h1:CqaVJqntB6Gm7LILVsIZv0Sdy9kfmi74rwZRt66hPLM= +github.com/plprobelab/go-kademlia v0.0.0-20230913171354-443ec1f56080/go.mod h1:9mz9/8plJj9HWiQmB6JkBNHY30AXzy9LrJ++sCvWqFQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= diff --git a/v2/handlers.go b/v2/handlers.go index 5243a79d..bcd89f9a 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -24,6 +24,9 @@ func (d *DHT) handleFindPeer(ctx context.Context, remote peer.ID, req *pb.Messag return nil, fmt.Errorf("handleFindPeer with empty key") } + // tell the coordinator that this peer supports finding closer nodes + d.kad.NotifyConnectivity(ctx, remote) + // "parse" requested peer ID from the key field target := 
peer.ID(req.GetKey())
diff --git a/v2/handlers_test.go b/v2/handlers_test.go
index e80e7192..a94816e1 100644
--- a/v2/handlers_test.go
+++ b/v2/handlers_test.go
@@ -33,7 +33,9 @@ import (
 var rng = rand.New(rand.NewSource(1337))

 func newTestDHT(t testing.TB) *DHT {
-	return newTestDHTWithConfig(t, DefaultConfig())
+	cfg := DefaultConfig()
+
+	return newTestDHTWithConfig(t, cfg)
 }

 func newTestDHTWithConfig(t testing.TB, cfg *Config) *DHT {
diff --git a/v2/internal/kadtest/tracing.go b/v2/internal/kadtest/tracing.go
index dc7c82c8..a8125423 100644
--- a/v2/internal/kadtest/tracing.go
+++ b/v2/internal/kadtest/tracing.go
@@ -2,28 +2,63 @@ package kadtest

 import (
 	"context"
+	"flag"
 	"fmt"
 	"testing"

+	"github.com/libp2p/go-libp2p-kad-dht/v2/tele"
+	"go.opentelemetry.io/otel"
 	"go.opentelemetry.io/otel/exporters/jaeger"
-	"go.opentelemetry.io/otel/sdk/trace"
+	"go.opentelemetry.io/otel/sdk/resource"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
+	"go.opentelemetry.io/otel/trace"
 )

+var (
+	tracing     = flag.Bool("tracing", false, "Enable or disable tracing")
+	tracingHost = flag.String("tracinghost", "127.0.0.1", "Hostname of tracing collector endpoint")
+	tracingPort = flag.Int("tracingport", 14268, "Port number of tracing collector endpoint")
+)
+
+// MaybeTrace returns a context containing a new root span named after the test. It creates a new
+// tracing provider and installs it as the global provider, restoring the previous provider at the
+// end of the test. This function cannot be called from tests that are run in parallel.
+func MaybeTrace(t *testing.T, ctx context.Context) (context.Context, trace.TracerProvider) {
+	if !*tracing {
+		return ctx, otel.GetTracerProvider()
+	}
+
+	tp := JaegerTracerProvider(t)
+	t.Logf("Tracing enabled and exporting to %s:%d", *tracingHost, *tracingPort)
+
+	ctx, span := tp.Tracer("kadtest").Start(ctx, t.Name(), trace.WithNewRoot())
+	t.Cleanup(func() {
+		span.End()
+	})
+
+	return ctx, tp
+}
+
 // JaegerTracerProvider creates a tracer provider that exports traces to a Jaeger instance running
 // on localhost on port 14268
-func JaegerTracerProvider(t *testing.T) *trace.TracerProvider {
+func JaegerTracerProvider(t *testing.T) trace.TracerProvider {
 	t.Helper()

-	traceHost := "127.0.0.1"
-	tracePort := 14268
-
-	endpoint := fmt.Sprintf("http://%s:%d/api/traces", traceHost, tracePort)
+	endpoint := fmt.Sprintf("http://%s:%d/api/traces", *tracingHost, *tracingPort)
 	exp, err := jaeger.New(jaeger.WithCollectorEndpoint(jaeger.WithEndpoint(endpoint)))
 	if err != nil {
 		t.Fatalf("failed to create jaeger exporter: %v", err)
 	}

-	tp := trace.NewTracerProvider(trace.WithBatcher(exp))
+	tp := sdktrace.NewTracerProvider(
+		sdktrace.WithBatcher(exp),
+		sdktrace.WithResource(resource.NewWithAttributes(
+			semconv.SchemaURL,
+			semconv.ServiceNameKey.String(tele.TracerName),
+			semconv.DeploymentEnvironmentKey.String("testing"),
+		)),
+	)

 	t.Cleanup(func() {
 		tp.Shutdown(context.Background())
diff --git a/v2/notifee.go b/v2/notifee.go
index 8999b7c0..d1889428 100644
--- a/v2/notifee.go
+++ b/v2/notifee.go
@@ -1,10 +1,12 @@
 package dht

 import (
+	"context"
 	"fmt"

 	"github.com/libp2p/go-libp2p/core/event"
 	"github.com/libp2p/go-libp2p/core/network"
+	"github.com/libp2p/go-libp2p/core/peer"
 )

 // networkEventsSubscription registers a subscription on the libp2p event bus
@@ -49,6 +51,7 @@ func (d *DHT) consumeNetworkEvents(sub event.Subscription) {
 		case event.EvtLocalAddressesUpdated:
 		case event.EvtPeerProtocolsUpdated:
 		case
event.EvtPeerIdentificationCompleted: + d.onEvtPeerIdentificationCompleted(evt) case event.EvtPeerConnectednessChanged: default: d.log.Warn("unknown libp2p event", "type", fmt.Sprintf("%T", evt)) @@ -84,3 +87,10 @@ func (d *DHT) onEvtLocalReachabilityChanged(evt event.EvtLocalReachabilityChange d.log.With("reachability", evt.Reachability).Warn("unknown reachability type") } } + +func (d *DHT) onEvtPeerIdentificationCompleted(evt event.EvtPeerIdentificationCompleted) { + // tell the coordinator about a new candidate for inclusion in the routing table + d.kad.AddNodes(context.Background(), []peer.AddrInfo{ + {ID: evt.Peer}, + }) +} diff --git a/v2/notifee_test.go b/v2/notifee_test.go index e8f3d90e..a42f82bf 100644 --- a/v2/notifee_test.go +++ b/v2/notifee_test.go @@ -2,11 +2,14 @@ package dht import ( "testing" + "time" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/event" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestDHT_consumeNetworkEvents_onEvtLocalReachabilityChanged(t *testing.T) { @@ -65,3 +68,21 @@ func TestDHT_consumeNetworkEvents_onEvtLocalReachabilityChanged(t *testing.T) { assert.Equal(t, modeServer, d.mode) }) } + +func TestDHT_consumeNetworkEvents_onEvtPeerIdentificationCompleted(t *testing.T) { + ctx := kadtest.CtxShort(t) + + d1 := newServerDht(t, nil) + d2 := newServerDht(t, nil) + + // make sure d1 has the address of d2 in its peerstore + d1.host.Peerstore().AddAddrs(d2.host.ID(), d2.host.Addrs(), time.Minute) + + // send the event + d1.onEvtPeerIdentificationCompleted(event.EvtPeerIdentificationCompleted{ + Peer: d2.host.ID(), + }) + + _, err := expectRoutingUpdated(t, ctx, d1.kad.RoutingNotifications(), d2.host.ID()) + require.NoError(t, err) +} diff --git a/v2/query_test.go b/v2/query_test.go index 29fa004a..b96c0b33 100644 --- a/v2/query_test.go +++ b/v2/query_test.go @@ -1,11 +1,19 @@ package dht import ( + "context" + "fmt" "testing" + "time" "github.com/libp2p/go-libp2p" "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" + + "github.com/libp2p/go-libp2p-kad-dht/v2/coord" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) func newServerHost(t testing.TB) host.Host { @@ -39,6 +47,7 @@ func newClientHost(t testing.TB) host.Host { func newServerDht(t testing.TB, cfg *Config) *DHT { h := newServerHost(t) + var err error if cfg == nil { cfg = DefaultConfig() } @@ -47,6 +56,10 @@ func newServerDht(t testing.TB, cfg *Config) *DHT { d, err := New(h, cfg) require.NoError(t, err) + // add at least 1 entry in the routing table so the server will pass connectivity checks + fillRoutingTable(t, d, 1) + require.NotEmpty(t, d.rt.NearestNodes(kadt.PeerID(d.host.ID()).Key(), 1)) + t.Cleanup(func() { if err = d.Close(); err != nil { t.Logf("unexpected error when closing dht: %s", err) @@ -58,6 +71,7 @@ func newServerDht(t testing.TB, cfg *Config) *DHT { func newClientDht(t testing.TB, cfg *Config) *DHT { h := newClientHost(t) + var err error if cfg == nil { cfg = DefaultConfig() } @@ -72,3 +86,144 @@ func newClientDht(t testing.TB, cfg *Config) *DHT { }) return d } + +// expectRoutingUpdated selects on the event channel until an EventRoutingUpdated event is seen for the specified peer id +func expectRoutingUpdated(t *testing.T, ctx context.Context, events <-chan coord.RoutingNotification, id peer.ID) (*coord.EventRoutingUpdated, 
error) {
+	t.Helper()
+	for {
+		select {
+		case ev := <-events:
+			if tev, ok := ev.(*coord.EventRoutingUpdated); ok {
+				if tev.NodeInfo.ID == id {
+					return tev, nil
+				}
+				t.Logf("saw routing update for %s", tev.NodeInfo.ID)
+			}
+		case <-ctx.Done():
+			return nil, fmt.Errorf("test deadline exceeded while waiting for routing update event")
+		}
+	}
+}
+
+// expectRoutingRemoved selects on the event channel until an EventRoutingRemoved event is seen for the specified peer id
+func expectRoutingRemoved(t *testing.T, ctx context.Context, events <-chan coord.RoutingNotification, id peer.ID) (*coord.EventRoutingRemoved, error) {
+	t.Helper()
+	for {
+		select {
+		case ev := <-events:
+			if tev, ok := ev.(*coord.EventRoutingRemoved); ok {
+				if tev.NodeID == id {
+					return tev, nil
+				}
+				t.Logf("saw routing removed for %s", tev.NodeID)
+			}
+		case <-ctx.Done():
+			return nil, fmt.Errorf("test deadline exceeded while waiting for routing removed event")
+		}
+	}
+}
+
+func connect(t *testing.T, ctx context.Context, a, b *DHT) {
+	t.Helper()
+
+	remoteAddrInfo := peer.AddrInfo{
+		ID:    b.host.ID(),
+		Addrs: b.host.Addrs(),
+	}
+
+	// Add b's address to a
+	err := a.AddAddresses(ctx, []peer.AddrInfo{remoteAddrInfo}, time.Minute)
+	require.NoError(t, err)
+
+	// the include state machine runs in the background for a and eventually should add the node to the routing table
+	_, err = expectRoutingUpdated(t, ctx, a.kad.RoutingNotifications(), b.host.ID())
+	require.NoError(t, err)
+
+	// the routing table should now contain the node
+	_, err = a.kad.GetNode(ctx, b.host.ID())
+	require.NoError(t, err)
+}
+
+// connectLinearChain connects the dhts together in a linear chain.
+// The dhts are configured with routing tables that contain immediate neighbours.
+func connectLinearChain(t *testing.T, ctx context.Context, dhts ...*DHT) {
+	for i := 1; i < len(dhts); i++ {
+		connect(t, ctx, dhts[i-1], dhts[i])
+		connect(t, ctx, dhts[i], dhts[i-1])
+	}
+}
+
+func TestRTAdditionOnSuccessfulQuery(t *testing.T) {
+	ctx := kadtest.CtxShort(t)
+	ctx, tp := kadtest.MaybeTrace(t, ctx)
+
+	cfg := DefaultConfig()
+	cfg.TracerProvider = tp
+
+	d1 := newServerDht(t, cfg)
+	d2 := newServerDht(t, cfg)
+	d3 := newServerDht(t, cfg)
+
+	connectLinearChain(t, ctx, d1, d2, d3)
+
+	// d3 does not know about d1
+	_, err := d3.kad.GetNode(ctx, d1.host.ID())
+	require.ErrorIs(t, err, coord.ErrNodeNotFound)
+
+	// d1 does not know about d3
+	_, err = d1.kad.GetNode(ctx, d3.host.ID())
+	require.ErrorIs(t, err, coord.ErrNodeNotFound)
+
+	// but when d3 queries d2, d1 and d3 discover each other
+	_, _ = d3.FindPeer(ctx, "something")
+	// ignore the error
+
+	// d3 should update its routing table to include d1 during the query
+	_, err = expectRoutingUpdated(t, ctx, d3.kad.RoutingNotifications(), d1.host.ID())
+	require.NoError(t, err)
+
+	// d3 now has d1 in its routing table
+	_, err = d3.kad.GetNode(ctx, d1.host.ID())
+	require.NoError(t, err)
+
+	// d1 should update its routing table to include d3 during the query
+	_, err = expectRoutingUpdated(t, ctx, d1.kad.RoutingNotifications(), d3.host.ID())
+	require.NoError(t, err)
+
+	// d1 now has d3 in its routing table
+	_, err = d1.kad.GetNode(ctx, d3.host.ID())
+	require.NoError(t, err)
+}
+
+func TestRTEvictionOnFailedQuery(t *testing.T) {
+	ctx := kadtest.CtxShort(t)
+
+	cfg := DefaultConfig()
+
+	d1 := newServerDht(t, cfg)
+	d2 := newServerDht(t, cfg)
+	connect(t, ctx, d1, d2)
+	connect(t, ctx, d2, d1)
+
+	// close both hosts so query fails
+	require.NoError(t, d1.host.Close())
+	require.NoError(t,
d2.host.Close()) + + // peers will still be in the RT because time is paused and + // no scheduled probes will have taken place + + // d1 still has d2 in the routing table + _, err := d1.kad.GetNode(ctx, d2.host.ID()) + require.NoError(t, err) + + // d2 still has d1 in the routing table + _, err = d2.kad.GetNode(ctx, d1.host.ID()) + require.NoError(t, err) + + // failed queries should remove the queried peers from the routing table + _, _ = d1.FindPeer(ctx, "test") + + // d1 should update its routing table to remove d2 because of the failure + _, err = expectRoutingRemoved(t, ctx, d1.kad.RoutingNotifications(), d2.host.ID()) + require.NoError(t, err) +} diff --git a/v2/router.go b/v2/router.go index f18f2892..2c5ed505 100644 --- a/v2/router.go +++ b/v2/router.go @@ -58,7 +58,7 @@ func (r *Router) SendMessage(ctx context.Context, to peer.AddrInfo, protoID addr // TODO: what to do with addresses in peer.AddrInfo? if len(r.host.Peerstore().Addrs(to.ID)) == 0 { - return nil, fmt.Errorf("aaah ProtoKadMessage") + return nil, fmt.Errorf("no address for peer %s", to.ID) } var cancel context.CancelFunc diff --git a/v2/routing.go b/v2/routing.go index cc72f849..e17ae434 100644 --- a/v2/routing.go +++ b/v2/routing.go @@ -18,7 +18,6 @@ import ( "github.com/libp2p/go-libp2p/core/routing" "go.opentelemetry.io/otel/attribute" otel "go.opentelemetry.io/otel/trace" - "golang.org/x/exp/slog" ) var _ routing.Routing = (*DHT)(nil) @@ -44,7 +43,6 @@ func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { var foundNode coord.Node fn := func(ctx context.Context, node coord.Node, stats coord.QueryStats) error { - slog.Info("visiting node", "id", node.ID()) if node.ID() == id { foundNode = node return coord.ErrSkipRemaining diff --git a/v2/tele/tele.go b/v2/tele/tele.go index 7163f3d0..9309f85a 100644 --- a/v2/tele/tele.go +++ b/v2/tele/tele.go @@ -91,6 +91,11 @@ func AttrEvent(val string) attribute.KeyValue { return attribute.String("event", val) } +// AttrOutEvent creates an attribute that records the name of an event being returned +func AttrOutEvent(val string) attribute.KeyValue { + return attribute.String("out_event", val) +} + // WithAttributes is a function that attaches the provided attributes to the // given context. The given attributes will overwrite any already existing ones. func WithAttributes(ctx context.Context, attrs ...attribute.KeyValue) context.Context { From 3723b8a1eb6b5c2af1b07713a2c2c9741dab8f48 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Mon, 18 Sep 2023 15:15:34 +0200 Subject: [PATCH 46/64] remove jaeger dependency (#900) This PR changes tracing to use the OpenTelemetry protocol which is also recommended by Jaeger. [`go.opentelemetry.io/otel/exporters/jaeger`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger): > Deprecated: This module is no longer supported. OpenTelemetry dropped support for Jaeger exporter in July 2023. Jaeger officially accepts and recommends using OTLP. Use [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp) or [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc) instead. 
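For illustration, the new exporter wiring looks roughly like this (a minimal sketch; the endpoint, the insecure transport, and the setup location are assumptions matching the docker command below, not the exact code in this change):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// Send spans via OTLP over gRPC to a collector such as Jaeger listening on port 4317.
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("127.0.0.1:4317"), // assumed local collector address
		otlptracegrpc.WithInsecure(),                 // plaintext is fine for a local all-in-one container
	)
	if err != nil {
		panic(err)
	}

	// Batch spans before export and register the provider globally.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer tp.Shutdown(ctx)

	otel.SetTracerProvider(tp)
}
```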
Works with the following docker command:

```shell
docker run --rm --name jaeger -p 16686:16686 -p 4317:4317 jaegertracing/all-in-one:1.49
```

Note that `jaegertracing/all-in-one` version `1.49` is **newer** than version `1.6`, which we often used in the past and is now five years old.
---
 v2/coord/coordinator_test.go   | 11 -----
 v2/go.mod                      | 43 ++++++++++-------
 v2/go.sum                      | 87 ++++++++++++++++++++--------------
 v2/internal/kadtest/tracing.go | 53 +++++++++++++--------
 4 files changed, 110 insertions(+), 84 deletions(-)

diff --git a/v2/coord/coordinator_test.go b/v2/coord/coordinator_test.go
index f6f29837..f9b0e484 100644
--- a/v2/coord/coordinator_test.go
+++ b/v2/coord/coordinator_test.go
@@ -12,7 +12,6 @@ import (
 	"github.com/benbjohnson/clock"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/stretchr/testify/require"
-	"go.opentelemetry.io/otel"

 	"github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest"
 	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest"
@@ -73,16 +72,6 @@ func (w *notificationWatcher) Expect(ctx context.Context, expected RoutingNotifi
 	}
 }

-// TracingTelemetry may be used to create a Telemetry that traces a test
-func TracingTelemetry(t *testing.T) *Telemetry {
-	telemetry, err := NewTelemetry(otel.GetMeterProvider(), kadtest.JaegerTracerProvider(t))
-	if err != nil {
-		t.Fatalf("unexpected error creating telemetry: %v", err)
-	}
-
-	return telemetry
-}
-
 func TestConfigValidate(t *testing.T) {
 	t.Run("default is valid", func(t *testing.T) {
 		cfg := DefaultCoordinatorConfig()
diff --git a/v2/go.mod b/v2/go.mod
index 5a0aacd8..fe220453 100644
--- a/v2/go.mod
+++ b/v2/go.mod
@@ -17,19 +17,20 @@ require (
 	github.com/multiformats/go-multiaddr v0.11.0
 	github.com/plprobelab/go-kademlia v0.0.0-20230913171354-443ec1f56080
 	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/otel v1.17.0
-	go.opentelemetry.io/otel/exporters/jaeger v1.16.0
-	go.opentelemetry.io/otel/metric v1.17.0
-	go.opentelemetry.io/otel/sdk v1.17.0
-	go.opentelemetry.io/otel/sdk/metric v0.40.0
-	go.opentelemetry.io/otel/trace v1.17.0
-	go.uber.org/zap/exp v0.1.0
-	golang.org/x/exp v0.0.0-20230905200255-921286631fa9
+	go.opentelemetry.io/otel v1.18.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.18.0
+	go.opentelemetry.io/otel/metric v1.18.0
+	go.opentelemetry.io/otel/sdk v1.18.0
+	go.opentelemetry.io/otel/sdk/metric v0.41.0
+	go.opentelemetry.io/otel/trace v1.18.0
+	go.uber.org/zap/exp v0.1.0 // cannot update to v0.2.0 because zapslog requires go1.21
+	golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
 	google.golang.org/protobuf v1.31.0
 )

 require (
 	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/containerd/cgroups v1.1.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
@@ -49,10 +50,11 @@ require (
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
 	github.com/google/gopacket v1.1.19 // indirect
-	github.com/google/pprof v0.0.0-20230907193218-d3ddc7976beb // indirect
+	github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
-	github.com/huin/goupnp v1.3.0 // indirect
+	github.com/huin/goupnp v1.2.0 // indirect
 	github.com/ipld/go-ipld-prime v0.21.0 // indirect
 	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect @@ -85,7 +87,7 @@ require ( github.com/multiformats/go-multistream v0.4.1 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/nxadm/tail v1.4.8 // indirect - github.com/onsi/ginkgo/v2 v2.12.0 // indirect + github.com/onsi/ginkgo/v2 v2.11.0 // indirect github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pkg/errors v0.9.1 // indirect @@ -94,25 +96,30 @@ require ( github.com/prometheus/client_golang v1.16.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.11.1 // indirect + github.com/prometheus/procfs v0.11.0 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/qtls-go1-20 v0.3.4 // indirect - github.com/quic-go/quic-go v0.38.1 // indirect + github.com/quic-go/qtls-go1-20 v0.3.2 // indirect + github.com/quic-go/quic-go v0.37.6 // indirect github.com/quic-go/webtransport-go v0.5.3 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/syndtr/goleveldb v1.0.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.18.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/dig v1.17.0 // indirect go.uber.org/fx v1.20.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.25.0 // indirect - golang.org/x/crypto v0.13.0 // indirect + golang.org/x/crypto v0.12.0 // indirect golang.org/x/mod v0.12.0 // indirect - golang.org/x/net v0.15.0 // indirect + golang.org/x/net v0.14.0 // indirect golang.org/x/sync v0.3.0 // indirect golang.org/x/sys v0.12.0 // indirect - golang.org/x/text v0.13.0 // indirect - golang.org/x/tools v0.13.0 // indirect + golang.org/x/text v0.12.0 // indirect + golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/grpc v1.58.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect ) diff --git a/v2/go.sum b/v2/go.sum index f2feab1a..bffb7bb6 100644 --- a/v2/go.sum +++ b/v2/go.sum @@ -18,6 +18,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= @@ -75,6 +77,7 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -98,8 +101,8 @@ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20230907193218-d3ddc7976beb h1:LCMfzVg3sflxTs4UvuP4D8CkoZnfHLe2qzqgDn/4OHs= -github.com/google/pprof v0.0.0-20230907193218-d3ddc7976beb/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBBos92HalKpaGKHrp+3Uo6yTodo= +github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= @@ -112,11 +115,13 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= -github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= +github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/ipfs/boxo v0.12.0 h1:AXHg/1ONZdRQHQLgG5JHsSC3XoE4DjCAMgK+asZvUcQ= github.com/ipfs/boxo v0.12.0/go.mod h1:xAnfiU6PtxWCnRqu7dcXQ10bB5/kvI1kXRotuGqGBhg= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= @@ -246,10 +251,10 @@ github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.12.0 h1:UIVDowFPwpg6yMUpPjGkYvf06K3RAiJXUhCxEwQVHRI= -github.com/onsi/ginkgo/v2 v2.12.0/go.mod h1:ZNEzXISYlqpb8S36iN71ifqLi3vVD1rVJGvWRCJOUpQ= 
+github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= +github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -275,14 +280,14 @@ github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7q github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk= +github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/qtls-go1-20 v0.3.4 h1:MfFAPULvst4yoMgY9QmtpYmfij/em7O8UUi+bNVm7Cg= -github.com/quic-go/qtls-go1-20 v0.3.4/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= -github.com/quic-go/quic-go v0.38.1 h1:M36YWA5dEhEeT+slOu/SwMEucbYd0YFidxG3KlGPZaE= -github.com/quic-go/quic-go v0.38.1/go.mod h1:ijnZM7JsFIkp4cRyjxJNIzdSfCLmUMg9wdyhGmg+SN4= +github.com/quic-go/qtls-go1-20 v0.3.2 h1:rRgN3WfnKbyik4dBV8A6girlJVxGand/d+jVKbQq5GI= +github.com/quic-go/qtls-go1-20 v0.3.2/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= +github.com/quic-go/quic-go v0.37.6 h1:2IIUmQzT5YNxAiaPGjs++Z4hGOtIR0q79uS5qE9ccfY= +github.com/quic-go/quic-go v0.37.6/go.mod h1:YsbH1r4mSHPJcLF4k4zruUkLBqctEMBDR6VPvcYjIsU= github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU= github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= @@ -325,7 +330,6 @@ github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -346,18 +350,22 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark 
v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opentelemetry.io/otel v1.17.0 h1:MW+phZ6WZ5/uk2nd93ANk/6yJ+dVrvNWUjGhnnFU5jM= -go.opentelemetry.io/otel v1.17.0/go.mod h1:I2vmBGtFaODIVMBSTPVDlJSzBDNf93k60E6Ft0nyjo0= -go.opentelemetry.io/otel/exporters/jaeger v1.16.0 h1:YhxxmXZ011C0aDZKoNw+juVWAmEfv/0W2XBOv9aHTaA= -go.opentelemetry.io/otel/exporters/jaeger v1.16.0/go.mod h1:grYbBo/5afWlPpdPZYhyn78Bk04hnvxn2+hvxQhKIQM= -go.opentelemetry.io/otel/metric v1.17.0 h1:iG6LGVz5Gh+IuO0jmgvpTB6YVrCGngi8QGm+pMd8Pdc= -go.opentelemetry.io/otel/metric v1.17.0/go.mod h1:h4skoxdZI17AxwITdmdZjjYJQH5nzijUUjm+wtPph5o= -go.opentelemetry.io/otel/sdk v1.17.0 h1:FLN2X66Ke/k5Sg3V623Q7h7nt3cHXaW1FOvKKrW0IpE= -go.opentelemetry.io/otel/sdk v1.17.0/go.mod h1:U87sE0f5vQB7hwUoW98pW5Rz4ZDuCFBZFNUBlSgmDFQ= -go.opentelemetry.io/otel/sdk/metric v0.40.0 h1:qOM29YaGcxipWjL5FzpyZDpCYrDREvX0mVlmXdOjCHU= -go.opentelemetry.io/otel/sdk/metric v0.40.0/go.mod h1:dWxHtdzdJvg+ciJUKLTKwrMe5P6Dv3FyDbh8UkfgkVs= -go.opentelemetry.io/otel/trace v1.17.0 h1:/SWhSRHmDPOImIAetP1QAeMnZYiQXrTy4fMMYOdSKWQ= -go.opentelemetry.io/otel/trace v1.17.0/go.mod h1:I/4vKTgFclIsXRVucpH25X0mpFSczM7aHeaz0ZBLWjY= +go.opentelemetry.io/otel v1.18.0 h1:TgVozPGZ01nHyDZxK5WGPFB9QexeTMXEH7+tIClWfzs= +go.opentelemetry.io/otel v1.18.0/go.mod h1:9lWqYO0Db579XzVuCKFNPDl4s73Voa+zEck3wHaAYQI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.18.0 h1:IAtl+7gua134xcV3NieDhJHjjOVeJhXAnYf/0hswjUY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.18.0/go.mod h1:w+pXobnBzh95MNIkeIuAKcHe/Uu/CX2PKIvBP6ipKRA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.18.0 h1:yE32ay7mJG2leczfREEhoW3VfSZIvHaB+gvVo1o8DQ8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.18.0/go.mod h1:G17FHPDLt74bCI7tJ4CMitEk4BXTYG4FW6XUpkPBXa4= +go.opentelemetry.io/otel/metric v1.18.0 h1:JwVzw94UYmbx3ej++CwLUQZxEODDj/pOuTCvzhtRrSQ= +go.opentelemetry.io/otel/metric v1.18.0/go.mod h1:nNSpsVDjWGfb7chbRLUNW+PBNdcSTHD4Uu5pfFMOI0k= +go.opentelemetry.io/otel/sdk v1.18.0 h1:e3bAB0wB3MljH38sHzpV/qWrOTCFrdZF2ct9F8rBkcY= +go.opentelemetry.io/otel/sdk v1.18.0/go.mod h1:1RCygWV7plY2KmdskZEDDBs4tJeHG92MdHZIluiYs/M= +go.opentelemetry.io/otel/sdk/metric v0.41.0 h1:c3sAt9/pQ5fSIUfl0gPtClV3HhE18DCVzByD33R/zsk= +go.opentelemetry.io/otel/sdk/metric v0.41.0/go.mod h1:PmOmSt+iOklKtIg5O4Vz9H/ttcRFSNTgii+E1KGyn1w= +go.opentelemetry.io/otel/trace v1.18.0 h1:NY+czwbHbmndxojTEKiSMHkG2ClNH2PwmcHrdo0JY10= +go.opentelemetry.io/otel/trace v1.18.0/go.mod h1:T2+SGJGuYZY3bjj5rgh/hN7KIrlpWC5nS8Mjvzckz+0= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -388,11 +396,11 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.13.0 
h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= -golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -421,8 +429,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -463,8 +471,8 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -484,8 +492,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -502,10 +510,17 @@ google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.58.0 h1:32JY8YpPMSR45K+c3o6b8VL73V+rR8k+DeMIr4vRH8o= +google.golang.org/grpc v1.58.0/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= diff --git a/v2/internal/kadtest/tracing.go b/v2/internal/kadtest/tracing.go index a8125423..c1781ce8 100644 --- a/v2/internal/kadtest/tracing.go +++ b/v2/internal/kadtest/tracing.go @@ -6,30 +6,40 @@ import ( "fmt" "testing" - "github.com/libp2p/go-libp2p-kad-dht/v2/tele" + "github.com/stretchr/testify/require" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/exporters/jaeger" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.4.0" "go.opentelemetry.io/otel/trace" + + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) var ( tracing = flag.Bool("tracing", false, "Enable or disable tracing") - tracingHost = flag.String("tracinghost", "127.0.0.1", "Hostname of tracing collector endpoint") - tracingPort = flag.Int("tracingport", 14268, "Port number of tracing collector 
endpoint") + tracingHost = flag.String("tracinghost", "127.0.0.1", "Hostname of OTLP tracing collector endpoint") + tracingPort = flag.Int("tracingport", 4317, "Port number of OTLP gRPC tracing collector endpoint") ) -// MaybeTrace returns a context containing a new root span named after the test. It creates an new -// tracing provider and installs it as the global provider, restoring the previous provider at the -// end of the test. This function cannot be called from tests that are run in parallel. -func MaybeTrace(t *testing.T, ctx context.Context) (context.Context, trace.TracerProvider) { +// MaybeTrace returns a context containing a new root span named after the test. +// It creates a new tracing provider and installs it as the global provider, +// restoring the previous provider at the end of the test. This function cannot +// be called from tests that are run in parallel. +// +// To activate test tracing pass the `-tracing` flag to the test command. +// Assuming you chose the defaults above, run the following to collect traces: +// +// docker run --rm --name jaeger -p 16686:16686 -p 4317:4317 jaegertracing/all-in-one:1.49 +// +// Then navigate to localhost:16686 and inspect the traces. +func MaybeTrace(t testing.TB, ctx context.Context) (context.Context, trace.TracerProvider) { if !*tracing { return ctx, otel.GetTracerProvider() } - tp := JaegerTracerProvider(t) + tp := OtelTracerProvider(ctx, t) t.Logf("Tracing enabled and exporting to %s:%d", *tracingHost, *tracingPort) ctx, span := tp.Tracer("kadtest").Start(ctx, t.Name(), trace.WithNewRoot()) @@ -40,16 +50,15 @@ func MaybeTrace(t *testing.T, ctx context.Context) (context.Context, trace.Trace return ctx, tp } -// JaegerTracerProvider creates a tracer provider that exports traces to a Jaeger instance running -// on localhost on port 14268 -func JaegerTracerProvider(t *testing.T) trace.TracerProvider { +// OtelTracerProvider creates a tracer provider that exports traces to, e.g., a +// Jaeger instance running on localhost on port 14268 +func OtelTracerProvider(ctx context.Context, t testing.TB) trace.TracerProvider { t.Helper() - - endpoint := fmt.Sprintf("http://%s:%d/api/traces", *tracingHost, *tracingPort) - exp, err := jaeger.New(jaeger.WithCollectorEndpoint(jaeger.WithEndpoint(endpoint))) - if err != nil { - t.Fatalf("failed to create jaeger exporter: %v", err) - } + exp, err := otlptracegrpc.New(ctx, + otlptracegrpc.WithEndpoint(fmt.Sprintf("%s:%d", *tracingHost, *tracingPort)), + otlptracegrpc.WithInsecure(), + ) + require.NoError(t, err, "failed to create otel exporter") tp := sdktrace.NewTracerProvider( sdktrace.WithBatcher(exp), @@ -61,7 +70,13 @@ func JaegerTracerProvider(t *testing.T) trace.TracerProvider { ) t.Cleanup(func() { - tp.Shutdown(context.Background()) + if err = tp.ForceFlush(ctx); err != nil { + t.Log("failed to shut down trace provider") + } + + if err = tp.Shutdown(ctx); err != nil { + t.Log("failed to shut down trace provider") + } }) return tp From 156aab2362ebda2e23c6ab5d482755395ddf6bbb Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Mon, 18 Sep 2023 18:44:01 +0200 Subject: [PATCH 47/64] fix: avoid panic when node is re-added to probe list (#902) --- v2/coord/routing/probe.go | 19 +++++++++++++------ v2/coord/routing/probe_test.go | 22 +++++++++++++++++++++- 2 files changed, 34 insertions(+), 7 deletions(-) diff --git a/v2/coord/routing/probe.go b/v2/coord/routing/probe.go index b8f78c60..45e5881f 100644 --- a/v2/coord/routing/probe.go +++ b/v2/coord/routing/probe.go @@ -334,11 +334,12 @@ type 
From 156aab2362ebda2e23c6ab5d482755395ddf6bbb Mon Sep 17 00:00:00 2001
From: Dennis Trautwein
Date: Mon, 18 Sep 2023 18:44:01 +0200
Subject: [PATCH 47/64] fix: avoid panic when node is re-added to probe list
 (#902)

---
 v2/coord/routing/probe.go      | 19 +++++++++++++------
 v2/coord/routing/probe_test.go | 22 +++++++++++++++++++++-
 2 files changed, 34 insertions(+), 7 deletions(-)

diff --git a/v2/coord/routing/probe.go b/v2/coord/routing/probe.go
index b8f78c60..45e5881f 100644
--- a/v2/coord/routing/probe.go
+++ b/v2/coord/routing/probe.go
@@ -334,11 +334,12 @@ type nodeValue[K kad.Key[K], N kad.NodeID[K]] struct {
 
 type nodeValueEntry[K kad.Key[K], N kad.NodeID[K]] struct {
 	nv    *nodeValue[K, N]
-	index int // the index of the item in the ordering
+	index int // the index of the item in the ordering, set to -1 when the item is popped from the heap
 }
 
 type nodeValueList[K kad.Key[K], N kad.NodeID[K]] struct {
-	nodes   map[string]*nodeValueEntry[K, N]
+	nodes map[string]*nodeValueEntry[K, N]
+	// pending is a list of nodes ordered by the time of the next check
 	pending *nodeValuePendingList[K, N]
 	// ongoing is a list of nodes with ongoing/in-progress probes, loosely ordered earliest to most recent
 	ongoing []N
@@ -359,14 +360,20 @@ func (l *nodeValueList[K, N]) Put(nv *nodeValue[K, N]) {
 	nve, exists := l.nodes[mk]
 	if !exists {
 		nve = &nodeValueEntry[K, N]{
-			nv: nv,
+			nv:    nv,
+			index: -1,
 		}
+		l.nodes[mk] = nve
 	} else {
 		nve.nv = nv
-		heap.Remove(l.pending, nve.index)
 	}
-	heap.Push(l.pending, nve)
-	l.nodes[mk] = nve
+
+	// nve.index is -1 when the node is not already in the pending list
+	// this could be because it is new or if there is an ongoing check
+	if nve.index == -1 {
+		heap.Push(l.pending, nve)
+	}
+	heap.Fix(l.pending, nve.index)
 	l.removeFromOngoing(nv.NodeID)
 }
 
diff --git a/v2/coord/routing/probe_test.go b/v2/coord/routing/probe_test.go
index d4106c51..e07d6445 100644
--- a/v2/coord/routing/probe_test.go
+++ b/v2/coord/routing/probe_test.go
@@ -512,7 +512,7 @@ func TestNodeValueList(t *testing.T) {
 		require.Equal(t, 0, l.OngoingCount())
 		require.Equal(t, 1, l.NodeCount())
 
-		l.MarkOngoing(tiny.NewNode(5), clk.Now().Add(time.Minute))
+		l.MarkOngoing(nv1.NodeID, clk.Now().Add(time.Minute))
 		require.Equal(t, 0, l.PendingCount())
 		require.Equal(t, 1, l.OngoingCount())
 		require.Equal(t, 1, l.NodeCount())
@@ -582,6 +582,26 @@ func TestNodeValueList(t *testing.T) {
 		require.Equal(t, 0, l.OngoingCount())
 		require.Equal(t, 1, l.NodeCount())
 	})
+
+	t.Run("mark ongoing pending mixed", func(t *testing.T) {
+		t.Parallel()
+
+		clk := clock.NewMock()
+		l := NewNodeValueList[tiny.Key, tiny.Node]()
+		nv1 := &nodeValue[tiny.Key, tiny.Node]{
+			NodeID:       tiny.NewNode(5),
+			NextCheckDue: clk.Now().Add(time.Minute),
+		}
+		nv2 := &nodeValue[tiny.Key, tiny.Node]{
+			NodeID:       tiny.NewNode(6),
+			NextCheckDue: clk.Now().Add(time.Minute),
+		}
+
+		l.Put(nv1)
+		l.Put(nv2)
+		l.MarkOngoing(nv1.NodeID, clk.Now().Add(time.Minute))
+		// regression check: re-adding a node that has an ongoing check must not panic
+		l.Put(nv1)
+	})
 }
 
 func TestProbeConnectivityCheckSuccess(t *testing.T) {

From 1220ddd91f5fd65f4d2dfdb7c8144a86d7a2c855 Mon Sep 17 00:00:00 2001
From: Ian Davis <18375+iand@users.noreply.github.com>
Date: Tue, 19 Sep 2023 13:35:14 +0100
Subject: [PATCH 48/64] Decouple coord package from addressing (#903)

* Decouple coord package from addressing

* Go fmt

* fix: garbage collection test race condition (#904)

Moved ticker initialization outside the garbage collection goroutine.

There was a race condition between advancing the mocked time in
TestProvidersBackend_GarbageCollection and the initialization of the ticker
that triggers the garbage collection runs. It happened that we were advancing
the time without the ticker being initialized. Hence, advancing the time had
no effect.

In this PR, I moved the ticker initialization outside the garbage collection
goroutine. This means the ticker will be registered with the MockClock object
after StartGarbageCollection returns. Calls to mockClock.Add will therefore
trigger the ticker.
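To make the ordering issue concrete, here is a minimal sketch (illustrative only, not part of this change; it relies solely on the github.com/benbjohnson/clock API used above):

    package main

    import (
        "fmt"
        "time"

        "github.com/benbjohnson/clock"
    )

    func main() {
        clk := clock.NewMock()

        // The ticker is registered with the mock clock *before* the time is
        // advanced, which is what StartGarbageCollection now guarantees.
        ticker := clk.Ticker(time.Minute)
        defer ticker.Stop()

        clk.Add(time.Minute) // advance the mocked time; the ticker fires

        select {
        case tick := <-ticker.C:
            fmt.Println("tick at", tick)
        default:
            // Had the ticker instead been created inside a goroutine that had
            // not yet run when clk.Add was called, this branch would be hit:
            // time advances before the ticker exists and the tick is lost.
            fmt.Println("tick missed")
        }
    }

Creating the ticker before the goroutine starts establishes the happens-before relationship that the test relies on.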
* Fix test flakes that wait for routing events (#905) * Increase test iterations to trigger flake * Add failfast * Replace routing notification channel with a RoutingNotifier type * Remove unused code * Remove test count --------- Co-authored-by: Dennis Trautwein --- v2/backend.go | 2 +- v2/backend_provider.go | 7 +- v2/backend_provider_test.go | 5 +- v2/coord/conversion.go | 39 ------ v2/coord/coordinator.go | 190 +++++++++++++++++++------- v2/coord/coordinator_test.go | 138 ++++++------------- v2/coord/coretypes.go | 24 +--- v2/coord/event.go | 44 +++--- v2/coord/event_test.go | 2 +- v2/coord/internal/nettest/layouts.go | 31 ++--- v2/coord/internal/nettest/routing.go | 111 +++++++-------- v2/coord/internal/nettest/topology.go | 58 ++++---- v2/coord/network.go | 54 ++------ v2/coord/network_test.go | 5 +- v2/coord/query.go | 20 +-- v2/coord/routing.go | 60 ++++---- v2/coord/routing_test.go | 77 +++++------ v2/dht.go | 10 +- v2/dht_test.go | 28 +--- v2/handlers.go | 2 +- v2/internal/kadtest/context.go | 9 +- v2/kadt/kadt.go | 5 + v2/notifee.go | 6 +- v2/notifee_test.go | 10 +- v2/pb/msg.aux.go | 21 +-- v2/query_test.go | 111 ++++++--------- v2/router.go | 83 ++++------- v2/routing.go | 7 +- 28 files changed, 505 insertions(+), 654 deletions(-) delete mode 100644 v2/coord/conversion.go diff --git a/v2/backend.go b/v2/backend.go index a8c7775a..4e5d313f 100644 --- a/v2/backend.go +++ b/v2/backend.go @@ -102,7 +102,7 @@ func NewBackendPublicKey(ds ds.TxnDatastore, cfg *RecordBackendConfig) (be *Reco // The values returned from [ProvidersBackend.Fetch] will be of type // [*providerSet] (unexported). The cfg parameter can be nil, in which case the // [DefaultProviderBackendConfig] will be used. -func NewBackendProvider(pstore peerstore.Peerstore, dstore ds.Batching, cfg *ProvidersBackendConfig) (be *ProvidersBackend, err error) { +func NewBackendProvider(pstore peerstore.Peerstore, dstore ds.Datastore, cfg *ProvidersBackendConfig) (be *ProvidersBackend, err error) { if cfg == nil { if cfg, err = DefaultProviderBackendConfig(); err != nil { return nil, fmt.Errorf("default provider backend config: %w", err) diff --git a/v2/backend_provider.go b/v2/backend_provider.go index 1ddd764f..3be9d88a 100644 --- a/v2/backend_provider.go +++ b/v2/backend_provider.go @@ -257,14 +257,15 @@ func (p *ProvidersBackend) StartGarbageCollection() { p.gcCancel = cancel p.gcDone = make(chan struct{}) - p.log.Info("Provider backend's started garbage collection schedule") + // init ticker outside the goroutine to prevent race condition with + // clock mock in garbage collection test. 
+ ticker := p.cfg.clk.Ticker(p.cfg.GCInterval) go func() { defer close(p.gcDone) - - ticker := p.cfg.clk.Ticker(p.cfg.GCInterval) defer ticker.Stop() + p.log.Info("Provider backend started garbage collection schedule") for { select { case <-ctx.Done(): diff --git a/v2/backend_provider_test.go b/v2/backend_provider_test.go index 10407e54..d3ab465d 100644 --- a/v2/backend_provider_test.go +++ b/v2/backend_provider_test.go @@ -9,7 +9,6 @@ import ( "github.com/benbjohnson/clock" ds "github.com/ipfs/go-datastore" - syncds "github.com/ipfs/go-datastore/sync" "github.com/libp2p/go-libp2p" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -22,7 +21,9 @@ func newBackendProvider(t testing.TB, cfg *ProvidersBackendConfig) *ProvidersBac h, err := libp2p.New(libp2p.NoListenAddrs) require.NoError(t, err) - dstore := syncds.MutexWrap(ds.NewMapDatastore()) + dstore, err := InMemoryDatastore() + require.NoError(t, err) + t.Cleanup(func() { if err = dstore.Close(); err != nil { t.Logf("closing datastore: %s", err) diff --git a/v2/coord/conversion.go b/v2/coord/conversion.go deleted file mode 100644 index d605507b..00000000 --- a/v2/coord/conversion.go +++ /dev/null @@ -1,39 +0,0 @@ -package coord - -import ( - "github.com/libp2p/go-libp2p/core/peer" - "github.com/plprobelab/go-kademlia/kad" - - "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" -) - -// kadPeerIDToAddrInfo converts a kad.NodeID to a peer.AddrInfo with no addresses. -// This function will panic if id's underlying type is not kadt.PeerID -func kadPeerIDToAddrInfo(id kad.NodeID[kadt.Key]) peer.AddrInfo { - peerID := id.(kadt.PeerID) - return peer.AddrInfo{ - ID: peer.ID(peerID), - } -} - -// addrInfoToKadPeerID converts a peer.AddrInfo to a kad.NodeID. -func addrInfoToKadPeerID(addrInfo peer.AddrInfo) kadt.PeerID { - return kadt.PeerID(addrInfo.ID) -} - -// sliceOfPeerIDToSliceOfKadPeerID converts a slice of peer.ID to a slice of kadt.PeerID -func sliceOfPeerIDToSliceOfKadPeerID(peers []peer.ID) []kadt.PeerID { - nodes := make([]kadt.PeerID, len(peers)) - for i := range peers { - nodes[i] = kadt.PeerID(peers[i]) - } - return nodes -} - -func sliceOfAddrInfoToSliceOfKadPeerID(addrInfos []peer.AddrInfo) []kadt.PeerID { - peers := make([]kadt.PeerID, len(addrInfos)) - for i := range addrInfos { - peers[i] = kadt.PeerID(addrInfos[i].ID) - } - return peers -} diff --git a/v2/coord/coordinator.go b/v2/coord/coordinator.go index 9c1e8a70..4a4f3875 100644 --- a/v2/coord/coordinator.go +++ b/v2/coord/coordinator.go @@ -4,15 +4,14 @@ import ( "context" "errors" "fmt" + "reflect" + "sync" "time" "github.com/benbjohnson/clock" logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/kaderr" - "github.com/plprobelab/go-kademlia/network/address" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" @@ -23,6 +22,7 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/coord/query" "github.com/libp2p/go-libp2p-kad-dht/v2/coord/routing" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) // A Coordinator coordinates the state machines that comprise a Kademlia DHT @@ -40,9 +40,7 @@ type Coordinator struct { rt kad.RoutingTable[kadt.Key, kadt.PeerID] // rtr is the message router used to send messages - rtr Router - - routingNotifications chan RoutingNotification + rtr Router[kadt.Key, kadt.PeerID, *pb.Message] // 
networkBehaviour is the behaviour responsible for communicating with the network networkBehaviour *NetworkBehaviour @@ -57,6 +55,10 @@ type Coordinator struct { tele *Telemetry } +type RoutingNotifier interface { + Notify(context.Context, RoutingNotification) +} + type CoordinatorConfig struct { PeerstoreTTL time.Duration // duration for which a peer is kept in the peerstore @@ -72,6 +74,8 @@ type CoordinatorConfig struct { MeterProvider metric.MeterProvider // the meter provider to use when initialising metric instruments TracerProvider trace.TracerProvider // the tracer provider to use when initialising tracing + + RoutingNotifier RoutingNotifier // receives notifications of routing events } // Validate checks the configuration options and returns an error if any have invalid values. @@ -131,6 +135,13 @@ func (cfg *CoordinatorConfig) Validate() error { } } + if cfg.RoutingNotifier == nil { + return &kaderr.ConfigurationError{ + Component: "CoordinatorConfig", + Err: fmt.Errorf("routing notifier must not be nil"), + } + } + return nil } @@ -145,10 +156,11 @@ func DefaultCoordinatorConfig() *CoordinatorConfig { Logger: slog.New(zapslog.NewHandler(logging.Logger("coord").Desugar().Core())), MeterProvider: otel.GetMeterProvider(), TracerProvider: otel.GetTracerProvider(), + RoutingNotifier: nullRoutingNotifier{}, } } -func NewCoordinator(self kadt.PeerID, rtr Router, rt routing.RoutingTableCpl[kadt.Key, kadt.PeerID], cfg *CoordinatorConfig) (*Coordinator, error) { +func NewCoordinator(self kadt.PeerID, rtr Router[kadt.Key, kadt.PeerID, *pb.Message], rt routing.RoutingTableCpl[kadt.Key, kadt.PeerID], cfg *CoordinatorConfig) (*Coordinator, error) { if cfg == nil { cfg = DefaultCoordinatorConfig() } else if err := cfg.Validate(); err != nil { @@ -227,8 +239,6 @@ func NewCoordinator(self kadt.PeerID, rtr Router, rt routing.RoutingTableCpl[kad networkBehaviour: networkBehaviour, routingBehaviour: routingBehaviour, queryBehaviour: queryBehaviour, - - routingNotifications: make(chan RoutingNotification, 20), // buffered mainly to allow tests to read the channel after running an operation } go d.eventLoop(ctx) @@ -245,20 +255,6 @@ func (c *Coordinator) ID() kadt.PeerID { return c.self } -func (c *Coordinator) Addresses() []ma.Multiaddr { - // TODO: return configured listen addresses - info, err := c.rtr.GetNodeInfo(context.TODO(), peer.ID(c.self)) - if err != nil { - return nil - } - return info.Addrs -} - -// RoutingNotifications returns a channel that may be read to be notified of routing updates -func (c *Coordinator) RoutingNotifications() <-chan RoutingNotification { - return c.routingNotifications -} - func (c *Coordinator) eventLoop(ctx context.Context) { ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.eventLoop") defer span.End() @@ -295,11 +291,7 @@ func (c *Coordinator) dispatchEvent(ctx context.Context, ev BehaviourEvent) { case RoutingCommand: c.routingBehaviour.Notify(ctx, ev) case RoutingNotification: - select { - case <-ctx.Done(): - case c.routingNotifications <- ev: - default: - } + c.cfg.RoutingNotifier.Notify(ctx, ev) default: panic(fmt.Sprintf("unexpected event: %T", ev)) } @@ -307,14 +299,14 @@ func (c *Coordinator) dispatchEvent(ctx context.Context, ev BehaviourEvent) { // GetNode retrieves the node associated with the given node id from the DHT's local routing table. // If the node isn't found in the table, it returns ErrNodeNotFound. 
-func (c *Coordinator) GetNode(ctx context.Context, id peer.ID) (Node, error) { +func (c *Coordinator) GetNode(ctx context.Context, id kadt.PeerID) (Node, error) { ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.GetNode") defer span.End() - if _, exists := c.rt.GetNode(kadt.PeerID(id).Key()); !exists { + if _, exists := c.rt.GetNode(id.Key()); !exists { return nil, ErrNodeNotFound } - nh, err := c.networkBehaviour.getNodeHandler(ctx, kadt.PeerID(id)) + nh, err := c.networkBehaviour.getNodeHandler(ctx, id) if err != nil { return nil, err } @@ -362,9 +354,9 @@ func (c *Coordinator) Query(ctx context.Context, target kadt.Key, fn QueryFunc) return QueryStats{}, err } - seedIDs := make([]peer.ID, 0, len(seeds)) + seedIDs := make([]kadt.PeerID, 0, len(seeds)) for _, s := range seeds { - seedIDs = append(seedIDs, s.ID()) + seedIDs = append(seedIDs, kadt.PeerID(s.ID())) } waiter := NewWaiter[BehaviourEvent]() @@ -373,8 +365,6 @@ func (c *Coordinator) Query(ctx context.Context, target kadt.Key, fn QueryFunc) cmd := &EventStartQuery{ QueryID: queryID, Target: target, - ProtocolID: address.ProtocolID("TODO"), - Message: &fakeMessage{key: target}, KnownClosestNodes: seedIDs, Notify: waiter, } @@ -431,22 +421,20 @@ func (c *Coordinator) Query(ctx context.Context, target kadt.Key, fn QueryFunc) } } -// AddNodes suggests new DHT nodes and their associated addresses to be added to the routing table. +// AddNodes suggests new DHT nodes to be added to the routing table. // If the routing table is updated as a result of this operation an EventRoutingUpdated notification // is emitted on the routing notification channel. -func (c *Coordinator) AddNodes(ctx context.Context, ais []peer.AddrInfo) error { +func (c *Coordinator) AddNodes(ctx context.Context, ids []kadt.PeerID) error { ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.AddNodes") defer span.End() - for _, ai := range ais { - if ai.ID == peer.ID(c.self) { + for _, id := range ids { + if id.Equal(c.self) { // skip self continue } - // TODO: apply address filter - - c.routingBehaviour.Notify(ctx, &EventAddAddrInfo{ - NodeInfo: ai, + c.routingBehaviour.Notify(ctx, &EventAddNode{ + NodeID: id, }) } @@ -455,12 +443,10 @@ func (c *Coordinator) AddNodes(ctx context.Context, ais []peer.AddrInfo) error { } // Bootstrap instructs the dht to begin bootstrapping the routing table. 
-func (c *Coordinator) Bootstrap(ctx context.Context, seeds []peer.ID) error { +func (c *Coordinator) Bootstrap(ctx context.Context, seeds []kadt.PeerID) error { ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.Bootstrap") defer span.End() c.routingBehaviour.Notify(ctx, &EventStartBootstrap{ - // Bootstrap state machine uses the message - Message: &fakeMessage{key: kadt.PeerID(c.self).Key()}, SeedNodes: seeds, }) @@ -469,15 +455,12 @@ func (c *Coordinator) Bootstrap(ctx context.Context, seeds []peer.ID) error { // NotifyConnectivity notifies the coordinator that a peer has passed a connectivity check // which means it is connected and supports finding closer nodes -func (c *Coordinator) NotifyConnectivity(ctx context.Context, id peer.ID) error { +func (c *Coordinator) NotifyConnectivity(ctx context.Context, id kadt.PeerID) error { ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.NotifyConnectivity") defer span.End() - ai := peer.AddrInfo{ - ID: id, - } c.routingBehaviour.Notify(ctx, &EventNotifyConnectivity{ - NodeInfo: ai, + NodeID: id, }) return nil @@ -485,7 +468,7 @@ func (c *Coordinator) NotifyConnectivity(ctx context.Context, id peer.ID) error // NotifyNonConnectivity notifies the coordinator that a peer has failed a connectivity check // which means it is not connected and/or it doesn't support finding closer nodes -func (c *Coordinator) NotifyNonConnectivity(ctx context.Context, id peer.ID) error { +func (c *Coordinator) NotifyNonConnectivity(ctx context.Context, id kadt.PeerID) error { ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.NotifyNonConnectivity") defer span.End() @@ -495,3 +478,106 @@ func (c *Coordinator) NotifyNonConnectivity(ctx context.Context, id peer.ID) err return nil } + +// A BufferedRoutingNotifier is a [RoutingNotifier] that buffers [RoutingNotification] events and provides methods +// to expect occurrences of specific events. It is designed for use in a test environment. 
+type BufferedRoutingNotifier struct { + mu sync.Mutex + buffered []RoutingNotification + signal chan struct{} +} + +func NewBufferedRoutingNotifier() *BufferedRoutingNotifier { + return &BufferedRoutingNotifier{ + signal: make(chan struct{}, 1), + } +} + +func (w *BufferedRoutingNotifier) Notify(ctx context.Context, ev RoutingNotification) { + w.mu.Lock() + w.buffered = append(w.buffered, ev) + select { + case w.signal <- struct{}{}: + default: + } + w.mu.Unlock() +} + +func (w *BufferedRoutingNotifier) Expect(ctx context.Context, expected RoutingNotification) (RoutingNotification, error) { + for { + // look in buffered events + w.mu.Lock() + for i, ev := range w.buffered { + if reflect.TypeOf(ev) == reflect.TypeOf(expected) { + // remove first from buffer and return it + w.buffered = w.buffered[:i+copy(w.buffered[i:], w.buffered[i+1:])] + w.mu.Unlock() + return ev, nil + } + } + w.mu.Unlock() + + // wait to be signaled that there is a new event + select { + case <-ctx.Done(): + return nil, fmt.Errorf("test deadline exceeded while waiting for event %T", expected) + case <-w.signal: + } + } +} + +// ExpectRoutingUpdated blocks until an [EventRoutingUpdated] event is seen for the specified peer id +func (w *BufferedRoutingNotifier) ExpectRoutingUpdated(ctx context.Context, id kadt.PeerID) (*EventRoutingUpdated, error) { + for { + // look in buffered events + w.mu.Lock() + for i, ev := range w.buffered { + if tev, ok := ev.(*EventRoutingUpdated); ok { + if id.Equal(tev.NodeID) { + // remove first from buffer and return it + w.buffered = w.buffered[:i+copy(w.buffered[i:], w.buffered[i+1:])] + w.mu.Unlock() + return tev, nil + } + } + } + w.mu.Unlock() + + // wait to be signaled that there is a new event + select { + case <-ctx.Done(): + return nil, fmt.Errorf("test deadline exceeded while waiting for routing updated event") + case <-w.signal: + } + } +} + +// ExpectRoutingRemoved blocks until an [EventRoutingRemoved] event is seen for the specified peer id +func (w *BufferedRoutingNotifier) ExpectRoutingRemoved(ctx context.Context, id kadt.PeerID) (*EventRoutingRemoved, error) { + for { + // look in buffered events + w.mu.Lock() + for i, ev := range w.buffered { + if tev, ok := ev.(*EventRoutingRemoved); ok { + if id.Equal(tev.NodeID) { + // remove first from buffer and return it + w.buffered = w.buffered[:i+copy(w.buffered[i:], w.buffered[i+1:])] + w.mu.Unlock() + return tev, nil + } + } + } + w.mu.Unlock() + + // wait to be signaled that there is a new event + select { + case <-ctx.Done(): + return nil, fmt.Errorf("test deadline exceeded while waiting for routing removed event") + case <-w.signal: + } + } +} + +type nullRoutingNotifier struct{} + +func (nullRoutingNotifier) Notify(context.Context, RoutingNotification) {} diff --git a/v2/coord/coordinator_test.go b/v2/coord/coordinator_test.go index f9b0e484..ba32444e 100644 --- a/v2/coord/coordinator_test.go +++ b/v2/coord/coordinator_test.go @@ -2,15 +2,11 @@ package coord import ( "context" - "fmt" "log" - "reflect" - "sync" "testing" "time" "github.com/benbjohnson/clock" - "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest" @@ -20,58 +16,6 @@ import ( const peerstoreTTL = 10 * time.Minute -type notificationWatcher struct { - mu sync.Mutex - buffered []RoutingNotification - signal chan struct{} -} - -func (w *notificationWatcher) Watch(t *testing.T, ctx context.Context, ch <-chan RoutingNotification) { - t.Helper() - w.signal = make(chan struct{}, 1) 
- go func() { - for { - select { - case <-ctx.Done(): - return - case ev := <-ch: - w.mu.Lock() - t.Logf("buffered routing notification: %T\n", ev) - w.buffered = append(w.buffered, ev) - select { - case w.signal <- struct{}{}: - default: - } - w.mu.Unlock() - - } - } - }() -} - -func (w *notificationWatcher) Expect(ctx context.Context, expected RoutingNotification) (RoutingNotification, error) { - for { - // look in buffered events - w.mu.Lock() - for i, ev := range w.buffered { - if reflect.TypeOf(ev) == reflect.TypeOf(expected) { - // remove first from buffer and return it - w.buffered = w.buffered[:i+copy(w.buffered[i:], w.buffered[i+1:])] - w.mu.Unlock() - return ev, nil - } - } - w.mu.Unlock() - - // wait to be signaled that there is a new event - select { - case <-ctx.Done(): - return nil, fmt.Errorf("test deadline exceeded while waiting for event %T", expected) - case <-w.signal: - } - } -} - func TestConfigValidate(t *testing.T) { t.Run("default is valid", func(t *testing.T) { cfg := DefaultCoordinatorConfig() @@ -140,6 +84,12 @@ func TestConfigValidate(t *testing.T) { cfg.TracerProvider = nil require.Error(t, cfg.Validate()) }) + + t.Run("routing notifier not nil", func(t *testing.T) { + cfg := DefaultCoordinatorConfig() + cfg.RoutingNotifier = nil + require.Error(t, cfg.Validate()) + }) } func TestExhaustiveQuery(t *testing.T) { @@ -156,11 +106,11 @@ func TestExhaustiveQuery(t *testing.T) { // A (ids[0]) is looking for D (ids[3]) // A will first ask B, B will reply with C's address (and A's address) // A will then ask C, C will reply with D's address (and B's address) - self := kadt.PeerID(nodes[0].NodeInfo.ID) + self := kadt.PeerID(nodes[0].NodeID) c, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg) require.NoError(t, err) - target := kadt.PeerID(nodes[3].NodeInfo.ID).Key() + target := kadt.PeerID(nodes[3].NodeID).Key() visited := make(map[string]int) @@ -175,9 +125,9 @@ func TestExhaustiveQuery(t *testing.T) { require.NoError(t, err) require.Equal(t, 3, len(visited)) - require.Contains(t, visited, nodes[1].NodeInfo.ID.String()) - require.Contains(t, visited, nodes[2].NodeInfo.ID.String()) - require.Contains(t, visited, nodes[3].NodeInfo.ID.String()) + require.Contains(t, visited, nodes[1].NodeID.String()) + require.Contains(t, visited, nodes[2].NodeID.String()) + require.Contains(t, visited, nodes[3].NodeID.String()) } func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { @@ -192,24 +142,24 @@ func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { ccfg.Clock = clk ccfg.PeerstoreTTL = peerstoreTTL + rn := NewBufferedRoutingNotifier() + ccfg.RoutingNotifier = rn + // A (ids[0]) is looking for D (ids[3]) // A will first ask B, B will reply with C's address (and A's address) // A will then ask C, C will reply with D's address (and B's address) - self := kadt.PeerID(nodes[0].NodeInfo.ID) + self := kadt.PeerID(nodes[0].NodeID) c, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg) if err != nil { log.Fatalf("unexpected error creating coordinator: %v", err) } - w := new(notificationWatcher) - w.Watch(t, ctx, c.RoutingNotifications()) - qfn := func(ctx context.Context, node Node, stats QueryStats) error { return nil } // Run a query to find the value - target := kadt.PeerID(nodes[3].NodeInfo.ID).Key() + target := nodes[3].NodeID.Key() _, err = c.Query(ctx, target, qfn) require.NoError(t, err) @@ -224,20 +174,20 @@ func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { // However the order in which these 
events are emitted may vary depending on timing.
 
-	ev1, err := w.Expect(ctx, &EventRoutingUpdated{})
+	ev1, err := rn.Expect(ctx, &EventRoutingUpdated{})
 	require.NoError(t, err)
 	tev1 := ev1.(*EventRoutingUpdated)
 
-	ev2, err := w.Expect(ctx, &EventRoutingUpdated{})
+	ev2, err := rn.Expect(ctx, &EventRoutingUpdated{})
 	require.NoError(t, err)
 	tev2 := ev2.(*EventRoutingUpdated)
 
-	if tev1.NodeInfo.ID == nodes[2].NodeInfo.ID {
-		require.Equal(t, nodes[3].NodeInfo.ID, tev2.NodeInfo.ID)
-	} else if tev2.NodeInfo.ID == nodes[2].NodeInfo.ID {
-		require.Equal(t, nodes[3].NodeInfo.ID, tev1.NodeInfo.ID)
+	if tev1.NodeID.Equal(nodes[2].NodeID) {
+		require.Equal(t, nodes[3].NodeID, tev2.NodeID)
+	} else if tev2.NodeID.Equal(nodes[2].NodeID) {
+		require.Equal(t, nodes[3].NodeID, tev1.NodeID)
 	} else {
-		require.Failf(t, "did not see routing updated event for %s", nodes[2].NodeInfo.ID.String())
+		require.Failf(t, "routing updated event not seen", "did not see routing updated event for %s", nodes[2].NodeID.String())
 	}
 }
 
@@ -253,19 +203,19 @@ func TestBootstrap(t *testing.T) {
 	ccfg.Clock = clk
 	ccfg.PeerstoreTTL = peerstoreTTL
 
-	self := kadt.PeerID(nodes[0].NodeInfo.ID)
+	rn := NewBufferedRoutingNotifier()
+	ccfg.RoutingNotifier = rn
+
+	self := kadt.PeerID(nodes[0].NodeID)
 	d, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg)
 	require.NoError(t, err)
 
-	w := new(notificationWatcher)
-	w.Watch(t, ctx, d.RoutingNotifications())
-
-	seeds := []peer.ID{nodes[1].NodeInfo.ID}
+	seeds := []kadt.PeerID{nodes[1].NodeID}
 	err = d.Bootstrap(ctx, seeds)
 	require.NoError(t, err)
 
 	// the query run by the dht should have completed
-	ev, err := w.Expect(ctx, &EventBootstrapFinished{})
+	ev, err := rn.Expect(ctx, &EventBootstrapFinished{})
 	require.NoError(t, err)
 	require.IsType(t, &EventBootstrapFinished{}, ev)
 
@@ -274,22 +224,22 @@
 	require.Equal(t, 3, tevf.Stats.Success)
 	require.Equal(t, 0, tevf.Stats.Failure)
 
-	_, err = w.Expect(ctx, &EventRoutingUpdated{})
+	_, err = rn.Expect(ctx, &EventRoutingUpdated{})
 	require.NoError(t, err)
 
-	_, err = w.Expect(ctx, &EventRoutingUpdated{})
+	_, err = rn.Expect(ctx, &EventRoutingUpdated{})
 	require.NoError(t, err)
 
 	// coordinator will have node1 in its routing table
-	_, err = d.GetNode(ctx, nodes[1].NodeInfo.ID)
+	_, err = d.GetNode(ctx, nodes[1].NodeID)
 	require.NoError(t, err)
 
 	// coordinator should now have node2 in its routing table
-	_, err = d.GetNode(ctx, nodes[2].NodeInfo.ID)
+	_, err = d.GetNode(ctx, nodes[2].NodeID)
 	require.NoError(t, err)
 
 	// coordinator should now have node3 in its routing table
-	_, err = d.GetNode(ctx, nodes[3].NodeInfo.ID)
+	_, err = d.GetNode(ctx, nodes[3].NodeID)
 	require.NoError(t, err)
 }
 
@@ -305,33 +255,33 @@ func TestIncludeNode(t *testing.T) {
 	ccfg.Clock = clk
 	ccfg.PeerstoreTTL = peerstoreTTL
 
-	candidate := nodes[len(nodes)-1].NodeInfo // not in nodes[0] routing table
+	rn := NewBufferedRoutingNotifier()
+	ccfg.RoutingNotifier = rn
 
-	self := kadt.PeerID(nodes[0].NodeInfo.ID)
+	candidate := nodes[len(nodes)-1].NodeID // not in nodes[0] routing table
+
+	self := nodes[0].NodeID
 	d, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg)
 	if err != nil {
 		log.Fatalf("unexpected error creating dht: %v", err)
 	}
 
 	// the routing table should not contain the node yet
-	_, err = d.GetNode(ctx, candidate.ID)
+	_, err = d.GetNode(ctx, candidate)
 	require.ErrorIs(t, err, ErrNodeNotFound)
 
-	w := new(notificationWatcher)
-	w.Watch(t, ctx, d.RoutingNotifications())
-
 	// inject a new node
-	err = d.AddNodes(ctx,
[]peer.AddrInfo{candidate}) + err = d.AddNodes(ctx, []kadt.PeerID{candidate}) require.NoError(t, err) // the include state machine runs in the background and eventually should add the node to routing table - ev, err := w.Expect(ctx, &EventRoutingUpdated{}) + ev, err := rn.Expect(ctx, &EventRoutingUpdated{}) require.NoError(t, err) tev := ev.(*EventRoutingUpdated) - require.Equal(t, candidate.ID, tev.NodeInfo.ID) + require.Equal(t, candidate, tev.NodeID) // the routing table should now contain the node - _, err = d.GetNode(ctx, candidate.ID) + _, err = d.GetNode(ctx, candidate) require.NoError(t, err) } diff --git a/v2/coord/coretypes.go b/v2/coord/coretypes.go index 8da79942..0f72cebf 100644 --- a/v2/coord/coretypes.go +++ b/v2/coord/coretypes.go @@ -5,12 +5,9 @@ import ( "errors" "time" - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" - "github.com/plprobelab/go-kademlia/network/address" + "github.com/plprobelab/go-kademlia/kad" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" - "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) // Value is a value that may be stored in the DHT. @@ -22,10 +19,7 @@ type Value interface { // Node represents the local or a remote node participating in the DHT. type Node interface { // ID returns the peer ID identifying this node. - ID() peer.ID - - // Addresses returns the network addresses associated with the given node. - Addresses() []ma.Multiaddr + ID() kadt.PeerID // GetClosestNodes requests the n closest nodes to the key from the node's // local routing table. The node may return fewer nodes than requested. @@ -74,17 +68,13 @@ var ( ErrSkipRemaining = errors.New("skip remaining nodes") ) -// Router its a work in progress -// TODO figure out the role of protocol identifiers -type Router interface { - // SendMessage attempts to send a request to another node. The Router will absorb the addresses in to into its - // internal nodestore. This method blocks until a response is received or an error is encountered. - SendMessage(ctx context.Context, to peer.AddrInfo, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error) +type Message interface{} - AddNodeInfo(ctx context.Context, info peer.AddrInfo, ttl time.Duration) error - GetNodeInfo(ctx context.Context, id peer.ID) (peer.AddrInfo, error) +type Router[K kad.Key[K], N kad.NodeID[K], M Message] interface { + // SendMessage attempts to send a request to another node. This method blocks until a response is received or an error is encountered. + SendMessage(ctx context.Context, to N, req M) (M, error) // GetClosestNodes attempts to send a request to another node asking it for nodes that it considers to be // closest to the target key. 
- GetClosestNodes(ctx context.Context, to peer.AddrInfo, target kadt.Key) ([]peer.AddrInfo, error) + GetClosestNodes(ctx context.Context, to N, target K) ([]N, error) } diff --git a/v2/coord/event.go b/v2/coord/event.go index 69a9d5d7..663cfee9 100644 --- a/v2/coord/event.go +++ b/v2/coord/event.go @@ -1,11 +1,6 @@ package coord import ( - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" - "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/network/address" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/query" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) @@ -48,9 +43,7 @@ type RoutingNotification interface { } type EventStartBootstrap struct { - ProtocolID address.ProtocolID - Message kad.Request[kadt.Key, ma.Multiaddr] - SeedNodes []peer.ID // TODO: peer.AddrInfo + SeedNodes []kadt.PeerID } func (*EventStartBootstrap) behaviourEvent() {} @@ -58,7 +51,7 @@ func (*EventStartBootstrap) routingCommand() {} type EventOutboundGetCloserNodes struct { QueryID query.QueryID - To peer.AddrInfo + To kadt.PeerID Target kadt.Key Notify Notify[BehaviourEvent] } @@ -70,9 +63,7 @@ func (*EventOutboundGetCloserNodes) networkCommand() {} type EventStartQuery struct { QueryID query.QueryID Target kadt.Key - ProtocolID address.ProtocolID - Message kad.Request[kadt.Key, ma.Multiaddr] - KnownClosestNodes []peer.ID + KnownClosestNodes []kadt.PeerID Notify NotifyCloser[BehaviourEvent] } @@ -86,22 +77,21 @@ type EventStopQuery struct { func (*EventStopQuery) behaviourEvent() {} func (*EventStopQuery) queryCommand() {} -// EventAddAddrInfo notifies the routing behaviour of a potential new peer or of additional addresses for -// an existing peer. -type EventAddAddrInfo struct { - NodeInfo peer.AddrInfo +// EventAddNode notifies the routing behaviour of a potential new peer. +type EventAddNode struct { + NodeID kadt.PeerID } -func (*EventAddAddrInfo) behaviourEvent() {} -func (*EventAddAddrInfo) routingCommand() {} +func (*EventAddNode) behaviourEvent() {} +func (*EventAddNode) routingCommand() {} // EventGetCloserNodesSuccess notifies a behaviour that a GetCloserNodes request, initiated by an // [EventOutboundGetCloserNodes] event has produced a successful response. type EventGetCloserNodesSuccess struct { QueryID query.QueryID - To peer.AddrInfo // To is the peer address that the GetCloserNodes request was sent to. + To kadt.PeerID // To is the peer that the GetCloserNodes request was sent to. Target kadt.Key - CloserNodes []peer.AddrInfo + CloserNodes []kadt.PeerID } func (*EventGetCloserNodesSuccess) behaviourEvent() {} @@ -111,7 +101,7 @@ func (*EventGetCloserNodesSuccess) nodeHandlerResponse() {} // [EventOutboundGetCloserNodes] event has failed to produce a valid response. type EventGetCloserNodesFailure struct { QueryID query.QueryID - To peer.AddrInfo // To is the peer address that the GetCloserNodes request was sent to. + To kadt.PeerID // To is the peer that the GetCloserNodes request was sent to. Target kadt.Key Err error } @@ -123,8 +113,8 @@ func (*EventGetCloserNodesFailure) nodeHandlerResponse() {} // response from a node. type EventQueryProgressed struct { QueryID query.QueryID - NodeID peer.ID - Response kad.Response[kadt.Key, ma.Multiaddr] + NodeID kadt.PeerID + Response Message Stats query.QueryStats } @@ -141,7 +131,7 @@ func (*EventQueryFinished) behaviourEvent() {} // EventRoutingUpdated is emitted by the coordinator when a new node has been verified and added to the routing table. 
type EventRoutingUpdated struct {
-	NodeInfo peer.AddrInfo
+	NodeID kadt.PeerID
 }
 
 func (*EventRoutingUpdated) behaviourEvent() {}
@@ -149,7 +139,7 @@ func (*EventRoutingUpdated) routingNotification() {}
 
 // EventRoutingRemoved is emitted by the coordinator when a node has been removed from the routing table.
 type EventRoutingRemoved struct {
-	NodeID peer.ID
+	NodeID kadt.PeerID
 }
 
 func (*EventRoutingRemoved) behaviourEvent() {}
@@ -169,7 +159,7 @@ func (*EventBootstrapFinished) routingNotification() {}
 // general connections to the host but only when it is confirmed that the peer responds to requests for closer
 // nodes.
 type EventNotifyConnectivity struct {
-	NodeInfo peer.AddrInfo
+	NodeID kadt.PeerID
 }
 
 func (*EventNotifyConnectivity) behaviourEvent() {}
@@ -178,7 +168,7 @@ func (*EventNotifyConnectivity) routingNotification() {}
 // EventNotifyNonConnectivity notifies a behaviour that a peer does not have connectivity and/or does not support
 // finding closer nodes.
 type EventNotifyNonConnectivity struct {
-	NodeID peer.ID
+	NodeID kadt.PeerID
 }
 
 func (*EventNotifyNonConnectivity) behaviourEvent() {}
diff --git a/v2/coord/event_test.go b/v2/coord/event_test.go
index b6afdd4a..2944be13 100644
--- a/v2/coord/event_test.go
+++ b/v2/coord/event_test.go
@@ -3,7 +3,7 @@ package coord
 var _ NetworkCommand = (*EventOutboundGetCloserNodes)(nil)
 
 var (
-	_ RoutingCommand = (*EventAddAddrInfo)(nil)
+	_ RoutingCommand = (*EventAddNode)(nil)
 	_ RoutingCommand = (*EventStartBootstrap)(nil)
 )
 
diff --git a/v2/coord/internal/nettest/layouts.go b/v2/coord/internal/nettest/layouts.go
index c90e544b..7fce42f0 100644
--- a/v2/coord/internal/nettest/layouts.go
+++ b/v2/coord/internal/nettest/layouts.go
@@ -2,10 +2,8 @@ package nettest
 
 import (
 	"context"
-	"fmt"
 
 	"github.com/benbjohnson/clock"
-	ma "github.com/multiformats/go-multiaddr"
 	"github.com/plprobelab/go-kademlia/routing/simplert"
 
 	"github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
@@ -17,45 +15,40 @@ import (
 // The topology is not a ring: nodes[0] only has nodes[1] in its table and nodes[n-1] only has nodes[n-2] in its table.
 // nodes[1] has nodes[0] and nodes[2] in its routing table.
 // If n > 2 then the first and last nodes will not have one another in their routing tables.
-func LinearTopology(n int, clk clock.Clock) (*Topology, []*Node, error) { - nodes := make([]*Node, n) +func LinearTopology(n int, clk clock.Clock) (*Topology, []*Peer, error) { + nodes := make([]*Peer, n) top := NewTopology(clk) for i := range nodes { - a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i)) + id, err := NewPeerID() if err != nil { return nil, nil, err } - ai, err := NewAddrInfo([]ma.Multiaddr{a}) - if err != nil { - return nil, nil, err - } - - nodes[i] = &Node{ - NodeInfo: ai, - Router: NewRouter(ai.ID, top), - RoutingTable: simplert.New[kadt.Key, kadt.PeerID](kadt.PeerID(ai.ID), 20), + nodes[i] = &Peer{ + NodeID: id, + Router: NewRouter(id, top), + RoutingTable: simplert.New[kadt.Key, kadt.PeerID](id, 20), } } // Define the network topology, with default network links between every node for i := 0; i < len(nodes); i++ { for j := i + 1; j < len(nodes); j++ { - top.ConnectNodes(nodes[i], nodes[j]) + top.ConnectPeers(nodes[i], nodes[j]) } } // Connect nodes in a chain for i := 0; i < len(nodes); i++ { if i > 0 { - nodes[i].Router.AddNodeInfo(context.Background(), nodes[i-1].NodeInfo, 0) - nodes[i].RoutingTable.AddNode(kadt.PeerID(nodes[i-1].NodeInfo.ID)) + nodes[i].Router.AddToPeerStore(context.Background(), nodes[i-1].NodeID) + nodes[i].RoutingTable.AddNode(kadt.PeerID(nodes[i-1].NodeID)) } if i < len(nodes)-1 { - nodes[i].Router.AddNodeInfo(context.Background(), nodes[i+1].NodeInfo, 0) - nodes[i].RoutingTable.AddNode(kadt.PeerID(nodes[i+1].NodeInfo.ID)) + nodes[i].Router.AddToPeerStore(context.Background(), nodes[i+1].NodeID) + nodes[i].RoutingTable.AddNode(kadt.PeerID(nodes[i+1].NodeID)) } } diff --git a/v2/coord/internal/nettest/routing.go b/v2/coord/internal/nettest/routing.go index 7553674f..880e27e4 100644 --- a/v2/coord/internal/nettest/routing.go +++ b/v2/coord/internal/nettest/routing.go @@ -9,7 +9,6 @@ import ( "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/network/address" @@ -21,20 +20,17 @@ import ( var rng = rand.New(rand.NewSource(6283185)) -func NewAddrInfo(addrs []ma.Multiaddr) (peer.AddrInfo, error) { +func NewPeerID() (kadt.PeerID, error) { _, pub, err := crypto.GenerateEd25519Key(rng) if err != nil { - return peer.AddrInfo{}, err + return kadt.PeerID(""), err } pid, err := peer.IDFromPublicKey(pub) if err != nil { - return peer.AddrInfo{}, err + return kadt.PeerID(""), err } - return peer.AddrInfo{ - ID: pid, - Addrs: addrs, - }, nil + return kadt.PeerID(pid), nil } // Link represents the route between two nodes. It allows latency and transport failures to be simulated. 
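An aside on the Link abstraction above: DefaultLink models a perfect network. A degraded route for tests could look like the following sketch (hypothetical, not part of this patch; it assumes Link is satisfied by exactly the three methods shown here and that the surrounding package imports time):

    // FlakyLink simulates a slow route whose dial attempts can be made to fail.
    type FlakyLink struct {
        Latency time.Duration // returned as both dial and connection latency
        Err     error         // returned from every dial attempt; nil means dials succeed
    }

    func (l *FlakyLink) DialErr() error             { return l.Err }
    func (l *FlakyLink) ConnLatency() time.Duration { return l.Latency }
    func (l *FlakyLink) DialLatency() time.Duration { return l.Latency }

Wired in via ConnectPeersWithRoute below, e.g. top.ConnectPeersWithRoute(a, b, &FlakyLink{Latency: 50 * time.Millisecond}), the latency is served by the shared mock clock, so tests remain deterministic.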
@@ -53,22 +49,22 @@ func (l *DefaultLink) ConnLatency() time.Duration { return 0 } func (l *DefaultLink) DialLatency() time.Duration { return 0 } type Router struct { - self peer.ID + self kadt.PeerID top *Topology mu sync.Mutex // guards nodes - nodes map[peer.ID]*nodeStatus + nodes map[string]*nodeStatus } type nodeStatus struct { - NodeInfo peer.AddrInfo + NodeID kadt.PeerID Connectedness endpoint.Connectedness } -func NewRouter(self peer.ID, top *Topology) *Router { +func NewRouter(self kadt.PeerID, top *Topology) *Router { return &Router{ self: self, top: top, - nodes: make(map[peer.ID]*nodeStatus), + nodes: make(map[string]*nodeStatus), } } @@ -76,28 +72,16 @@ func (r *Router) NodeID() kad.NodeID[kadt.Key] { return kadt.PeerID(r.self) } -func (r *Router) SendMessage(ctx context.Context, to peer.AddrInfo, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error) { - if err := r.AddNodeInfo(ctx, to, 0); err != nil { - return nil, fmt.Errorf("add node info: %w", err) - } - - if err := r.Dial(ctx, to); err != nil { - return nil, fmt.Errorf("dial: %w", err) - } - - return r.top.RouteMessage(ctx, r.self, to.ID, protoID, req) -} - -func (r *Router) HandleMessage(ctx context.Context, n peer.ID, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error) { +func (r *Router) handleMessage(ctx context.Context, n kadt.PeerID, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error) { closer := make([]*pb.Message_Peer, 0) r.mu.Lock() for _, n := range r.nodes { // only include self if it was the target of the request - if n.NodeInfo.ID == r.self && !key.Equal(kadt.PeerID(n.NodeInfo.ID).Key(), req.Target()) { + if n.NodeID.Equal(r.self) && !key.Equal(n.NodeID.Key(), req.Target()) { continue } - closer = append(closer, pb.FromAddrInfo(n.NodeInfo)) + closer = append(closer, pb.FromAddrInfo(peer.AddrInfo{ID: peer.ID(n.NodeID)})) } r.mu.Unlock() @@ -110,65 +94,68 @@ func (r *Router) HandleMessage(ctx context.Context, n peer.ID, protoID address.P return resp, nil } -func (r *Router) Dial(ctx context.Context, to peer.AddrInfo) error { +func (r *Router) dial(ctx context.Context, to kadt.PeerID) error { r.mu.Lock() - status, ok := r.nodes[to.ID] + status, ok := r.nodes[to.String()] r.mu.Unlock() - if ok { - switch status.Connectedness { - case endpoint.Connected: - return nil - case endpoint.CanConnect: - if _, err := r.top.Dial(ctx, r.self, to.ID); err != nil { - return err - } - - status.Connectedness = endpoint.Connected - r.mu.Lock() - r.nodes[to.ID] = status - r.mu.Unlock() - return nil + if !ok { + status = &nodeStatus{ + NodeID: to, + Connectedness: endpoint.CanConnect, } } - return endpoint.ErrUnknownPeer + + if status.Connectedness == endpoint.Connected { + return nil + } + if err := r.top.Dial(ctx, r.self, to); err != nil { + return err + } + + status.Connectedness = endpoint.Connected + r.mu.Lock() + r.nodes[to.String()] = status + r.mu.Unlock() + return nil } -func (r *Router) AddNodeInfo(ctx context.Context, info peer.AddrInfo, ttl time.Duration) error { +func (r *Router) AddToPeerStore(ctx context.Context, id kadt.PeerID) error { r.mu.Lock() defer r.mu.Unlock() - if _, ok := r.nodes[info.ID]; !ok { - r.nodes[info.ID] = &nodeStatus{ - NodeInfo: info, + if _, ok := r.nodes[id.String()]; !ok { + r.nodes[id.String()] = &nodeStatus{ + NodeID: id, Connectedness: endpoint.CanConnect, } } return nil } -func (r *Router) GetNodeInfo(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { - r.mu.Lock() - defer r.mu.Unlock() - - status, ok := r.nodes[id] - if !ok { - return 
peer.AddrInfo{}, fmt.Errorf("unknown node") +func (r *Router) SendMessage(ctx context.Context, to kadt.PeerID, req *pb.Message) (*pb.Message, error) { + if err := r.dial(ctx, to); err != nil { + return nil, fmt.Errorf("dial: %w", err) } - return status.NodeInfo, nil -} -func (r *Router) GetClosestNodes(ctx context.Context, to peer.AddrInfo, target kadt.Key) ([]peer.AddrInfo, error) { - protoID := address.ProtocolID("/test/1.0.0") + return r.top.RouteMessage(ctx, r.self, to, "", req) +} +func (r *Router) GetClosestNodes(ctx context.Context, to kadt.PeerID, target kadt.Key) ([]kadt.PeerID, error) { req := &pb.Message{ Type: pb.Message_FIND_NODE, Key: []byte("random-key"), } - resp, err := r.SendMessage(ctx, to, protoID, req) + resp, err := r.SendMessage(ctx, to, req) if err != nil { return nil, err } - return resp.CloserPeersAddrInfos(), nil + + // possibly learned about some new nodes + for _, id := range resp.CloserNodes() { + r.AddToPeerStore(ctx, id) + } + + return resp.CloserNodes(), nil } diff --git a/v2/coord/internal/nettest/topology.go b/v2/coord/internal/nettest/topology.go index 61653f23..96d6380a 100644 --- a/v2/coord/internal/nettest/topology.go +++ b/v2/coord/internal/nettest/topology.go @@ -13,8 +13,8 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) -type Node struct { - NodeInfo peer.AddrInfo +type Peer struct { + NodeID kadt.PeerID Router *Router RoutingTable routing.RoutingTableCpl[kadt.Key, kadt.PeerID] } @@ -22,37 +22,37 @@ type Node struct { type Topology struct { clk clock.Clock links map[string]Link - nodes []*Node - nodeIndex map[peer.ID]*Node - routers map[peer.ID]*Router + nodes []*Peer + nodeIndex map[string]*Peer + routers map[string]*Router } func NewTopology(clk clock.Clock) *Topology { return &Topology{ clk: clk, links: make(map[string]Link), - nodeIndex: make(map[peer.ID]*Node), - routers: make(map[peer.ID]*Router), + nodeIndex: make(map[string]*Peer), + routers: make(map[string]*Router), } } -func (t *Topology) Nodes() []*Node { +func (t *Topology) Peers() []*Peer { return t.nodes } -func (t *Topology) ConnectNodes(a *Node, b *Node) { - t.ConnectNodesWithRoute(a, b, &DefaultLink{}) +func (t *Topology) ConnectPeers(a *Peer, b *Peer) { + t.ConnectPeersWithRoute(a, b, &DefaultLink{}) } -func (t *Topology) ConnectNodesWithRoute(a *Node, b *Node, l Link) { - akey := a.NodeInfo.ID +func (t *Topology) ConnectPeersWithRoute(a *Peer, b *Peer, l Link) { + akey := a.NodeID.String() if _, exists := t.nodeIndex[akey]; !exists { t.nodeIndex[akey] = a t.nodes = append(t.nodes, a) t.routers[akey] = a.Router } - bkey := b.NodeInfo.ID + bkey := b.NodeID.String() if _, exists := t.nodeIndex[bkey]; !exists { t.nodeIndex[bkey] = b t.nodes = append(t.nodes, b) @@ -67,8 +67,8 @@ func (t *Topology) ConnectNodesWithRoute(a *Node, b *Node, l Link) { t.links[btoa] = l } -func (t *Topology) findRoute(ctx context.Context, from peer.ID, to peer.ID) (Link, error) { - key := fmt.Sprintf("%s->%s", from, to) +func (t *Topology) findRoute(ctx context.Context, from kadt.PeerID, to kadt.PeerID) (Link, error) { + key := fmt.Sprintf("%s->%s", peer.ID(from), peer.ID(to)) route, ok := t.links[key] if !ok { @@ -78,19 +78,19 @@ func (t *Topology) findRoute(ctx context.Context, from peer.ID, to peer.ID) (Lin return route, nil } -func (t *Topology) Dial(ctx context.Context, from peer.ID, to peer.ID) (peer.AddrInfo, error) { +func (t *Topology) Dial(ctx context.Context, from kadt.PeerID, to kadt.PeerID) error { if from == to { - node, ok := t.nodeIndex[to] + _, ok := t.nodeIndex[to.String()] if 
!ok { - return peer.AddrInfo{}, fmt.Errorf("unknown node") + return fmt.Errorf("unknown node") } - return node.NodeInfo, nil + return nil } route, err := t.findRoute(ctx, from, to) if err != nil { - return peer.AddrInfo{}, fmt.Errorf("find route: %w", err) + return fmt.Errorf("find route: %w", err) } latency := route.DialLatency() @@ -99,25 +99,25 @@ func (t *Topology) Dial(ctx context.Context, from peer.ID, to peer.ID) (peer.Add } if err := route.DialErr(); err != nil { - return peer.AddrInfo{}, err + return err } - node, ok := t.nodeIndex[to] + _, ok := t.nodeIndex[to.String()] if !ok { - return peer.AddrInfo{}, fmt.Errorf("unknown node") + return fmt.Errorf("unknown node") } - return node.NodeInfo, nil + return nil } -func (t *Topology) RouteMessage(ctx context.Context, from peer.ID, to peer.ID, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error) { +func (t *Topology) RouteMessage(ctx context.Context, from kadt.PeerID, to kadt.PeerID, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error) { if from == to { - node, ok := t.nodeIndex[to] + node, ok := t.nodeIndex[to.String()] if !ok { return nil, fmt.Errorf("unknown node") } - return node.Router.HandleMessage(ctx, from, protoID, req) + return node.Router.handleMessage(ctx, from, protoID, req) } route, err := t.findRoute(ctx, from, to) @@ -130,10 +130,10 @@ func (t *Topology) RouteMessage(ctx context.Context, from peer.ID, to peer.ID, p t.clk.Sleep(latency) } - node, ok := t.nodeIndex[to] + node, ok := t.nodeIndex[to.String()] if !ok { return nil, fmt.Errorf("no route to node") } - return node.Router.HandleMessage(ctx, from, protoID, req) + return node.Router.handleMessage(ctx, from, protoID, req) } diff --git a/v2/coord/network.go b/v2/coord/network.go index d2da896c..72369b6f 100644 --- a/v2/coord/network.go +++ b/v2/coord/network.go @@ -5,20 +5,18 @@ import ( "fmt" "sync" - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" - "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" "github.com/libp2p/go-libp2p-kad-dht/v2/coord/query" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) type NetworkBehaviour struct { // rtr is the message router used to send messages - rtr Router + rtr Router[kadt.Key, kadt.PeerID, *pb.Message] nodeHandlersMu sync.Mutex nodeHandlers map[kadt.PeerID]*NodeHandler // TODO: garbage collect node handlers @@ -31,7 +29,7 @@ type NetworkBehaviour struct { tracer trace.Tracer } -func NewNetworkBehaviour(rtr Router, logger *slog.Logger, tracer trace.Tracer) *NetworkBehaviour { +func NewNetworkBehaviour(rtr Router[kadt.Key, kadt.PeerID, *pb.Message], logger *slog.Logger, tracer trace.Tracer) *NetworkBehaviour { b := &NetworkBehaviour{ rtr: rtr, nodeHandlers: make(map[kadt.PeerID]*NodeHandler), @@ -53,10 +51,11 @@ func (b *NetworkBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { switch ev := ev.(type) { case *EventOutboundGetCloserNodes: b.nodeHandlersMu.Lock() - nh, ok := b.nodeHandlers[kadt.PeerID(ev.To.ID)] + p := kadt.PeerID(ev.To) + nh, ok := b.nodeHandlers[p] if !ok { - nh = NewNodeHandler(ev.To, b.rtr, b.logger, b.tracer) - b.nodeHandlers[kadt.PeerID(ev.To.ID)] = nh + nh = NewNodeHandler(p, b.rtr, b.logger, b.tracer) + b.nodeHandlers[p] = nh } b.nodeHandlersMu.Unlock() nh.Notify(ctx, ev) @@ -103,12 +102,8 @@ func (b *NetworkBehaviour) Perform(ctx context.Context) (BehaviourEvent, bool) { func (b *NetworkBehaviour) 
getNodeHandler(ctx context.Context, id kadt.PeerID) (*NodeHandler, error) { b.nodeHandlersMu.Lock() nh, ok := b.nodeHandlers[id] - if !ok || len(nh.Addresses()) == 0 { - info, err := b.rtr.GetNodeInfo(ctx, peer.ID(id)) - if err != nil { - return nil, err - } - nh = NewNodeHandler(info, b.rtr, b.logger, b.tracer) + if !ok { + nh = NewNodeHandler(id, b.rtr, b.logger, b.tracer) b.nodeHandlers[id] = nh } b.nodeHandlersMu.Unlock() @@ -116,14 +111,14 @@ func (b *NetworkBehaviour) getNodeHandler(ctx context.Context, id kadt.PeerID) ( } type NodeHandler struct { - self peer.AddrInfo - rtr Router + self kadt.PeerID + rtr Router[kadt.Key, kadt.PeerID, *pb.Message] queue *WorkQueue[NodeHandlerRequest] logger *slog.Logger tracer trace.Tracer } -func NewNodeHandler(self peer.AddrInfo, rtr Router, logger *slog.Logger, tracer trace.Tracer) *NodeHandler { +func NewNodeHandler(self kadt.PeerID, rtr Router[kadt.Key, kadt.PeerID, *pb.Message], logger *slog.Logger, tracer trace.Tracer) *NodeHandler { h := &NodeHandler{ self: self, rtr: rtr, @@ -172,12 +167,8 @@ func (h *NodeHandler) send(ctx context.Context, ev NodeHandlerRequest) bool { return false } -func (h *NodeHandler) ID() peer.ID { - return h.self.ID -} - -func (h *NodeHandler) Addresses() []ma.Multiaddr { - return h.self.Addrs +func (h *NodeHandler) ID() kadt.PeerID { + return h.self } // GetClosestNodes requests the n closest nodes to the key from the node's local routing table. @@ -233,20 +224,3 @@ func (h *NodeHandler) GetValue(ctx context.Context, key kadt.Key) (Value, error) func (h *NodeHandler) PutValue(ctx context.Context, r Value, q int) error { panic("not implemented") } - -type fakeMessage struct { - key kadt.Key - infos []kad.NodeInfo[kadt.Key, ma.Multiaddr] -} - -func (r fakeMessage) Target() kadt.Key { - return r.key -} - -func (r fakeMessage) CloserNodes() []kad.NodeInfo[kadt.Key, ma.Multiaddr] { - return r.infos -} - -func (r fakeMessage) EmptyResponse() kad.Response[kadt.Key, ma.Multiaddr] { - return &fakeMessage{} -} diff --git a/v2/coord/network_test.go b/v2/coord/network_test.go index 4d2ca5b5..6baacf53 100644 --- a/v2/coord/network_test.go +++ b/v2/coord/network_test.go @@ -10,7 +10,6 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" - "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) // TODO: this is just a basic is-it-working test that needs to be improved @@ -21,10 +20,10 @@ func TestGetClosestNodes(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - h := NewNodeHandler(nodes[1].NodeInfo, nodes[1].Router, slog.Default(), trace.NewNoopTracerProvider().Tracer("")) + h := NewNodeHandler(nodes[1].NodeID, nodes[1].Router, slog.Default(), trace.NewNoopTracerProvider().Tracer("")) // node 1 has node 2 in its routing table so it will return it along with node 0 - found, err := h.GetClosestNodes(ctx, kadt.PeerID(nodes[2].NodeInfo.ID).Key(), 2) + found, err := h.GetClosestNodes(ctx, nodes[2].NodeID.Key(), 2) require.NoError(t, err) for _, f := range found { t.Logf("found node %v", f.ID()) diff --git a/v2/coord/query.go b/v2/coord/query.go index 6857fc6d..b8ebb982 100644 --- a/v2/coord/query.go +++ b/v2/coord/query.go @@ -48,7 +48,7 @@ func (p *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { cmd = &query.EventPoolAddQuery[kadt.Key, kadt.PeerID]{ QueryID: ev.QueryID, Target: ev.Target, - KnownClosestNodes: sliceOfPeerIDToSliceOfKadPeerID(ev.KnownClosestNodes), + KnownClosestNodes: 
ev.KnownClosestNodes, } if ev.Notify != nil { p.waiters[ev.QueryID] = ev.Notify @@ -60,34 +60,36 @@ func (p *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { } case *EventGetCloserNodesSuccess: + // TODO: add addresses for discovered nodes in DHT + for _, info := range ev.CloserNodes { // TODO: do this after advancing pool - p.pending = append(p.pending, &EventAddAddrInfo{ - NodeInfo: info, + p.pending = append(p.pending, &EventAddNode{ + NodeID: info, }) } waiter, ok := p.waiters[ev.QueryID] if ok { waiter.Notify(ctx, &EventQueryProgressed{ - NodeID: ev.To.ID, + NodeID: ev.To, QueryID: ev.QueryID, // CloserNodes: CloserNodeIDs(ev.CloserNodes), // Stats: stats, }) } cmd = &query.EventPoolFindCloserResponse[kadt.Key, kadt.PeerID]{ - NodeID: kadt.PeerID(ev.To.ID), + NodeID: ev.To, QueryID: ev.QueryID, - CloserNodes: sliceOfAddrInfoToSliceOfKadPeerID(ev.CloserNodes), + CloserNodes: ev.CloserNodes, } case *EventGetCloserNodesFailure: // queue an event that will notify the routing behaviour of a failed node p.pending = append(p.pending, &EventNotifyNonConnectivity{ - ev.To.ID, + ev.To, }) cmd = &query.EventPoolFindCloserFailure[kadt.Key, kadt.PeerID]{ - NodeID: kadt.PeerID(ev.To.ID), + NodeID: ev.To, QueryID: ev.QueryID, Error: ev.Err, } @@ -156,7 +158,7 @@ func (p *PooledQueryBehaviour) advancePool(ctx context.Context, ev query.PoolEve case *query.StatePoolFindCloser[kadt.Key, kadt.PeerID]: return &EventOutboundGetCloserNodes{ QueryID: st.QueryID, - To: kadPeerIDToAddrInfo(st.NodeID), + To: st.NodeID, Target: st.Target, Notify: p, }, true diff --git a/v2/coord/routing.go b/v2/coord/routing.go index f9edbe3f..ead1b107 100644 --- a/v2/coord/routing.go +++ b/v2/coord/routing.go @@ -5,7 +5,6 @@ import ( "fmt" "sync" - "github.com/libp2p/go-libp2p/core/peer" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" @@ -66,7 +65,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { case *EventStartBootstrap: span.SetAttributes(attribute.String("event", "EventStartBootstrap")) cmd := &routing.EventBootstrapStart[kadt.Key, kadt.PeerID]{ - KnownClosestNodes: sliceOfPeerIDToSliceOfKadPeerID(ev.SeedNodes), + KnownClosestNodes: ev.SeedNodes, } // attempt to advance the bootstrap next, ok := r.advanceBootstrap(ctx, cmd) @@ -74,15 +73,15 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { r.pending = append(r.pending, next) } - case *EventAddAddrInfo: + case *EventAddNode: span.SetAttributes(attribute.String("event", "EventAddAddrInfo")) // Ignore self - if ev.NodeInfo.ID == peer.ID(r.self) { + if r.self.Equal(ev.NodeID) { break } // TODO: apply ttl cmd := &routing.EventIncludeAddCandidate[kadt.Key, kadt.PeerID]{ - NodeID: kadt.PeerID(ev.NodeInfo.ID), + NodeID: ev.NodeID, } // attempt to advance the include next, ok := r.advanceInclude(ctx, cmd) @@ -91,9 +90,9 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { } case *EventRoutingUpdated: - span.SetAttributes(attribute.String("event", "EventRoutingUpdated"), attribute.String("nodeid", ev.NodeInfo.ID.String())) + span.SetAttributes(attribute.String("event", "EventRoutingUpdated"), attribute.String("nodeid", ev.NodeID.String())) cmd := &routing.EventProbeAdd[kadt.Key, kadt.PeerID]{ - NodeID: addrInfoToKadPeerID(ev.NodeInfo), + NodeID: ev.NodeID, } // attempt to advance the probe state machine next, ok := r.advanceProbe(ctx, cmd) @@ -107,13 +106,13 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev 
BehaviourEvent) {
	case "bootstrap":
		for _, info := range ev.CloserNodes {
			// TODO: do this after advancing bootstrap
-			r.pending = append(r.pending, &EventAddAddrInfo{
-				NodeInfo: info,
+			r.pending = append(r.pending, &EventAddNode{
+				NodeID: info,
			})
		}
		cmd := &routing.EventBootstrapFindCloserResponse[kadt.Key, kadt.PeerID]{
-			NodeID:      kadt.PeerID(ev.To.ID),
-			CloserNodes: sliceOfAddrInfoToSliceOfKadPeerID(ev.CloserNodes),
+			NodeID:      ev.To,
+			CloserNodes: ev.CloserNodes,
		}
		// attempt to advance the bootstrap
		next, ok := r.advanceBootstrap(ctx, cmd)
@@ -123,15 +122,14 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) {
	case "include":
		var cmd routing.IncludeEvent
-		// require that the node responded with at least one closer node
		if len(ev.CloserNodes) > 0 {
			cmd = &routing.EventIncludeConnectivityCheckSuccess[kadt.Key, kadt.PeerID]{
-				NodeID: kadt.PeerID(ev.To.ID),
+				NodeID: ev.To,
			}
		} else {
			cmd = &routing.EventIncludeConnectivityCheckFailure[kadt.Key, kadt.PeerID]{
-				NodeID: kadt.PeerID(ev.To.ID),
+				NodeID: ev.To,
				Error:  fmt.Errorf("response did not include any closer nodes"),
			}
		}
@@ -146,11 +144,11 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) {
		// require that the node responded with at least one closer node
		if len(ev.CloserNodes) > 0 {
			cmd = &routing.EventProbeConnectivityCheckSuccess[kadt.Key, kadt.PeerID]{
-				NodeID: kadt.PeerID(ev.To.ID),
+				NodeID: ev.To,
			}
		} else {
			cmd = &routing.EventProbeConnectivityCheckFailure[kadt.Key, kadt.PeerID]{
-				NodeID: kadt.PeerID(ev.To.ID),
+				NodeID: ev.To,
				Error:  fmt.Errorf("response did not include any closer nodes"),
			}
		}
@@ -169,7 +167,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) {
		switch ev.QueryID {
		case "bootstrap":
			cmd := &routing.EventBootstrapFindCloserFailure[kadt.Key, kadt.PeerID]{
-				NodeID: kadt.PeerID(ev.To.ID),
+				NodeID: ev.To,
				Error:  ev.Err,
			}
			// attempt to advance the bootstrap
@@ -179,7 +177,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) {
		}
		case "include":
			cmd := &routing.EventIncludeConnectivityCheckFailure[kadt.Key, kadt.PeerID]{
-				NodeID: kadt.PeerID(ev.To.ID),
+				NodeID: ev.To,
				Error:  ev.Err,
			}
			// attempt to advance the include state machine
@@ -189,7 +187,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) {
		}
		case "probe":
			cmd := &routing.EventProbeConnectivityCheckFailure[kadt.Key, kadt.PeerID]{
-				NodeID: kadt.PeerID(ev.To.ID),
+				NodeID: ev.To,
				Error:  ev.Err,
			}
			// attempt to advance the probe state machine
@@ -202,14 +200,14 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) {
			panic(fmt.Sprintf("unexpected query id: %s", ev.QueryID))
		}
	case *EventNotifyConnectivity:
-		span.SetAttributes(attribute.String("event", "EventNotifyConnectivity"), attribute.String("nodeid", ev.NodeInfo.ID.String()))
+		span.SetAttributes(attribute.String("event", "EventNotifyConnectivity"), attribute.String("nodeid", ev.NodeID.String()))
		// ignore self
-		if ev.NodeInfo.ID == peer.ID(r.self) {
+		if r.self.Equal(ev.NodeID) {
			break
		}
		// tell the include state machine in case this is a new peer that could be added to the routing table
		cmd := &routing.EventIncludeAddCandidate[kadt.Key, kadt.PeerID]{
-			NodeID: kadt.PeerID(ev.NodeInfo.ID),
+			NodeID: ev.NodeID,
		}
		next, ok := r.advanceInclude(ctx, cmd)
		if ok {
@@ -218,7 +216,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) {
		// tell the probe state machine in case there are connectivity checks that could be
satisfied cmdProbe := &routing.EventProbeNotifyConnectivity[kadt.Key, kadt.PeerID]{ - NodeID: kadt.PeerID(ev.NodeInfo.ID), + NodeID: ev.NodeID, } nextProbe, ok := r.advanceProbe(ctx, cmdProbe) if ok { @@ -308,7 +306,7 @@ func (r *RoutingBehaviour) advanceBootstrap(ctx context.Context, ev routing.Boot case *routing.StateBootstrapFindCloser[kadt.Key, kadt.PeerID]: return &EventOutboundGetCloserNodes{ QueryID: "bootstrap", - To: kadPeerIDToAddrInfo(st.NodeID), + To: st.NodeID, Target: st.Target, Notify: r, }, true @@ -339,7 +337,7 @@ func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.Includ // include wants to send a find node message to a node return &EventOutboundGetCloserNodes{ QueryID: "include", - To: kadPeerIDToAddrInfo(st.NodeID), + To: st.NodeID, Target: st.NodeID.Key(), Notify: r, }, true @@ -349,13 +347,13 @@ func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.Includ // notify other routing state machines that there is a new node in the routing table r.notify(ctx, &EventRoutingUpdated{ - NodeInfo: kadPeerIDToAddrInfo(st.NodeID), + NodeID: st.NodeID, }) // return the event to notify outwards too span.SetAttributes(attribute.String("out_event", "EventRoutingUpdated")) return &EventRoutingUpdated{ - NodeInfo: kadPeerIDToAddrInfo(st.NodeID), + NodeID: st.NodeID, }, true case *routing.StateIncludeWaitingAtCapacity: // nothing to do except wait for message response or timeout @@ -381,7 +379,7 @@ func (r *RoutingBehaviour) advanceProbe(ctx context.Context, ev routing.ProbeEve // include wants to send a find node message to a node return &EventOutboundGetCloserNodes{ QueryID: "probe", - To: kadPeerIDToAddrInfo(st.NodeID), + To: st.NodeID, Target: st.NodeID.Key(), Notify: r, }, true @@ -390,12 +388,12 @@ func (r *RoutingBehaviour) advanceProbe(ctx context.Context, ev routing.ProbeEve // emit an EventRoutingRemoved event to notify clients that the node has been removed r.pending = append(r.pending, &EventRoutingRemoved{ - NodeID: peer.ID(st.NodeID), + NodeID: st.NodeID, }) // add the node to the inclusion list for a second chance - r.notify(ctx, &EventAddAddrInfo{ - NodeInfo: kadPeerIDToAddrInfo(st.NodeID), + r.notify(ctx, &EventAddNode{ + NodeID: st.NodeID, }) case *routing.StateProbeWaitingAtCapacity: // the probe state machine is waiting for responses for checks and the maximum number of concurrent checks has been reached. 
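Taken together, the changes above remove peer.AddrInfo from the coordinator's event types: EventAddNode, EventRoutingUpdated, EventOutboundGetCloserNodes, and the other events now carry bare kadt.PeerID values, and multiaddresses are looked up in the libp2p peerstore only when a message is actually sent. A minimal sketch of the conversion this implies at the API boundary (the package name example and the helper peerIDsToKadt are hypothetical; only the kadt.PeerID(peer.ID) conversion is taken from this patch):

	package example

	import (
		"github.com/libp2p/go-libp2p/core/peer"

		"github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
	)

	// peerIDsToKadt converts libp2p peer IDs into the kadt.PeerID values the
	// coordinator's events now expect. kadt.PeerID is defined over peer.ID,
	// so each element is a plain type conversion.
	func peerIDsToKadt(pids []peer.ID) []kadt.PeerID {
		ids := make([]kadt.PeerID, 0, len(pids))
		for _, pid := range pids {
			ids = append(ids, kadt.PeerID(pid))
		}
		return ids
	}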
diff --git a/v2/coord/routing_test.go b/v2/coord/routing_test.go index ded02c3b..2b07d6d1 100644 --- a/v2/coord/routing_test.go +++ b/v2/coord/routing_test.go @@ -9,7 +9,6 @@ import ( "github.com/benbjohnson/clock" "github.com/libp2p/go-libp2p/core/peer" - "github.com/plprobelab/go-kademlia/network/address" "github.com/stretchr/testify/require" "golang.org/x/exp/slog" @@ -18,7 +17,6 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/coord/routing" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" - "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) func TestRoutingStartBootstrapSendsEvent(t *testing.T) { @@ -28,7 +26,7 @@ func TestRoutingStartBootstrapSendsEvent(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - self := kadt.PeerID(nodes[0].NodeInfo.ID) + self := nodes[0].NodeID // records the event passed to bootstrap bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) @@ -37,22 +35,15 @@ func TestRoutingStartBootstrapSendsEvent(t *testing.T) { routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) - req := &pb.Message{ - Type: pb.Message_FIND_NODE, - Key: []byte(self), - } - ev := &EventStartBootstrap{ - ProtocolID: address.ProtocolID("test"), - Message: req, - SeedNodes: []peer.ID{nodes[1].NodeInfo.ID}, + SeedNodes: []kadt.PeerID{nodes[1].NodeID}, } routingBehaviour.Notify(ctx, ev) // the event that should be passed to the bootstrap state machine expected := &routing.EventBootstrapStart[kadt.Key, kadt.PeerID]{ - KnownClosestNodes: sliceOfPeerIDToSliceOfKadPeerID(ev.SeedNodes), + KnownClosestNodes: ev.SeedNodes, } require.Equal(t, expected, bootstrap.Received) } @@ -64,7 +55,7 @@ func TestRoutingBootstrapGetClosestNodesSuccess(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - self := kadt.PeerID(nodes[0].NodeInfo.ID) + self := nodes[0].NodeID // records the event passed to bootstrap bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) @@ -75,9 +66,9 @@ func TestRoutingBootstrapGetClosestNodesSuccess(t *testing.T) { ev := &EventGetCloserNodesSuccess{ QueryID: query.QueryID("bootstrap"), - To: nodes[1].NodeInfo, - Target: kadt.PeerID(nodes[0].NodeInfo.ID).Key(), - CloserNodes: []peer.AddrInfo{nodes[2].NodeInfo}, + To: nodes[1].NodeID, + Target: nodes[0].NodeID.Key(), + CloserNodes: []kadt.PeerID{nodes[2].NodeID}, } routingBehaviour.Notify(ctx, ev) @@ -86,8 +77,8 @@ func TestRoutingBootstrapGetClosestNodesSuccess(t *testing.T) { require.IsType(t, &routing.EventBootstrapFindCloserResponse[kadt.Key, kadt.PeerID]{}, bootstrap.Received) rev := bootstrap.Received.(*routing.EventBootstrapFindCloserResponse[kadt.Key, kadt.PeerID]) - require.Equal(t, nodes[1].NodeInfo.ID, peer.ID(rev.NodeID)) - require.Equal(t, sliceOfAddrInfoToSliceOfKadPeerID(ev.CloserNodes), rev.CloserNodes) + require.True(t, nodes[1].NodeID.Equal(rev.NodeID)) + require.Equal(t, ev.CloserNodes, rev.CloserNodes) } func TestRoutingBootstrapGetClosestNodesFailure(t *testing.T) { @@ -97,7 +88,7 @@ func TestRoutingBootstrapGetClosestNodesFailure(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - self := kadt.PeerID(nodes[0].NodeInfo.ID) + self := nodes[0].NodeID // records the event passed to bootstrap bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) 
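The tests in this file drive RoutingBehaviour against recording stubs: NewRecordingSM returns a state machine that stores the last event passed to it and always answers with a fixed state, which is what the bootstrap.Received assertions rely on. A sketch of that shape, assuming only the usage visible in this diff (the actual implementation lives in a file outside this patch):

	package example

	import "context"

	// RecordingSM remembers the last event passed to Advance and always
	// returns the state it was constructed with.
	type RecordingSM[E any, S any] struct {
		Received E
		state    S
	}

	func NewRecordingSM[E any, S any](response S) *RecordingSM[E, S] {
		return &RecordingSM[E, S]{state: response}
	}

	func (r *RecordingSM[E, S]) Advance(ctx context.Context, ev E) S {
		r.Received = ev
		return r.state
	}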
@@ -109,8 +100,8 @@ func TestRoutingBootstrapGetClosestNodesFailure(t *testing.T) { failure := errors.New("failed") ev := &EventGetCloserNodesFailure{ QueryID: query.QueryID("bootstrap"), - To: nodes[1].NodeInfo, - Target: kadt.PeerID(nodes[0].NodeInfo.ID).Key(), + To: nodes[1].NodeID, + Target: nodes[0].NodeID.Key(), Err: failure, } @@ -120,7 +111,7 @@ func TestRoutingBootstrapGetClosestNodesFailure(t *testing.T) { require.IsType(t, &routing.EventBootstrapFindCloserFailure[kadt.Key, kadt.PeerID]{}, bootstrap.Received) rev := bootstrap.Received.(*routing.EventBootstrapFindCloserFailure[kadt.Key, kadt.PeerID]) - require.Equal(t, nodes[1].NodeInfo.ID, peer.ID(rev.NodeID)) + require.Equal(t, peer.ID(nodes[1].NodeID), peer.ID(rev.NodeID)) require.Equal(t, failure, rev.Error) } @@ -131,7 +122,7 @@ func TestRoutingAddNodeInfoSendsEvent(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - self := kadt.PeerID(nodes[0].NodeInfo.ID) + self := nodes[0].NodeID // records the event passed to include include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) @@ -141,15 +132,15 @@ func TestRoutingAddNodeInfoSendsEvent(t *testing.T) { routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) - ev := &EventAddAddrInfo{ - NodeInfo: nodes[2].NodeInfo, + ev := &EventAddNode{ + NodeID: nodes[2].NodeID, } routingBehaviour.Notify(ctx, ev) // the event that should be passed to the include state machine expected := &routing.EventIncludeAddCandidate[kadt.Key, kadt.PeerID]{ - NodeID: kadt.PeerID(ev.NodeInfo.ID), + NodeID: ev.NodeID, } require.Equal(t, expected, include.Received) } @@ -161,7 +152,7 @@ func TestRoutingIncludeGetClosestNodesSuccess(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - self := kadt.PeerID(nodes[0].NodeInfo.ID) + self := nodes[0].NodeID // records the event passed to include include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) @@ -173,9 +164,9 @@ func TestRoutingIncludeGetClosestNodesSuccess(t *testing.T) { ev := &EventGetCloserNodesSuccess{ QueryID: query.QueryID("include"), - To: nodes[1].NodeInfo, - Target: kadt.PeerID(nodes[0].NodeInfo.ID).Key(), - CloserNodes: []peer.AddrInfo{nodes[2].NodeInfo}, + To: nodes[1].NodeID, + Target: nodes[0].NodeID.Key(), + CloserNodes: []kadt.PeerID{nodes[2].NodeID}, } routingBehaviour.Notify(ctx, ev) @@ -184,7 +175,7 @@ func TestRoutingIncludeGetClosestNodesSuccess(t *testing.T) { require.IsType(t, &routing.EventIncludeConnectivityCheckSuccess[kadt.Key, kadt.PeerID]{}, include.Received) rev := include.Received.(*routing.EventIncludeConnectivityCheckSuccess[kadt.Key, kadt.PeerID]) - require.Equal(t, nodes[1].NodeInfo.ID, peer.ID(rev.NodeID)) + require.Equal(t, peer.ID(nodes[1].NodeID), peer.ID(rev.NodeID)) } func TestRoutingIncludeGetClosestNodesFailure(t *testing.T) { @@ -194,7 +185,7 @@ func TestRoutingIncludeGetClosestNodesFailure(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - self := kadt.PeerID(nodes[0].NodeInfo.ID) + self := nodes[0].NodeID // records the event passed to include include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) @@ -207,8 +198,8 @@ func TestRoutingIncludeGetClosestNodesFailure(t *testing.T) { failure := errors.New("failed") ev := &EventGetCloserNodesFailure{ QueryID: query.QueryID("include"), - To: nodes[1].NodeInfo, - Target: 
kadt.PeerID(nodes[0].NodeInfo.ID).Key(), + To: nodes[1].NodeID, + Target: nodes[0].NodeID.Key(), Err: failure, } @@ -218,7 +209,7 @@ func TestRoutingIncludeGetClosestNodesFailure(t *testing.T) { require.IsType(t, &routing.EventIncludeConnectivityCheckFailure[kadt.Key, kadt.PeerID]{}, include.Received) rev := include.Received.(*routing.EventIncludeConnectivityCheckFailure[kadt.Key, kadt.PeerID]) - require.Equal(t, nodes[1].NodeInfo.ID, peer.ID(rev.NodeID)) + require.Equal(t, peer.ID(nodes[1].NodeID), peer.ID(rev.NodeID)) require.Equal(t, failure, rev.Error) } @@ -229,7 +220,7 @@ func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - self := kadt.PeerID(nodes[0].NodeInfo.ID) + self := nodes[0].NodeID rt := nodes[0].RoutingTable includeCfg := routing.DefaultIncludeConfig() @@ -249,15 +240,15 @@ func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) // a new node to be included - candidate := nodes[len(nodes)-1].NodeInfo + candidate := nodes[len(nodes)-1].NodeID // the routing table should not contain the node yet - _, intable := rt.GetNode(kadt.PeerID(candidate.ID).Key()) + _, intable := rt.GetNode(candidate.Key()) require.False(t, intable) // notify that there is a new node to be included - routingBehaviour.Notify(ctx, &EventAddAddrInfo{ - NodeInfo: candidate, + routingBehaviour.Notify(ctx, &EventAddNode{ + NodeID: candidate, }) // collect the result of the notify @@ -277,11 +268,11 @@ func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { QueryID: oev.QueryID, To: oev.To, Target: oev.Target, - CloserNodes: []peer.AddrInfo{nodes[1].NodeInfo}, // must include one for include check to pass + CloserNodes: []kadt.PeerID{nodes[1].NodeID}, // must include one for include check to pass }) // the routing table should now contain the node - _, intable = rt.GetNode(kadt.PeerID(candidate.ID).Key()) + _, intable = rt.GetNode(candidate.Key()) require.True(t, intable) // routing update event should be emitted from the include state machine @@ -300,5 +291,5 @@ func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { // confirm that the message is for the correct node oev = dev.(*EventOutboundGetCloserNodes) require.Equal(t, query.QueryID("probe"), oev.QueryID) - require.Equal(t, candidate.ID, oev.To.ID) + require.Equal(t, candidate, oev.To) } diff --git a/v2/dht.go b/v2/dht.go index 06086d81..559a5288 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -150,12 +150,12 @@ func New(h host.Host, cfg *Config) (*DHT, error) { } // instantiate a new Kademlia DHT coordinator. 
- coordCfg := coord.DefaultCoordinatorConfig() + coordCfg := cfg.Kademlia coordCfg.Clock = cfg.Clock coordCfg.MeterProvider = cfg.MeterProvider coordCfg.TracerProvider = cfg.TracerProvider - d.kad, err = coord.NewCoordinator(kadt.PeerID(d.host.ID()), &Router{host: h}, d.rt, coordCfg) + d.kad, err = coord.NewCoordinator(kadt.PeerID(d.host.ID()), &Router{host: h, ProtocolID: cfg.ProtocolID}, d.rt, coordCfg) if err != nil { return nil, fmt.Errorf("new coordinator: %w", err) } @@ -309,12 +309,16 @@ func (d *DHT) AddAddresses(ctx context.Context, ais []peer.AddrInfo, ttl time.Du ctx, span := d.tele.Tracer.Start(ctx, "DHT.AddAddresses") defer span.End() + ids := make([]kadt.PeerID, 0, len(ais)) + ps := d.host.Peerstore() for _, ai := range ais { + // TODO: apply address filter ps.AddAddrs(ai.ID, ai.Addrs, ttl) + ids = append(ids, kadt.PeerID(ai.ID)) } - return d.kad.AddNodes(ctx, ais) + return d.kad.AddNodes(ctx, ids) } // newSHA256Key returns a [kadt.KadKey] that conforms to the [kad.Key] interface by diff --git a/v2/dht_test.go b/v2/dht_test.go index 29993a58..6296dbf3 100644 --- a/v2/dht_test.go +++ b/v2/dht_test.go @@ -1,9 +1,6 @@ package dht import ( - "context" - "fmt" - "reflect" "testing" "time" @@ -14,6 +11,7 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/coord" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) func TestNew(t *testing.T) { @@ -75,26 +73,12 @@ func TestNew(t *testing.T) { } } -// expectEventType selects on the event channel until an event of the expected type is sent. -func expectEventType(t *testing.T, ctx context.Context, events <-chan coord.RoutingNotification, expected coord.RoutingNotification) (coord.RoutingNotification, error) { - t.Helper() - for { - select { - case ev := <-events: - t.Logf("saw event: %T\n", ev) - if reflect.TypeOf(ev) == reflect.TypeOf(expected) { - return ev, nil - } - case <-ctx.Done(): - return nil, fmt.Errorf("test deadline exceeded while waiting for event %T", expected) - } - } -} - func TestAddAddresses(t *testing.T) { ctx := kadtest.CtxShort(t) localCfg := DefaultConfig() + rn := coord.NewBufferedRoutingNotifier() + localCfg.Kademlia.RoutingNotifier = rn local := newClientDht(t, localCfg) @@ -104,7 +88,7 @@ func TestAddAddresses(t *testing.T) { fillRoutingTable(t, remote, 1) // local routing table should not contain the node - _, err := local.kad.GetNode(ctx, remote.host.ID()) + _, err := local.kad.GetNode(ctx, kadt.PeerID(remote.host.ID())) require.ErrorIs(t, err, coord.ErrNodeNotFound) remoteAddrInfo := peer.AddrInfo{ @@ -119,10 +103,10 @@ func TestAddAddresses(t *testing.T) { require.NoError(t, err) // the include state machine runs in the background and eventually should add the node to routing table - _, err = expectEventType(t, ctx, local.kad.RoutingNotifications(), &coord.EventRoutingUpdated{}) + _, err = rn.Expect(ctx, &coord.EventRoutingUpdated{}) require.NoError(t, err) // the routing table should now contain the node - _, err = local.kad.GetNode(ctx, remote.host.ID()) + _, err = local.kad.GetNode(ctx, kadt.PeerID(remote.host.ID())) require.NoError(t, err) } diff --git a/v2/handlers.go b/v2/handlers.go index bcd89f9a..5b8536f3 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -25,7 +25,7 @@ func (d *DHT) handleFindPeer(ctx context.Context, remote peer.ID, req *pb.Messag } // tell the coordinator that this peer supports finding closer nodes - d.kad.NotifyConnectivity(ctx, remote) + d.kad.NotifyConnectivity(ctx, kadt.PeerID(remote)) // "parse" requested peer ID from 
the key field target := peer.ID(req.GetKey()) diff --git a/v2/internal/kadtest/context.go b/v2/internal/kadtest/context.go index 41623c08..8a69328c 100644 --- a/v2/internal/kadtest/context.go +++ b/v2/internal/kadtest/context.go @@ -2,6 +2,7 @@ package kadtest import ( "context" + "runtime" "testing" "time" ) @@ -13,7 +14,13 @@ import ( func CtxShort(t *testing.T) context.Context { t.Helper() - timeout := 10 * time.Second + var timeout time.Duration + // Increase the timeout for 32-bit Windows + if runtime.GOOS == "windows" && runtime.GOARCH == "386" { + timeout = 60 * time.Second + } else { + timeout = 10 * time.Second + } goal := time.Now().Add(timeout) deadline, ok := t.Deadline() diff --git a/v2/kadt/kadt.go b/v2/kadt/kadt.go index 9de3e6e9..f87057a8 100644 --- a/v2/kadt/kadt.go +++ b/v2/kadt/kadt.go @@ -39,6 +39,11 @@ func (p PeerID) String() string { return peer.ID(p).String() } +// Equal compares the [PeerID] with another by comparing the underlying [peer.ID]. +func (p PeerID) Equal(o PeerID) bool { + return peer.ID(p) == peer.ID(o) +} + // AddrInfo is a type that wraps peer.AddrInfo and implements the kad.NodeInfo // interface. This means we can use AddrInfo for any operation that interfaces // with go-kademlia. diff --git a/v2/notifee.go b/v2/notifee.go index d1889428..0666e836 100644 --- a/v2/notifee.go +++ b/v2/notifee.go @@ -4,9 +4,9 @@ import ( "context" "fmt" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" ) // networkEventsSubscription registers a subscription on the libp2p event bus @@ -90,7 +90,5 @@ func (d *DHT) onEvtLocalReachabilityChanged(evt event.EvtLocalReachabilityChange func (d *DHT) onEvtPeerIdentificationCompleted(evt event.EvtPeerIdentificationCompleted) { // tell the coordinator about a new candidate for inclusion in the routing table - d.kad.AddNodes(context.Background(), []peer.AddrInfo{ - {ID: evt.Peer}, - }) + d.kad.AddNodes(context.Background(), []kadt.PeerID{kadt.PeerID(evt.Peer)}) } diff --git a/v2/notifee_test.go b/v2/notifee_test.go index a42f82bf..b7079ac6 100644 --- a/v2/notifee_test.go +++ b/v2/notifee_test.go @@ -4,7 +4,9 @@ import ( "testing" "time" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/event" @@ -72,7 +74,11 @@ func TestDHT_consumeNetworkEvents_onEvtLocalReachabilityChanged(t *testing.T) { func TestDHT_consumeNetworkEvents_onEvtPeerIdentificationCompleted(t *testing.T) { ctx := kadtest.CtxShort(t) - d1 := newServerDht(t, nil) + cfg1 := DefaultConfig() + rn1 := coord.NewBufferedRoutingNotifier() + cfg1.Kademlia.RoutingNotifier = rn1 + d1 := newServerDht(t, cfg1) + d2 := newServerDht(t, nil) // make sure d1 has the address of d2 in its peerstore @@ -83,6 +89,6 @@ func TestDHT_consumeNetworkEvents_onEvtPeerIdentificationCompleted(t *testing.T) Peer: d2.host.ID(), }) - _, err := expectRoutingUpdated(t, ctx, d1.kad.RoutingNotifications(), d2.host.ID()) + _, err := rn1.ExpectRoutingUpdated(ctx, kadt.PeerID(d2.host.ID())) require.NoError(t, err) } diff --git a/v2/pb/msg.aux.go b/v2/pb/msg.aux.go index b0bf4ef0..68ac067a 100644 --- a/v2/pb/msg.aux.go +++ b/v2/pb/msg.aux.go @@ -7,7 +7,6 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" - 
"github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "golang.org/x/exp/slog" ) @@ -15,20 +14,11 @@ import ( // this file contains auxiliary methods to augment the protobuf generated types. // It is used to let these types conform to interfaces or add convenience methods. -var _ kad.Request[kadt.Key, ma.Multiaddr] = (*Message)(nil) - func (m *Message) Target() kadt.Key { b := sha256.Sum256(m.Key) return key.NewKey256(b[:]) } -func (m *Message) EmptyResponse() kad.Response[kadt.Key, ma.Multiaddr] { - return &Message{ - Type: m.Type, - Key: m.Key, - } -} - // FromAddrInfo constructs a [Message_Peer] from the given [peer.AddrInfo]. func FromAddrInfo(p peer.AddrInfo) *Message_Peer { mp := &Message_Peer{ @@ -90,20 +80,17 @@ func (m *Message) CloserPeersAddrInfos() []peer.AddrInfo { return addrInfos } -func (m *Message) CloserNodes() []kad.NodeInfo[kadt.Key, ma.Multiaddr] { +func (m *Message) CloserNodes() []kadt.PeerID { if m == nil { return nil } - infos := make([]kad.NodeInfo[kadt.Key, ma.Multiaddr], 0, len(m.CloserPeers)) + ids := make([]kadt.PeerID, 0, len(m.CloserPeers)) for _, p := range m.CloserPeers { - infos = append(infos, &kadt.AddrInfo{Info: peer.AddrInfo{ - ID: peer.ID(p.Id), - Addrs: p.Addresses(), - }}) + ids = append(ids, kadt.PeerID(peer.ID(p.Id))) } - return infos + return ids } // Addresses returns the Multiaddresses associated with the Message_Peer entry diff --git a/v2/query_test.go b/v2/query_test.go index b96c0b33..bf8e3ee1 100644 --- a/v2/query_test.go +++ b/v2/query_test.go @@ -2,7 +2,6 @@ package dht import ( "context" - "fmt" "testing" "time" @@ -87,43 +86,7 @@ func newClientDht(t testing.TB, cfg *Config) *DHT { return d } -// expectRoutingUpdated selects on the event channel until an EventRoutingUpdated event is seen for the specified peer id -func expectRoutingUpdated(t *testing.T, ctx context.Context, events <-chan coord.RoutingNotification, id peer.ID) (*coord.EventRoutingUpdated, error) { - t.Helper() - for { - select { - case ev := <-events: - if tev, ok := ev.(*coord.EventRoutingUpdated); ok { - if tev.NodeInfo.ID == id { - return tev, nil - } - t.Logf("saw routing update for %s", tev.NodeInfo.ID) - } - case <-ctx.Done(): - return nil, fmt.Errorf("test deadline exceeded while waiting for routing update event") - } - } -} - -// expectRoutingUpdated selects on the event channel until an EventRoutingUpdated event is seen for the specified peer id -func expectRoutingRemoved(t *testing.T, ctx context.Context, events <-chan coord.RoutingNotification, id peer.ID) (*coord.EventRoutingRemoved, error) { - t.Helper() - for { - select { - case ev := <-events: - if tev, ok := ev.(*coord.EventRoutingRemoved); ok { - if tev.NodeID == id { - return tev, nil - } - t.Logf("saw routing removed for %s", tev.NodeID) - } - case <-ctx.Done(): - return nil, fmt.Errorf("test deadline exceeded while waiting for routing removed event") - } - } -} - -func connect(t *testing.T, ctx context.Context, a, b *DHT) { +func connect(t *testing.T, ctx context.Context, a, b *DHT, arn *coord.BufferedRoutingNotifier) { t.Helper() remoteAddrInfo := peer.AddrInfo{ @@ -136,42 +99,44 @@ func connect(t *testing.T, ctx context.Context, a, b *DHT) { require.NoError(t, err) // the include state machine runs in the background for a and eventually should add the node to routing table - _, err = expectRoutingUpdated(t, ctx, a.kad.RoutingNotifications(), b.host.ID()) + _, err = arn.ExpectRoutingUpdated(ctx, kadt.PeerID(b.host.ID())) require.NoError(t, err) // the routing table should now 
contain the node - _, err = a.kad.GetNode(ctx, b.host.ID()) + _, err = a.kad.GetNode(ctx, kadt.PeerID(b.host.ID())) require.NoError(t, err) } -// connectLinearChain connects the dhts together in a linear chain. -// The dhts are configured with routing tables that contain immediate neighbours. -func connectLinearChain(t *testing.T, ctx context.Context, dhts ...*DHT) { - for i := 1; i < len(dhts); i++ { - connect(t, ctx, dhts[i-1], dhts[i]) - connect(t, ctx, dhts[i], dhts[i-1]) - } -} - func TestRTAdditionOnSuccessfulQuery(t *testing.T) { ctx := kadtest.CtxShort(t) - ctx, tp := kadtest.MaybeTrace(t, ctx) - cfg := DefaultConfig() - cfg.TracerProvider = tp + // create dhts and associated routing notifiers so we can inspect routing events + cfg1 := DefaultConfig() + rn1 := coord.NewBufferedRoutingNotifier() + cfg1.Kademlia.RoutingNotifier = rn1 + d1 := newServerDht(t, cfg1) + + cfg2 := DefaultConfig() + rn2 := coord.NewBufferedRoutingNotifier() + cfg2.Kademlia.RoutingNotifier = rn2 + d2 := newServerDht(t, cfg2) - d1 := newServerDht(t, cfg) - d2 := newServerDht(t, cfg) - d3 := newServerDht(t, cfg) + cfg3 := DefaultConfig() + rn3 := coord.NewBufferedRoutingNotifier() + cfg3.Kademlia.RoutingNotifier = rn3 + d3 := newServerDht(t, cfg3) - connectLinearChain(t, ctx, d1, d2, d3) + connect(t, ctx, d1, d2, rn1) + connect(t, ctx, d2, d1, rn2) + connect(t, ctx, d2, d3, rn2) + connect(t, ctx, d3, d2, rn3) // d3 does not know about d1 - _, err := d3.kad.GetNode(ctx, d1.host.ID()) + _, err := d3.kad.GetNode(ctx, kadt.PeerID(d1.host.ID())) require.ErrorIs(t, err, coord.ErrNodeNotFound) // d1 does not know about d3 - _, err = d1.kad.GetNode(ctx, d3.host.ID()) + _, err = d1.kad.GetNode(ctx, kadt.PeerID(d3.host.ID())) require.ErrorIs(t, err, coord.ErrNodeNotFound) // // but when d3 queries d2, d1 and d3 discover each other @@ -179,31 +144,37 @@ func TestRTAdditionOnSuccessfulQuery(t *testing.T) { // ignore the error // d3 should update its routing table to include d1 during the query - _, err = expectRoutingUpdated(t, ctx, d3.kad.RoutingNotifications(), d1.host.ID()) + _, err = rn3.ExpectRoutingUpdated(ctx, kadt.PeerID(d1.host.ID())) require.NoError(t, err) // d3 now has d1 in its routing table - _, err = d3.kad.GetNode(ctx, d1.host.ID()) + _, err = d3.kad.GetNode(ctx, kadt.PeerID(d1.host.ID())) require.NoError(t, err) // d1 should update its routing table to include d3 during the query - _, err = expectRoutingUpdated(t, ctx, d1.kad.RoutingNotifications(), d3.host.ID()) + _, err = rn1.ExpectRoutingUpdated(ctx, kadt.PeerID(d3.host.ID())) require.NoError(t, err) // d1 now has d3 in its routing table - _, err = d1.kad.GetNode(ctx, d3.host.ID()) + _, err = d1.kad.GetNode(ctx, kadt.PeerID(d3.host.ID())) require.NoError(t, err) } func TestRTEvictionOnFailedQuery(t *testing.T) { ctx := kadtest.CtxShort(t) - cfg := DefaultConfig() + cfg1 := DefaultConfig() + rn1 := coord.NewBufferedRoutingNotifier() + cfg1.Kademlia.RoutingNotifier = rn1 + d1 := newServerDht(t, cfg1) + + cfg2 := DefaultConfig() + rn2 := coord.NewBufferedRoutingNotifier() + cfg2.Kademlia.RoutingNotifier = rn2 + d2 := newServerDht(t, cfg2) - d1 := newServerDht(t, cfg) - d2 := newServerDht(t, cfg) - connect(t, ctx, d1, d2) - connect(t, ctx, d2, d1) + connect(t, ctx, d1, d2, rn1) + connect(t, ctx, d2, d1, rn2) // close both hosts so query fails require.NoError(t, d1.host.Close()) @@ -213,17 +184,17 @@ func TestRTEvictionOnFailedQuery(t *testing.T) { // no scheduled probes will have taken place // d1 still has d2 in the routing table - _, err := 
d1.kad.GetNode(ctx, d2.host.ID()) + _, err := d1.kad.GetNode(ctx, kadt.PeerID(d2.host.ID())) require.NoError(t, err) // d2 still has d1 in the routing table - _, err = d2.kad.GetNode(ctx, d1.host.ID()) + _, err = d2.kad.GetNode(ctx, kadt.PeerID(d1.host.ID())) require.NoError(t, err) // failed queries should remove the queried peers from the routing table _, _ = d1.FindPeer(ctx, "test") // d1 should update its routing table to remove d2 because of the failure - _, err = expectRoutingRemoved(t, ctx, d1.kad.RoutingNotifications(), d2.host.ID()) + _, err = rn1.ExpectRoutingRemoved(ctx, kadt.PeerID(d2.host.ID())) require.NoError(t, err) } diff --git a/v2/router.go b/v2/router.go index 2c5ed505..14db2cd9 100644 --- a/v2/router.go +++ b/v2/router.go @@ -11,10 +11,7 @@ import ( "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-msgio" "github.com/libp2p/go-msgio/pbio" - "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/network/address" "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" "github.com/libp2p/go-libp2p-kad-dht/v2/coord" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" @@ -23,42 +20,26 @@ import ( type Router struct { host host.Host + // ProtocolID represents the DHT [protocol] we can query with and respond to. + // + // [protocol]: https://docs.libp2p.io/concepts/fundamentals/protocols/ + ProtocolID protocol.ID } -var _ coord.Router = (*Router)(nil) +var _ coord.Router[kadt.Key, kadt.PeerID, *pb.Message] = (*Router)(nil) -func WriteMsg(s network.Stream, msg protoreflect.ProtoMessage) error { - w := pbio.NewDelimitedWriter(s) - return w.WriteMsg(msg) -} - -func ReadMsg(s network.Stream, msg proto.Message) error { - r := pbio.NewDelimitedReader(s, network.MessageSizeMax) - return r.ReadMsg(msg) -} - -type ProtoKadMessage interface { - proto.Message -} - -type ProtoKadRequestMessage[K kad.Key[K], A kad.Address[A]] interface { - ProtoKadMessage - kad.Request[K, A] -} - -type ProtoKadResponseMessage[K kad.Key[K], A kad.Address[A]] interface { - ProtoKadMessage - kad.Response[K, A] -} - -func (r *Router) SendMessage(ctx context.Context, to peer.AddrInfo, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error) { - if err := r.AddNodeInfo(ctx, to, time.Hour); err != nil { - return nil, fmt.Errorf("add node info: %w", err) +func FindKeyRequest(k kadt.Key) *pb.Message { + marshalledKey, _ := k.MarshalBinary() + return &pb.Message{ + Type: pb.Message_FIND_NODE, + Key: marshalledKey, } +} +func (r *Router) SendMessage(ctx context.Context, to kadt.PeerID, req *pb.Message) (*pb.Message, error) { // TODO: what to do with addresses in peer.AddrInfo? 
- if len(r.host.Peerstore().Addrs(to.ID)) == 0 { - return nil, fmt.Errorf("no address for peer %s", to.ID) + if len(r.host.Peerstore().Addrs(peer.ID(to))) == 0 { + return nil, fmt.Errorf("no address for peer %s", to) } var cancel context.CancelFunc @@ -68,7 +49,7 @@ func (r *Router) SendMessage(ctx context.Context, to peer.AddrInfo, protoID addr var err error var s network.Stream - s, err = r.host.NewStream(ctx, to.ID, protocol.ID(protoID)) + s, err = r.host.NewStream(ctx, peer.ID(to), r.ProtocolID) if err != nil { return nil, fmt.Errorf("stream creation: %w", err) } @@ -92,39 +73,27 @@ func (r *Router) SendMessage(ctx context.Context, to peer.AddrInfo, protoID addr } for _, info := range protoResp.CloserPeersAddrInfos() { - _ = r.AddNodeInfo(ctx, info, time.Hour) + _ = r.addToPeerStore(ctx, info, time.Hour) // TODO: replace hard coded time.Hour with config } return &protoResp, err } -func (r *Router) AddNodeInfo(ctx context.Context, ai peer.AddrInfo, ttl time.Duration) error { - // Don't add addresses for self or our connected peers. We have better ones. - if ai.ID == r.host.ID() || r.host.Network().Connectedness(ai.ID) == network.Connected { - return nil - } - - r.host.Peerstore().AddAddrs(ai.ID, ai.Addrs, ttl) - return nil -} - -func (r *Router) GetNodeInfo(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { - return r.host.Peerstore().PeerInfo(id), nil -} - -func (r *Router) GetClosestNodes(ctx context.Context, to peer.AddrInfo, target kadt.Key) ([]peer.AddrInfo, error) { - resp, err := r.SendMessage(ctx, to, address.ProtocolID(ProtocolIPFS), FindKeyRequest(target)) +func (r *Router) GetClosestNodes(ctx context.Context, to kadt.PeerID, target kadt.Key) ([]kadt.PeerID, error) { + resp, err := r.SendMessage(ctx, to, FindKeyRequest(target)) if err != nil { return nil, err } - return resp.CloserPeersAddrInfos(), nil + return resp.CloserNodes(), nil } -func FindKeyRequest(k kadt.Key) *pb.Message { - marshalledKey, _ := k.MarshalBinary() - return &pb.Message{ - Type: pb.Message_FIND_NODE, - Key: marshalledKey, +func (r *Router) addToPeerStore(ctx context.Context, ai peer.AddrInfo, ttl time.Duration) error { + // Don't add addresses for self or our connected peers. We have better ones. 
+ if ai.ID == r.host.ID() || r.host.Network().Connectedness(ai.ID) == network.Connected { + return nil } + + r.host.Peerstore().AddAddrs(ai.ID, ai.Addrs, ttl) + return nil } diff --git a/v2/routing.go b/v2/routing.go index e17ae434..396104a9 100644 --- a/v2/routing.go +++ b/v2/routing.go @@ -43,7 +43,7 @@ func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { var foundNode coord.Node fn := func(ctx context.Context, node coord.Node, stats coord.QueryStats) error { - if node.ID() == id { + if peer.ID(node.ID()) == id { foundNode = node return coord.ErrSkipRemaining } @@ -59,10 +59,7 @@ func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { return peer.AddrInfo{}, fmt.Errorf("peer record not found") } - return peer.AddrInfo{ - ID: foundNode.ID(), - Addrs: foundNode.Addresses(), - }, nil + return d.host.Peerstore().PeerInfo(peer.ID(foundNode.ID())), nil } func (d *DHT) Provide(ctx context.Context, c cid.Cid, brdcst bool) error { From e86381e824dd2797cf931f14070292fe763a0c93 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Tue, 19 Sep 2023 15:44:12 +0200 Subject: [PATCH 49/64] refactor: v2 simplify tracing (#924) --- v2/config.go | 30 ++++++++-- v2/config_test.go | 13 ++++ v2/coord/coordinator.go | 13 +++- v2/coord/query/query.go | 23 ++++--- v2/coord/routing.go | 3 +- v2/coord/routing/bootstrap.go | 10 +--- v2/coord/routing/include.go | 19 +++--- v2/coord/routing/probe.go | 24 ++++---- v2/dht.go | 106 +++++++++++++++++++-------------- v2/go.mod | 2 +- v2/internal/kadtest/tracing.go | 1 + v2/routing.go | 13 ++-- v2/stream.go | 2 +- v2/tele/tele.go | 17 +++--- 14 files changed, 165 insertions(+), 111 deletions(-) diff --git a/v2/config.go b/v2/config.go index 0bb6150a..ba41447c 100644 --- a/v2/config.go +++ b/v2/config.go @@ -8,6 +8,7 @@ import ( ds "github.com/ipfs/go-datastore" leveldb "github.com/ipfs/go-ds-leveldb" logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" ma "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" @@ -27,9 +28,9 @@ import ( const ServiceName = "libp2p.DHT" const ( - // ProtocolIPFS is the protocol identifier for the main IPFS network. If the - // DHT is configured with this protocol, you must configure backends for - // IPNS, Public Key, and provider records (ipns, pk, and providers + // ProtocolIPFS is the protocol identifier for the main Amino DHT network. + // If the DHT is configured with this protocol, you must configure backends + // for IPNS, Public Key, and provider records (ipns, pk, and providers // namespaces). Configuration validation will fail if backends are missing. ProtocolIPFS protocol.ID = "/ipfs/kad/1.0.0" @@ -117,6 +118,10 @@ type Config struct { // BucketSize determines the number of closer peers to return BucketSize int + // BootstrapPeers is the list of peers that should be used to bootstrap + // into the DHT network. + BootstrapPeers []peer.AddrInfo + // ProtocolID represents the DHT [protocol] we can query with and respond to. // // [protocol]: https://docs.libp2p.io/concepts/fundamentals/protocols/ @@ -167,10 +172,16 @@ type Config struct { // used to filter out private addresses. AddressFilter AddressFilter - // MeterProvider . + // MeterProvider provides access to named Meter instances. It's used to, + // e.g., expose prometheus metrics. 
Check out the [opentelemetry docs]:
+	//
+	// [opentelemetry docs]: https://opentelemetry.io/docs/specs/otel/metrics/api/#meterprovider
 	MeterProvider metric.MeterProvider
 
-	// TracerProvider .
+	// TracerProvider provides Tracers that are used by instrumentation code to
+	// trace computational workflows. Check out the [opentelemetry docs]:
+	//
+	// [opentelemetry docs]: https://opentelemetry.io/docs/concepts/signals/traces/#tracer-provider
 	TracerProvider trace.TracerProvider
 }
@@ -184,6 +195,7 @@ func DefaultConfig() *Config {
 		Mode:           ModeOptAutoClient,
 		Kademlia:       coord.DefaultCoordinatorConfig(),
 		BucketSize:     20, // MAGIC
+		BootstrapPeers: DefaultBootstrapPeers(),
 		ProtocolID:     ProtocolIPFS,
 		RoutingTable:   nil, // nil because a routing table requires information about the local node. triert.TrieRT will be used if this field is nil.
 		Backends:       map[string]Backend{}, // if empty and [ProtocolIPFS] is used, it'll be populated with the ipns, pk and providers backends
@@ -238,6 +250,14 @@ func (c *Config) Validate() error {
 		return fmt.Errorf("invalid kademlia configuration: %w", err)
 	}
 
+	if c.BucketSize == 0 {
+		return fmt.Errorf("bucket size must not be 0")
+	}
+
+	if len(c.BootstrapPeers) == 0 {
+		return fmt.Errorf("no bootstrap peer")
+	}
+
 	if c.ProtocolID == "" {
 		return fmt.Errorf("protocolID must not be empty")
 	}
diff --git a/v2/config_test.go b/v2/config_test.go
index 892f9f8a..6787fff9 100644
--- a/v2/config_test.go
+++ b/v2/config_test.go
@@ -4,6 +4,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/stretchr/testify/assert"
 )
@@ -100,4 +101,16 @@ func TestConfig_Validate(t *testing.T) {
 		cfg.Clock = nil
 		assert.Error(t, cfg.Validate())
 	})
+
+	t.Run("zero bucket size", func(t *testing.T) {
+		cfg := DefaultConfig()
+		cfg.BucketSize = 0
+		assert.Error(t, cfg.Validate())
+	})
+
+	t.Run("empty bootstrap peers", func(t *testing.T) {
+		cfg := DefaultConfig()
+		cfg.BootstrapPeers = []peer.AddrInfo{}
+		assert.Error(t, cfg.Validate())
+	})
 }
diff --git a/v2/coord/coordinator.go b/v2/coord/coordinator.go
index 4a4f3875..63bad31c 100644
--- a/v2/coord/coordinator.go
+++ b/v2/coord/coordinator.go
@@ -33,6 +33,11 @@ type Coordinator struct {
 	// cancel is used to cancel all running goroutines when the coordinator is cleaning up
 	cancel context.CancelFunc
 
+	// done will be closed when the coordinator's eventLoop exits. Reading from
+	// this channel blocks until the coordinator's resources have been cleaned
+	// up.
+	done chan struct{}
+
 	// cfg is a copy of the optional configuration supplied to the dht
 	cfg CoordinatorConfig
@@ -180,7 +185,7 @@ func NewCoordinator(self kadt.PeerID, rtr Router[kadt.Key, kadt.PeerID, *pb.Mess
 	qpCfg.QueryConcurrency = cfg.RequestConcurrency
 	qpCfg.RequestTimeout = cfg.RequestTimeout
 
-	qp, err := query.NewPool[kadt.Key](kadt.PeerID(self), qpCfg)
+	qp, err := query.NewPool[kadt.Key](self, qpCfg)
 	if err != nil {
 		return nil, fmt.Errorf("query pool: %w", err)
 	}
@@ -235,11 +240,13 @@ func NewCoordinator(self kadt.PeerID, rtr Router[kadt.Key, kadt.PeerID, *pb.Mess
 		rtr:    rtr,
 		rt:     rt,
 		cancel: cancel,
+		done:   make(chan struct{}),
 		networkBehaviour: networkBehaviour,
 		routingBehaviour: routingBehaviour,
 		queryBehaviour:   queryBehaviour,
 	}
+
 	go d.eventLoop(ctx)
 
 	return d, nil
@@ -248,6 +255,7 @@ func NewCoordinator(self kadt.PeerID, rtr Router[kadt.Key, kadt.PeerID, *pb.Mess
 // Close cleans up all resources associated with this Coordinator.
func (c *Coordinator) Close() error { c.cancel() + <-c.done return nil } @@ -256,6 +264,8 @@ func (c *Coordinator) ID() kadt.PeerID { } func (c *Coordinator) eventLoop(ctx context.Context) { + defer close(c.done) + ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.eventLoop") defer span.End() for { @@ -446,6 +456,7 @@ func (c *Coordinator) AddNodes(ctx context.Context, ids []kadt.PeerID) error { func (c *Coordinator) Bootstrap(ctx context.Context, seeds []kadt.PeerID) error { ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.Bootstrap") defer span.End() + c.routingBehaviour.Notify(ctx, &EventStartBootstrap{ SeedNodes: seeds, }) diff --git a/v2/coord/query/query.go b/v2/coord/query/query.go index 5982448d..9b0d87eb 100644 --- a/v2/coord/query/query.go +++ b/v2/coord/query/query.go @@ -9,6 +9,7 @@ import ( "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/kaderr" "github.com/plprobelab/go-kademlia/key" + "go.opentelemetry.io/otel/trace" "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) @@ -118,9 +119,13 @@ func NewQuery[K kad.Key[K], N kad.NodeID[K]](self N, id QueryID, target K, iter }, nil } -func (q *Query[K, N]) Advance(ctx context.Context, ev QueryEvent) QueryState { - ctx, span := tele.StartSpan(ctx, "Query.Advance") - defer span.End() +func (q *Query[K, N]) Advance(ctx context.Context, ev QueryEvent) (out QueryState) { + ctx, span := tele.StartSpan(ctx, "Query.Advance", trace.WithAttributes(tele.AttrInEvent(ev))) + defer func() { + span.SetAttributes(tele.AttrOutEvent(out)) + span.End() + }() + if q.finished { return &StateQueryFinished{ QueryID: q.id, @@ -130,17 +135,14 @@ func (q *Query[K, N]) Advance(ctx context.Context, ev QueryEvent) QueryState { switch tev := ev.(type) { case *EventQueryCancel: - span.SetAttributes(tele.AttrEvent("EventQueryCancel")) q.markFinished() return &StateQueryFinished{ QueryID: q.id, Stats: q.stats, } case *EventQueryFindCloserResponse[K, N]: - span.SetAttributes(tele.AttrEvent("EventQueryFindCloserResponse")) q.onMessageResponse(ctx, tev.NodeID, tev.CloserNodes) case *EventQueryFindCloserFailure[K, N]: - span.SetAttributes(tele.AttrEvent("EventQueryFindCloserFailure")) span.RecordError(tev.Error) q.onMessageFailure(ctx, tev.NodeID) case nil: @@ -174,7 +176,6 @@ func (q *Query[K, N]) Advance(ctx context.Context, ev QueryEvent) QueryState { q.inFlight-- q.stats.Failure++ } else if atCapacity() { - span.SetAttributes(tele.AttrOutEvent("StateQueryWaitingAtCapacity")) // this is the query's tracing span returnState = &StateQueryWaitingAtCapacity{ QueryID: q.id, Stats: q.stats, @@ -191,7 +192,6 @@ func (q *Query[K, N]) Advance(ctx context.Context, ev QueryEvent) QueryState { // If it has contacted at least NumResults nodes successfully then the iteration is done. 
if !progressing && successes >= q.cfg.NumResults { q.markFinished() - span.SetAttributes(tele.AttrOutEvent("StateQueryFinished")) // this is the query's tracing span returnState = &StateQueryFinished{ QueryID: q.id, Stats: q.stats, @@ -208,7 +208,6 @@ func (q *Query[K, N]) Advance(ctx context.Context, ev QueryEvent) QueryState { if q.stats.Start.IsZero() { q.stats.Start = q.cfg.Clock.Now() } - span.SetAttributes(tele.AttrOutEvent("StateQueryFindCloser")) // this is the query's tracing span returnState = &StateQueryFindCloser[K, N]{ NodeID: ni.NodeID, QueryID: q.id, @@ -216,13 +215,13 @@ func (q *Query[K, N]) Advance(ctx context.Context, ev QueryEvent) QueryState { Target: q.target, } return true - } - span.SetAttributes(tele.AttrOutEvent("StateQueryWaitingAtCapacity")) // this is the query's tracing span + returnState = &StateQueryWaitingAtCapacity{ QueryID: q.id, Stats: q.stats, } + return true case *StateNodeUnresponsive: // ignore @@ -241,7 +240,6 @@ func (q *Query[K, N]) Advance(ctx context.Context, ev QueryEvent) QueryState { if q.inFlight > 0 { // The iterator is still waiting for results and not at capacity - span.SetAttributes(tele.AttrOutEvent("StateQueryWaitingWithCapacity")) return &StateQueryWaitingWithCapacity{ QueryID: q.id, Stats: q.stats, @@ -251,7 +249,6 @@ func (q *Query[K, N]) Advance(ctx context.Context, ev QueryEvent) QueryState { // The iterator is finished because all available nodes have been contacted // and the iterator is not waiting for any more results. q.markFinished() - span.SetAttributes(tele.AttrOutEvent("StateQueryFinished")) return &StateQueryFinished{ QueryID: q.id, Stats: q.stats, diff --git a/v2/coord/routing.go b/v2/coord/routing.go index ead1b107..1c34bca8 100644 --- a/v2/coord/routing.go +++ b/v2/coord/routing.go @@ -59,8 +59,9 @@ func (r *RoutingBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { // notify must only be called while r.pendingMu is held func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { - ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.notify") + ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.notify", trace.WithAttributes(attribute.String("event", fmt.Sprintf("%T", ev)))) defer span.End() + switch ev := ev.(type) { case *EventStartBootstrap: span.SetAttributes(attribute.String("event", "EventStartBootstrap")) diff --git a/v2/coord/routing/bootstrap.go b/v2/coord/routing/bootstrap.go index 683683a7..e4b9d452 100644 --- a/v2/coord/routing/bootstrap.go +++ b/v2/coord/routing/bootstrap.go @@ -9,6 +9,7 @@ import ( "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/kaderr" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "github.com/libp2p/go-libp2p-kad-dht/v2/coord/query" "github.com/libp2p/go-libp2p-kad-dht/v2/tele" @@ -92,13 +93,11 @@ func NewBootstrap[K kad.Key[K], N kad.NodeID[K]](self N, cfg *BootstrapConfig[K] // Advance advances the state of the bootstrap by attempting to advance its query if running. 
func (b *Bootstrap[K, N]) Advance(ctx context.Context, ev BootstrapEvent) BootstrapState { - ctx, span := tele.StartSpan(ctx, "Bootstrap.Advance") + ctx, span := tele.StartSpan(ctx, "Bootstrap.Advance", trace.WithAttributes(tele.AttrInEvent(ev))) defer span.End() switch tev := ev.(type) { case *EventBootstrapStart[K, N]: - span.SetAttributes(tele.AttrEvent("EventBootstrapStart")) - // TODO: ignore start event if query is already in progress iter := query.NewClosestNodesIter[K, N](b.self.Key()) @@ -118,13 +117,11 @@ func (b *Bootstrap[K, N]) Advance(ctx context.Context, ev BootstrapEvent) Bootst return b.advanceQuery(ctx, nil) case *EventBootstrapFindCloserResponse[K, N]: - span.SetAttributes(tele.AttrEvent("EventBootstrapFindCloserResponse")) return b.advanceQuery(ctx, &query.EventQueryFindCloserResponse[K, N]{ NodeID: tev.NodeID, CloserNodes: tev.CloserNodes, }) case *EventBootstrapFindCloserFailure[K, N]: - span.SetAttributes(tele.AttrEvent("EventBootstrapFindCloserFailure")) span.RecordError(tev.Error) return b.advanceQuery(ctx, &query.EventQueryFindCloserFailure[K, N]{ NodeID: tev.NodeID, @@ -132,8 +129,7 @@ func (b *Bootstrap[K, N]) Advance(ctx context.Context, ev BootstrapEvent) Bootst }) case *EventBootstrapPoll: - span.SetAttributes(tele.AttrEvent("EventBootstrapPoll")) - // ignore, nothing to do + // ignore, nothing to do default: panic(fmt.Sprintf("unexpected event: %T", tev)) } diff --git a/v2/coord/routing/include.go b/v2/coord/routing/include.go index 749fe931..2ec3c5bd 100644 --- a/v2/coord/routing/include.go +++ b/v2/coord/routing/include.go @@ -6,10 +6,10 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/kaderr" "github.com/plprobelab/go-kademlia/key" + "go.opentelemetry.io/otel/trace" "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) @@ -99,14 +99,15 @@ func NewInclude[K kad.Key[K], N kad.NodeID[K]](rt kad.RoutingTable[K, N], cfg *I } // Advance advances the state of the include state machine by attempting to advance its query if running. 
-func (b *Include[K, N]) Advance(ctx context.Context, ev IncludeEvent) IncludeState { - ctx, span := tele.StartSpan(ctx, "Include.Advance") - defer span.End() +func (b *Include[K, N]) Advance(ctx context.Context, ev IncludeEvent) (out IncludeState) { + ctx, span := tele.StartSpan(ctx, "Include.Advance", trace.WithAttributes(tele.AttrInEvent(ev))) + defer func() { + span.SetAttributes(tele.AttrOutEvent(out)) + span.End() + }() switch tev := ev.(type) { - case *EventIncludeAddCandidate[K, N]: - span.SetAttributes(tele.AttrEvent("EventIncludeAddCandidate")) // Ignore if already running a check _, checking := b.checks[key.HexString(tev.NodeID.Key())] if checking { @@ -125,24 +126,20 @@ func (b *Include[K, N]) Advance(ctx context.Context, ev IncludeEvent) IncludeSta b.candidates.Enqueue(ctx, tev.NodeID) case *EventIncludeConnectivityCheckSuccess[K, N]: - span.SetAttributes(tele.AttrEvent("EventIncludeConnectivityCheckSuccess")) ch, ok := b.checks[key.HexString(tev.NodeID.Key())] if ok { delete(b.checks, key.HexString(tev.NodeID.Key())) if b.rt.AddNode(tev.NodeID) { - span.SetAttributes(tele.AttrOutEvent("StateIncludeRoutingUpdated")) return &StateIncludeRoutingUpdated[K, N]{ NodeID: ch.NodeID, } } } case *EventIncludeConnectivityCheckFailure[K, N]: - span.SetAttributes(tele.AttrEvent("EventIncludeConnectivityCheckFailure")) span.RecordError(tev.Error) delete(b.checks, key.HexString(tev.NodeID.Key())) case *EventIncludePoll: - span.SetAttributes(tele.AttrEvent("EventIncludePoll")) // ignore, nothing to do default: panic(fmt.Sprintf("unexpected event: %T", tev)) @@ -159,7 +156,6 @@ func (b *Include[K, N]) Advance(ctx context.Context, ev IncludeEvent) IncludeSta if !ok { // No candidate in queue if len(b.checks) > 0 { - span.SetAttributes(tele.AttrOutEvent("StateIncludeWaitingWithCapacity")) return &StateIncludeWaitingWithCapacity{} } return &StateIncludeIdle{} @@ -171,7 +167,6 @@ func (b *Include[K, N]) Advance(ctx context.Context, ev IncludeEvent) IncludeSta } // Ask the node to find itself - span.SetAttributes(tele.AttrOutEvent("StateIncludeConnectivityCheck")) return &StateIncludeConnectivityCheck[K, N]{ NodeID: candidate, } diff --git a/v2/coord/routing/probe.go b/v2/coord/routing/probe.go index 45e5881f..248d450b 100644 --- a/v2/coord/routing/probe.go +++ b/v2/coord/routing/probe.go @@ -12,6 +12,7 @@ import ( "github.com/plprobelab/go-kademlia/kaderr" "github.com/plprobelab/go-kademlia/key" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) @@ -133,17 +134,19 @@ func NewProbe[K kad.Key[K], N kad.NodeID[K]](rt RoutingTableCpl[K, N], cfg *Prob } // Advance advances the state of the probe state machine by attempting to advance its query if running. 
-func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) ProbeState { - _, span := tele.StartSpan(ctx, "Probe.Advance") - defer span.End() +func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) (out ProbeState) { + _, span := tele.StartSpan(ctx, "Probe.Advance", trace.WithAttributes(tele.AttrInEvent(ev))) + defer func() { + span.SetAttributes(tele.AttrOutEvent(out)) + span.End() + }() switch tev := ev.(type) { case *EventProbePoll: // ignore, nothing to do - span.SetAttributes(tele.AttrEvent("EventProbePoll")) case *EventProbeAdd[K, N]: // check presence in routing table - span.SetAttributes(tele.AttrEvent("EventProbeAdd"), attribute.String("nodeid", tev.NodeID.String())) + span.SetAttributes(attribute.String("nodeid", tev.NodeID.String())) if _, found := p.rt.GetNode(tev.NodeID.Key()); !found { // ignore if not in routing table span.RecordError(errors.New("node not in routing table")) @@ -159,7 +162,7 @@ func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) ProbeState { // TODO: if node was in ongoing list return a state that can signal the caller to cancel any prior outbound message p.nvl.Put(nv) case *EventProbeRemove[K, N]: - span.SetAttributes(tele.AttrEvent("EventProbeRemove"), attribute.String("nodeid", tev.NodeID.String())) + span.SetAttributes(attribute.String("nodeid", tev.NodeID.String())) p.rt.RemoveKey(tev.NodeID.Key()) p.nvl.Remove(tev.NodeID) @@ -168,7 +171,7 @@ func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) ProbeState { } case *EventProbeConnectivityCheckSuccess[K, N]: - span.SetAttributes(tele.AttrEvent("EventProbeMessageResponse"), attribute.String("nodeid", tev.NodeID.String())) + span.SetAttributes(attribute.String("nodeid", tev.NodeID.String())) nv, found := p.nvl.Get(tev.NodeID) if !found { // ignore message for unknown node, which might have been removed @@ -183,7 +186,7 @@ func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) ProbeState { case *EventProbeConnectivityCheckFailure[K, N]: // probe failed, so remove from routing table and from list - span.SetAttributes(tele.AttrEvent("EventProbeMessageFailure"), attribute.String("nodeid", tev.NodeID.String())) + span.SetAttributes(attribute.String("nodeid", tev.NodeID.String())) span.RecordError(tev.Error) p.rt.RemoveKey(tev.NodeID.Key()) @@ -192,7 +195,7 @@ func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) ProbeState { NodeID: tev.NodeID, } case *EventProbeNotifyConnectivity[K, N]: - span.SetAttributes(tele.AttrEvent("EventProbeNotifyConnectivity"), attribute.String("nodeid", tev.NodeID.String())) + span.SetAttributes(attribute.String("nodeid", tev.NodeID.String())) nv, found := p.nvl.Get(tev.NodeID) if !found { // ignore message for unknown node, which might have been removed @@ -214,7 +217,6 @@ func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) ProbeState { candidate, found := p.nvl.FindCheckPastDeadline(p.cfg.Clock.Now()) if !found { // nothing suitable for time out - span.SetAttributes(tele.AttrOutEvent("StateProbeWaitingAtCapacity")) return &StateProbeWaitingAtCapacity{} } @@ -232,7 +234,6 @@ func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) ProbeState { if !ok { if p.nvl.OngoingCount() > 0 { // waiting for a check but nothing else to do - span.SetAttributes(tele.AttrOutEvent("StateProbeWaitingWithCapacity")) return &StateProbeWaitingWithCapacity{} } // nothing happening and nothing to do @@ -242,7 +243,6 @@ func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) ProbeState { 
p.nvl.MarkOngoing(next.NodeID, p.cfg.Clock.Now().Add(p.cfg.Timeout)) // Ask the node to find itself - span.SetAttributes(tele.AttrOutEvent("StateProbeConnectivityCheck")) return &StateProbeConnectivityCheck[K, N]{ NodeID: next.NodeID, } diff --git a/v2/dht.go b/v2/dht.go index 559a5288..1fd857d7 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -91,56 +91,13 @@ func New(h host.Host, cfg *Config) (*DHT, error) { return nil, fmt.Errorf("init telemetry: %w", err) } + // initialize backends if len(cfg.Backends) != 0 { d.backends = cfg.Backends } else if cfg.ProtocolID == ProtocolIPFS { - - var dstore Datastore - if cfg.Datastore != nil { - dstore = cfg.Datastore - } else if dstore, err = InMemoryDatastore(); err != nil { - return nil, fmt.Errorf("new default datastore: %w", err) - } - - // wrap datastore in open telemetry tracing - dstore = trace.New(dstore, d.tele.Tracer) - - pbeCfg, err := DefaultProviderBackendConfig() - if err != nil { - return nil, fmt.Errorf("default provider config: %w", err) - } - pbeCfg.Logger = cfg.Logger - pbeCfg.AddressFilter = cfg.AddressFilter - pbeCfg.Tele = d.tele - pbeCfg.clk = d.cfg.Clock - - pbe, err := NewBackendProvider(h.Peerstore(), dstore, pbeCfg) - if err != nil { - return nil, fmt.Errorf("new provider backend: %w", err) - } - - rbeCfg, err := DefaultRecordBackendConfig() + d.backends, err = d.initAminoBackends() if err != nil { - return nil, fmt.Errorf("default provider config: %w", err) - } - rbeCfg.Logger = cfg.Logger - rbeCfg.Tele = d.tele - rbeCfg.clk = d.cfg.Clock - - ipnsBe, err := NewBackendIPNS(dstore, h.Peerstore(), rbeCfg) - if err != nil { - return nil, fmt.Errorf("new ipns backend: %w", err) - } - - pkBe, err := NewBackendPublicKey(dstore, rbeCfg) - if err != nil { - return nil, fmt.Errorf("new public key backend: %w", err) - } - - d.backends = map[string]Backend{ - "ipns": ipnsBe, - "pk": pkBe, - "providers": pbe, + return nil, fmt.Errorf("init amino backends: %w", err) } } @@ -183,6 +140,63 @@ func New(h host.Host, cfg *Config) (*DHT, error) { return d, nil } +// initAminoBackends initializes the default backends for the Amino DHT. This +// includes the ipns, public key, and providers backends. A [DHT] with these +// backends will support these three record types. 
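The default wiring below only runs when cfg.Backends is empty; callers may supply their own map instead. A hedged sketch of that alternative (h is an existing libp2p host; myIPNSBackend, myPublicKeyBackend, and myProvidersBackend are hypothetical implementations of the Backend interface):

    cfg := dht.DefaultConfig()
    cfg.Backends = map[string]dht.Backend{
        "ipns":      myIPNSBackend,      // hypothetical custom backends; for
        "pk":        myPublicKeyBackend, // ProtocolIPFS, Validate requires
        "providers": myProvidersBackend, // exactly these three namespaces
    }
    d, err := dht.New(h, cfg) // uses the supplied map, skipping initAminoBackends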
+func (d *DHT) initAminoBackends() (map[string]Backend, error) { + var ( + err error + dstore Datastore + ) + + if d.cfg.Datastore != nil { + dstore = d.cfg.Datastore + } else if dstore, err = InMemoryDatastore(); err != nil { + return nil, fmt.Errorf("new default datastore: %w", err) + } + + // wrap datastore in OpenTelemetry tracing + dstore = trace.New(dstore, d.tele.Tracer) + + pbeCfg, err := DefaultProviderBackendConfig() + if err != nil { + return nil, fmt.Errorf("default provider config: %w", err) + } + pbeCfg.Logger = d.cfg.Logger + pbeCfg.AddressFilter = d.cfg.AddressFilter + pbeCfg.Tele = d.tele + pbeCfg.clk = d.cfg.Clock + + pbe, err := NewBackendProvider(d.host.Peerstore(), dstore, pbeCfg) + if err != nil { + return nil, fmt.Errorf("new provider backend: %w", err) + } + + rbeCfg, err := DefaultRecordBackendConfig() + if err != nil { + return nil, fmt.Errorf("default record config: %w", err) + } + rbeCfg.Logger = d.cfg.Logger + rbeCfg.Tele = d.tele + rbeCfg.clk = d.cfg.Clock + + ipnsBe, err := NewBackendIPNS(dstore, d.host.Peerstore(), rbeCfg) + if err != nil { + return nil, fmt.Errorf("new ipns backend: %w", err) + } + + pkBe, err := NewBackendPublicKey(dstore, rbeCfg) + if err != nil { + return nil, fmt.Errorf("new public key backend: %w", err) + } + + return map[string]Backend{ + namespaceIPNS: ipnsBe, + namespacePublicKey: pkBe, + namespaceProviders: pbe, + }, nil +} + // Close cleans up all resources associated with this DHT. func (d *DHT) Close() error { if err := d.sub.Close(); err != nil { diff --git a/v2/go.mod b/v2/go.mod index fe220453..cd8a0748 100644 --- a/v2/go.mod +++ b/v2/go.mod @@ -16,6 +16,7 @@ require ( github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-multiaddr v0.11.0 github.com/plprobelab/go-kademlia v0.0.0-20230913171354-443ec1f56080 + github.com/prometheus/client_golang v1.16.0 // indirect github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.18.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.18.0 @@ -93,7 +94,6 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect - github.com/prometheus/client_golang v1.16.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.11.0 // indirect diff --git a/v2/internal/kadtest/tracing.go b/v2/internal/kadtest/tracing.go index c1781ce8..a724a5c4 100644 --- a/v2/internal/kadtest/tracing.go +++ b/v2/internal/kadtest/tracing.go @@ -54,6 +54,7 @@ func MaybeTrace(t testing.TB, ctx context.Context) (context.Context, trace.Trace // Jaeger instance running on localhost on port 14268 func OtelTracerProvider(ctx context.Context, t testing.TB) trace.TracerProvider { t.Helper() + exp, err := otlptracegrpc.New(ctx, otlptracegrpc.WithEndpoint(fmt.Sprintf("%s:%d", *tracingHost, *tracingPort)), otlptracegrpc.WithInsecure(), diff --git a/v2/routing.go b/v2/routing.go index 396104a9..82553a62 100644 --- a/v2/routing.go +++ b/v2/routing.go @@ -6,10 +6,9 @@ import ( "fmt" "time" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord" - "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" record "github.com/libp2p/go-libp2p-record" recpb "github.com/libp2p/go-libp2p-record/pb" @@ -175,8 +174,14 @@ func (d *DHT) SearchValue(ctx context.Context, s string, option ...routing.Optio } func (d *DHT) Bootstrap(ctx
context.Context) error { - _, span := d.tele.Tracer.Start(ctx, "DHT.Bootstrap") + ctx, span := d.tele.Tracer.Start(ctx, "DHT.Bootstrap") defer span.End() - panic("implement me") + seed := make([]kadt.PeerID, len(d.cfg.BootstrapPeers)) + for i, addrInfo := range d.cfg.BootstrapPeers { + seed[i] = kadt.PeerID(addrInfo.ID) + d.host.Peerstore().AddAddrs(addrInfo.ID, addrInfo.Addrs, time.Hour) // TODO: TTL + } + + return d.kad.Bootstrap(ctx, seed) } diff --git a/v2/stream.go b/v2/stream.go index ea8c3a8b..e9e747a3 100644 --- a/v2/stream.go +++ b/v2/stream.go @@ -174,7 +174,7 @@ func (d *DHT) streamReadMsg(ctx context.Context, slogger *slog.Logger, r msgio.R data, err := r.ReadMsg() if err != nil { // log any other errors than stream resets - if !errors.Is(err, network.ErrReset) { + if !errors.Is(err, network.ErrReset) && !errors.Is(err, io.EOF) { slogger.LogAttrs(ctx, slog.LevelDebug, "error reading message", slog.String("err", err.Error())) } diff --git a/v2/tele/tele.go b/v2/tele/tele.go index 9309f85a..29b0e4ab 100644 --- a/v2/tele/tele.go +++ b/v2/tele/tele.go @@ -2,6 +2,7 @@ package tele import ( "context" + "fmt" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" @@ -86,14 +87,14 @@ func AttrKey(val string) attribute.KeyValue { return attribute.String("key", val) } -// AttrEvent creates an attribute that records the name of an event -func AttrEvent(val string) attribute.KeyValue { - return attribute.String("event", val) +// AttrInEvent creates an attribute that records the type of an event +func AttrInEvent(t any) attribute.KeyValue { + return attribute.String("in_event", fmt.Sprintf("%T", t)) } -// AttrOutEvent creates an attribute that records the name of an event being returned -func AttrOutEvent(val string) attribute.KeyValue { - return attribute.String("out_event", val) +// AttrOutEvent creates an attribute that records the type of an event being returned +func AttrOutEvent(t any) attribute.KeyValue { + return attribute.String("out_event", fmt.Sprintf("%T", t)) } // WithAttributes is a function that attaches the provided attributes to the @@ -129,6 +130,6 @@ func FromContext(ctx context.Context, attrs ...attribute.KeyValue) attribute.Set } // StartSpan creates a span and a [context.Context] containing the newly-created span. -func StartSpan(ctx context.Context, name string) (context.Context, trace.Span) { - return otel.Tracer(TracerName).Start(ctx, name) +func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + return otel.Tracer(TracerName).Start(ctx, name, opts...) 
} From 83329a4ee36c4ea7f0ce202cdb9d4adc0f2ce605 Mon Sep 17 00:00:00 2001 From: Ian Davis <18375+iand@users.noreply.github.com> Date: Tue, 19 Sep 2023 15:49:32 +0100 Subject: [PATCH 50/64] Clean up DHT test helpers (#928) * Implement GetValue * Add failing TestGetValueOnePeer test * Unexport methods --- v2/dht_test.go | 15 +--- v2/notifee_test.go | 13 +-- v2/query_test.go | 142 +++---------------------------- v2/routing.go | 48 ++++++++--- v2/routing_test.go | 67 +++++++++++++++ v2/topology_test.go | 197 ++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 319 insertions(+), 163 deletions(-) create mode 100644 v2/routing_test.go create mode 100644 v2/topology_test.go diff --git a/v2/dht_test.go b/v2/dht_test.go index 6296dbf3..2f45e82c 100644 --- a/v2/dht_test.go +++ b/v2/dht_test.go @@ -76,16 +76,9 @@ func TestNew(t *testing.T) { func TestAddAddresses(t *testing.T) { ctx := kadtest.CtxShort(t) - localCfg := DefaultConfig() - rn := coord.NewBufferedRoutingNotifier() - localCfg.Kademlia.RoutingNotifier = rn - - local := newClientDht(t, localCfg) - - remote := newServerDht(t, nil) - - // Populate entries in remote's routing table so it passes a connectivity check - fillRoutingTable(t, remote, 1) + top := NewTopology(t) + local := top.AddClient(nil) + remote := top.AddServer(nil) // local routing table should not contain the node _, err := local.kad.GetNode(ctx, kadt.PeerID(remote.host.ID())) @@ -103,7 +96,7 @@ func TestAddAddresses(t *testing.T) { require.NoError(t, err) // the include state machine runs in the background and eventually should add the node to routing table - _, err = rn.Expect(ctx, &coord.EventRoutingUpdated{}) + _, err = top.ExpectRoutingUpdated(ctx, local, remote.host.ID()) require.NoError(t, err) // the routing table should now contain the node diff --git a/v2/notifee_test.go b/v2/notifee_test.go index b7079ac6..5cf117ef 100644 --- a/v2/notifee_test.go +++ b/v2/notifee_test.go @@ -4,9 +4,7 @@ import ( "testing" "time" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" - "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/event" @@ -74,12 +72,9 @@ func TestDHT_consumeNetworkEvents_onEvtLocalReachabilityChanged(t *testing.T) { func TestDHT_consumeNetworkEvents_onEvtPeerIdentificationCompleted(t *testing.T) { ctx := kadtest.CtxShort(t) - cfg1 := DefaultConfig() - rn1 := coord.NewBufferedRoutingNotifier() - cfg1.Kademlia.RoutingNotifier = rn1 - d1 := newServerDht(t, cfg1) - - d2 := newServerDht(t, nil) + top := NewTopology(t) + d1 := top.AddServer(nil) + d2 := top.AddServer(nil) // make sure d1 has the address of d2 in its peerstore d1.host.Peerstore().AddAddrs(d2.host.ID(), d2.host.Addrs(), time.Minute) @@ -89,6 +84,6 @@ func TestDHT_consumeNetworkEvents_onEvtPeerIdentificationCompleted(t *testing.T) Peer: d2.host.ID(), }) - _, err := rn1.ExpectRoutingUpdated(ctx, kadt.PeerID(d2.host.ID())) + _, err := top.ExpectRoutingUpdated(ctx, d1, d2.host.ID()) require.NoError(t, err) } diff --git a/v2/query_test.go b/v2/query_test.go index bf8e3ee1..3fa63336 100644 --- a/v2/query_test.go +++ b/v2/query_test.go @@ -1,13 +1,8 @@ package dht import ( - "context" "testing" - "time" - "github.com/libp2p/go-libp2p" - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" "github.com/libp2p/go-libp2p-kad-dht/v2/coord" @@ -15,121 +10,15 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) 
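The helpers removed below are superseded by the Topology type introduced in topology_test.go later in this patch; the consolidated setup reads roughly as follows (a sketch based on the updated tests):

    top := NewTopology(t)
    d1 := top.AddServer(nil) // nil selects the default config in server mode
    d2 := top.AddServer(nil)

    // Connect wires the two DHTs together and blocks until each one has
    // observed a routing table update for the other
    top.Connect(ctx, d1, d2)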
-func newServerHost(t testing.TB) host.Host { - listenAddr := libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0") - - h, err := libp2p.New(listenAddr) - require.NoError(t, err) - - t.Cleanup(func() { - if err = h.Close(); err != nil { - t.Logf("unexpected error when closing host: %s", err) - } - }) - - return h -} - -func newClientHost(t testing.TB) host.Host { - h, err := libp2p.New(libp2p.NoListenAddrs) - require.NoError(t, err) - - t.Cleanup(func() { - if err = h.Close(); err != nil { - t.Logf("unexpected error when closing host: %s", err) - } - }) - - return h -} - -func newServerDht(t testing.TB, cfg *Config) *DHT { - h := newServerHost(t) - - var err error - if cfg == nil { - cfg = DefaultConfig() - } - cfg.Mode = ModeOptServer - - d, err := New(h, cfg) - require.NoError(t, err) - - // add at least 1 entry in the routing table so the server will pass connectivity checks - fillRoutingTable(t, d, 1) - require.NotEmpty(t, d.rt.NearestNodes(kadt.PeerID(d.host.ID()).Key(), 1)) - - t.Cleanup(func() { - if err = d.Close(); err != nil { - t.Logf("unexpected error when closing dht: %s", err) - } - }) - return d -} - -func newClientDht(t testing.TB, cfg *Config) *DHT { - h := newClientHost(t) - - var err error - if cfg == nil { - cfg = DefaultConfig() - } - cfg.Mode = ModeOptClient - d, err := New(h, cfg) - require.NoError(t, err) - - t.Cleanup(func() { - if err = d.Close(); err != nil { - t.Logf("unexpected error when closing dht: %s", err) - } - }) - return d -} - -func connect(t *testing.T, ctx context.Context, a, b *DHT, arn *coord.BufferedRoutingNotifier) { - t.Helper() - - remoteAddrInfo := peer.AddrInfo{ - ID: b.host.ID(), - Addrs: b.host.Addrs(), - } - - // Add b's addresss to a - err := a.AddAddresses(ctx, []peer.AddrInfo{remoteAddrInfo}, time.Minute) - require.NoError(t, err) - - // the include state machine runs in the background for a and eventually should add the node to routing table - _, err = arn.ExpectRoutingUpdated(ctx, kadt.PeerID(b.host.ID())) - require.NoError(t, err) - - // the routing table should now contain the node - _, err = a.kad.GetNode(ctx, kadt.PeerID(b.host.ID())) - require.NoError(t, err) -} - func TestRTAdditionOnSuccessfulQuery(t *testing.T) { ctx := kadtest.CtxShort(t) - // create dhts and associated routing notifiers so we can inspect routing events - cfg1 := DefaultConfig() - rn1 := coord.NewBufferedRoutingNotifier() - cfg1.Kademlia.RoutingNotifier = rn1 - d1 := newServerDht(t, cfg1) - - cfg2 := DefaultConfig() - rn2 := coord.NewBufferedRoutingNotifier() - cfg2.Kademlia.RoutingNotifier = rn2 - d2 := newServerDht(t, cfg2) + top := NewTopology(t) + d1 := top.AddServer(nil) + d2 := top.AddServer(nil) + d3 := top.AddServer(nil) - cfg3 := DefaultConfig() - rn3 := coord.NewBufferedRoutingNotifier() - cfg3.Kademlia.RoutingNotifier = rn3 - d3 := newServerDht(t, cfg3) - - connect(t, ctx, d1, d2, rn1) - connect(t, ctx, d2, d1, rn2) - connect(t, ctx, d2, d3, rn2) - connect(t, ctx, d3, d2, rn3) + top.ConnectChain(ctx, d1, d2, d3) // d3 does not know about d1 _, err := d3.kad.GetNode(ctx, kadt.PeerID(d1.host.ID())) @@ -144,7 +33,7 @@ func TestRTAdditionOnSuccessfulQuery(t *testing.T) { // ignore the error // d3 should update its routing table to include d1 during the query - _, err = rn3.ExpectRoutingUpdated(ctx, kadt.PeerID(d1.host.ID())) + _, err = top.ExpectRoutingUpdated(ctx, d3, d1.host.ID()) require.NoError(t, err) // d3 now has d1 in its routing table @@ -152,7 +41,7 @@ func TestRTAdditionOnSuccessfulQuery(t *testing.T) { require.NoError(t, err) // d1 should update 
its routing table to include d3 during the query - _, err = rn1.ExpectRoutingUpdated(ctx, kadt.PeerID(d3.host.ID())) + _, err = top.ExpectRoutingUpdated(ctx, d3, d1.host.ID()) require.NoError(t, err) // d1 now has d3 in its routing table @@ -163,18 +52,11 @@ func TestRTEvictionOnFailedQuery(t *testing.T) { ctx := kadtest.CtxShort(t) - cfg1 := DefaultConfig() - rn1 := coord.NewBufferedRoutingNotifier() - cfg1.Kademlia.RoutingNotifier = rn1 - d1 := newServerDht(t, cfg1) - - cfg2 := DefaultConfig() - rn2 := coord.NewBufferedRoutingNotifier() - cfg2.Kademlia.RoutingNotifier = rn2 - d2 := newServerDht(t, cfg2) + top := NewTopology(t) + d1 := top.AddServer(nil) + d2 := top.AddServer(nil) - connect(t, ctx, d1, d2, rn1) - connect(t, ctx, d2, d1, rn2) + top.Connect(ctx, d1, d2) // close both hosts so query fails require.NoError(t, d1.host.Close()) @@ -195,6 +77,6 @@ func TestRTEvictionOnFailedQuery(t *testing.T) { _, _ = d1.FindPeer(ctx, "test") // d1 should update its routing table to remove d2 because of the failure - _, err = rn1.ExpectRoutingRemoved(ctx, kadt.PeerID(d2.host.ID())) + _, err = top.ExpectRoutingRemoved(ctx, d1, d2.host.ID()) require.NoError(t, err) } diff --git a/v2/routing.go b/v2/routing.go index 82553a62..bfb76805 100644 --- a/v2/routing.go +++ b/v2/routing.go @@ -113,6 +113,18 @@ func (d *DHT) PutValue(ctx context.Context, key string, value []byte, option ... ctx, span := d.tele.Tracer.Start(ctx, "DHT.PutValue") defer span.End() + if err := d.putValueLocal(ctx, key, value); err != nil { + return fmt.Errorf("put value locally: %w", err) + } + + panic("implement me") +} + +// putValueLocal stores a value in the local datastore without querying the network. +func (d *DHT) putValueLocal(ctx context.Context, key string, value []byte) error { + ctx, span := d.tele.Tracer.Start(ctx, "DHT.PutValueLocal") + defer span.End() + ns, path, err := record.SplitKey(key) if err != nil { return fmt.Errorf("splitting key: %w", err) @@ -131,14 +143,29 @@ func (d *DHT) PutValue(ctx context.Context, key string, value []byte, option ... return fmt.Errorf("store record locally: %w", err) } - // TODO reach out to Zikade - panic("implement me") + return nil } func (d *DHT) GetValue(ctx context.Context, key string, option ...routing.Option) ([]byte, error) { ctx, span := d.tele.Tracer.Start(ctx, "DHT.GetValue") defer span.End() + v, err := d.getValueLocal(ctx, key) + if err == nil { + return v, nil + } + if !errors.Is(err, ds.ErrNotFound) { + return nil, fmt.Errorf("get value locally: %w", err) + } + + panic("implement me") +} + +// getValueLocal retrieves a value from the local datastore without querying the network.
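Both helpers dispatch on record.SplitKey, which decomposes a routing key into a namespace (selecting the backend) and a path (the key the backend operates on). Illustratively, for a public key record as constructed in the tests (SplitKey semantics as used by go-libp2p-record):

    ns, path, err := record.SplitKey("/pk/" + string(peerID))
    // ns   == "pk"           -> selects d.backends["pk"]
    // path == string(peerID) -> the key the backend fetches or stores under
    // err  is expected to be non-nil if the key is not of the form "/namespace/path"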
+func (d *DHT) getValueLocal(ctx context.Context, key string) ([]byte, error) { + ctx, span := d.tele.Tracer.Start(ctx, "DHT.GetValueLocal") + defer span.End() + ns, path, err := record.SplitKey(key) if err != nil { return nil, fmt.Errorf("splitting key: %w", err) @@ -151,19 +178,14 @@ func (d *DHT) GetValue(ctx context.Context, key string, option ...routing.Option val, err := b.Fetch(ctx, path) if err != nil { - if !errors.Is(err, ds.ErrNotFound) { - return nil, fmt.Errorf("fetch value locally: %w", err) - } - } else { - rec, ok := val.(*recpb.Record) - if !ok { - return nil, fmt.Errorf("expected *recpb.Record from backend, got: %T", val) - } - return rec.GetValue(), nil + return nil, fmt.Errorf("fetch from backend: %w", err) } - // TODO reach out to Zikade - panic("implement me") + rec, ok := val.(*recpb.Record) + if !ok { + return nil, fmt.Errorf("expected *recpb.Record from backend, got: %T", val) + } + return rec.GetValue(), nil } func (d *DHT) SearchValue(ctx context.Context, s string, option ...routing.Option) (<-chan []byte, error) { diff --git a/v2/routing_test.go b/v2/routing_test.go new file mode 100644 index 00000000..5204ae48 --- /dev/null +++ b/v2/routing_test.go @@ -0,0 +1,67 @@ +package dht + +import ( + "fmt" + "testing" + + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" + + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" +) + +func makePkKeyValue(t *testing.T) (string, []byte) { + t.Helper() + + _, pub, _ := crypto.GenerateEd25519Key(rng) + v, err := crypto.MarshalPublicKey(pub) + require.NoError(t, err) + + id, err := peer.IDFromPublicKey(pub) + require.NoError(t, err) + + key := fmt.Sprintf("/pk/%s", string(id)) + + return key, v +} + +func TestGetSetValueLocal(t *testing.T) { + ctx := kadtest.CtxShort(t) + + top := NewTopology(t) + d := top.AddServer(nil) + + key, v := makePkKeyValue(t) + + err := d.putValueLocal(ctx, key, v) + require.NoError(t, err) + + val, err := d.getValueLocal(ctx, key) + require.NoError(t, err) + + require.Equal(t, v, val) +} + +func TestGetValueOnePeer(t *testing.T) { + t.Skip("not implemented yet") + + ctx := kadtest.CtxShort(t) + top := NewTopology(t) + local := top.AddServer(nil) + remote := top.AddServer(nil) + + // store the value on the remote DHT + key, v := makePkKeyValue(t) + err := remote.putValueLocal(ctx, key, v) + require.NoError(t, err) + + // connect the two DHTs + top.Connect(ctx, local, remote) + + // ask the local DHT to find the value + val, err := local.GetValue(ctx, key) + require.NoError(t, err) + + require.Equal(t, v, val) +} diff --git a/v2/topology_test.go b/v2/topology_test.go new file mode 100644 index 00000000..1af5353f --- /dev/null +++ b/v2/topology_test.go @@ -0,0 +1,197 @@ +package dht + +import ( + "context" + "testing" + "time" + + "github.com/benbjohnson/clock" + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" +) + +// A Topology is an arrangement of DHTs intended to simulate a network +type Topology struct { + clk clock.Clock + tb testing.TB + dhts map[string]*DHT + rns map[string]*coord.BufferedRoutingNotifier +} + +func NewTopology(tb testing.TB) *Topology { + return &Topology{ + clk: clock.New(), + tb: tb, + dhts: make(map[string]*DHT), + rns: make(map[string]*coord.BufferedRoutingNotifier), + } +} + +func (t *Topology) SetClock(clk clock.Clock) { + t.clk = clk 
+} + +// AddServer adds a DHT configured as a server to the topology. +// If cfg is nil, the default DHT config is used with Mode set to ModeOptServer +func (t *Topology) AddServer(cfg *Config) *DHT { + t.tb.Helper() + + listenAddr := libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0") + + h, err := libp2p.New(listenAddr) + require.NoError(t.tb, err) + + t.tb.Cleanup(func() { + if err = h.Close(); err != nil { + t.tb.Logf("unexpected error when closing host: %s", err) + } + }) + + if cfg == nil { + cfg = DefaultConfig() + } + cfg.Mode = ModeOptServer + + rn := coord.NewBufferedRoutingNotifier() + cfg.Kademlia.RoutingNotifier = rn + + d, err := New(h, cfg) + require.NoError(t.tb, err) + + // add at least 1 entry in the routing table so the server will pass connectivity checks + fillRoutingTable(t.tb, d, 1) + require.NotEmpty(t.tb, d.rt.NearestNodes(kadt.PeerID(d.host.ID()).Key(), 1)) + + t.tb.Cleanup(func() { + if err = d.Close(); err != nil { + t.tb.Logf("unexpected error when closing dht: %s", err) + } + }) + + did := t.makeid(d) + t.dhts[did] = d + t.rns[did] = rn + + return d +} + +// AddClient adds a DHT configured as a client to the topology. +// If cfg is nil, the default DHT config is used with Mode set to ModeOptClient +func (t *Topology) AddClient(cfg *Config) *DHT { + t.tb.Helper() + + h, err := libp2p.New(libp2p.NoListenAddrs) + require.NoError(t.tb, err) + + t.tb.Cleanup(func() { + if err = h.Close(); err != nil { + t.tb.Logf("unexpected error when closing host: %s", err) + } + }) + + if cfg == nil { + cfg = DefaultConfig() + } + cfg.Mode = ModeOptClient + + rn := coord.NewBufferedRoutingNotifier() + cfg.Kademlia.RoutingNotifier = rn + + d, err := New(h, cfg) + require.NoError(t.tb, err) + + t.tb.Cleanup(func() { + if err = d.Close(); err != nil { + t.tb.Logf("unexpected error when closing dht: %s", err) + } + }) + + did := t.makeid(d) + t.dhts[did] = d + t.rns[did] = rn + + return d +} + +func (t *Topology) makeid(d *DHT) string { + return kadt.PeerID(d.host.ID()).String() +} + +// Connect ensures that a has b in its routing table and vice versa. +func (t *Topology) Connect(ctx context.Context, a *DHT, b *DHT) { + t.tb.Helper() + + aid := t.makeid(a) + arn, ok := t.rns[aid] + require.True(t.tb, ok, "expected routing notifier for supplied DHT") + + aAddr := peer.AddrInfo{ + ID: a.host.ID(), + Addrs: a.host.Addrs(), + } + + bid := t.makeid(b) + brn, ok := t.rns[bid] + require.True(t.tb, ok, "expected routing notifier for supplied DHT") + + bAddr := peer.AddrInfo{ + ID: b.host.ID(), + Addrs: b.host.Addrs(), + } + + // Add b's addresses to a + err := a.AddAddresses(ctx, []peer.AddrInfo{bAddr}, time.Hour) + require.NoError(t.tb, err) + + // Add a's addresses to b + err = b.AddAddresses(ctx, []peer.AddrInfo{aAddr}, time.Hour) + require.NoError(t.tb, err) + + // include state machine runs in the background for a and eventually should add the node to routing table + _, err = arn.ExpectRoutingUpdated(ctx, kadt.PeerID(b.host.ID())) + require.NoError(t.tb, err) + + // the routing table should now contain the node + _, err = a.kad.GetNode(ctx, kadt.PeerID(b.host.ID())) + require.NoError(t.tb, err) + + // include state machine runs in the background for b and eventually should add the node to routing table + _, err = brn.ExpectRoutingUpdated(ctx, kadt.PeerID(a.host.ID())) + require.NoError(t.tb, err) + + // the routing table should now contain the node + _, err = b.kad.GetNode(ctx, kadt.PeerID(a.host.ID())) + require.NoError(t.tb, err) +} + +// ConnectChain connects the DHTs in a linear chain.
+// The DHTs are configured with routing tables that contain immediate neighbours, +// such that DHT[x] has DHT[x-1] and DHT[x+1] in its routing table. +// The connections do not form a ring: DHT[0] only has DHT[1] in its table and DHT[n-1] only has DHT[n-2] in its table. +// If n > 2 then the first and last DHTs are guaranteed not to have one another in their routing tables. +func (t *Topology) ConnectChain(ctx context.Context, ds ...*DHT) { + for i := 1; i < len(ds); i++ { + t.Connect(ctx, ds[i-1], ds[i]) + } +} + +// ExpectRoutingUpdated blocks until an [EventRoutingUpdated] event is emitted by the supplied [DHT] for the specified peer id. +func (t *Topology) ExpectRoutingUpdated(ctx context.Context, d *DHT, id peer.ID) (*coord.EventRoutingUpdated, error) { + did := t.makeid(d) + rn, ok := t.rns[did] + require.True(t.tb, ok, "expected routing notifier for supplied DHT") + + return rn.ExpectRoutingUpdated(ctx, kadt.PeerID(id)) +} + +// ExpectRoutingRemoved blocks until an [EventRoutingRemoved] event is emitted by the supplied [DHT] for the specified peer id. +func (t *Topology) ExpectRoutingRemoved(ctx context.Context, d *DHT, id peer.ID) (*coord.EventRoutingRemoved, error) { + did := t.makeid(d) + rn, ok := t.rns[did] + require.True(t.tb, ok, "expected routing notifier for supplied DHT") + + return rn.ExpectRoutingRemoved(ctx, kadt.PeerID(id)) +} From 2da54ab8067c1a38a9acbfd5051aa5aeeeab70a2 Mon Sep 17 00:00:00 2001 From: Ian Davis <18375+iand@users.noreply.github.com> Date: Thu, 21 Sep 2023 15:04:53 +0100 Subject: [PATCH 51/64] Improve query capabilities (#932) * Improve query capabilities * go mod tidy * Review feedback * go mod tidy * Move coord packages to internal (#933) * Move coord and kadt packages to internal * go mod tidy * go fmt * Move kadt out of internal and add RoutingTable interface --- v2/config.go | 143 ++++- v2/config_test.go | 51 +- v2/dht.go | 12 +- v2/dht_test.go | 2 +- v2/errors.go | 22 + v2/{ => internal}/coord/behaviour.go | 0 v2/{ => internal}/coord/behaviour_test.go | 0 v2/{ => internal}/coord/coordinator.go | 138 +++-- v2/{ => internal}/coord/coordinator_test.go | 45 +- v2/{ => internal}/coord/coretypes.go | 3 +- v2/{ => internal}/coord/event.go | 64 ++- v2/{ => internal}/coord/event_test.go | 3 +- .../coord/internal/nettest/layouts.go | 0 .../coord/internal/nettest/routing.go | 0 .../coord/internal/nettest/topology.go | 2 +- v2/{ => internal}/coord/internal/tiny/node.go | 4 + .../coord/internal/tiny/node_test.go | 0 v2/{ => internal}/coord/network.go | 32 +- v2/{ => internal}/coord/network_test.go | 2 +- v2/{ => internal}/coord/query.go | 72 ++- v2/{ => internal}/coord/query/iter.go | 0 v2/{ => internal}/coord/query/iter_test.go | 2 +- v2/{ => internal}/coord/query/node.go | 0 v2/{ => internal}/coord/query/pool.go | 147 ++++-- v2/{ => internal}/coord/query/pool_test.go | 89 +++- v2/{ => internal}/coord/query/query.go | 176 +++++-- v2/{ => internal}/coord/query/query_test.go | 494 +++++++++++++----- v2/{ => internal}/coord/routing.go | 2 +- v2/{ => internal}/coord/routing/bootstrap.go | 18 +- .../coord/routing/bootstrap_test.go | 4 +- v2/{ => internal}/coord/routing/include.go | 0 .../coord/routing/include_test.go | 2 +- v2/{ => internal}/coord/routing/probe.go | 0 v2/{ => internal}/coord/routing/probe_test.go | 2 +- v2/{ => internal}/coord/routing_test.go | 6 +- v2/{ => internal}/coord/telemetry.go | 0 v2/kadt/kadt.go | 15 +- v2/query_test.go | 2 +- v2/router.go | 12 +- v2/routing.go | 51 +- v2/routing_test.go | 2 - v2/topology_test.go | 14 +- 42 files changed, 1221
insertions(+), 412 deletions(-) create mode 100644 v2/errors.go rename v2/{ => internal}/coord/behaviour.go (100%) rename v2/{ => internal}/coord/behaviour_test.go (100%) rename v2/{ => internal}/coord/coordinator.go (78%) rename v2/{ => internal}/coord/coordinator_test.go (90%) rename v2/{ => internal}/coord/coretypes.go (95%) rename v2/{ => internal}/coord/event.go (70%) rename v2/{ => internal}/coord/event_test.go (84%) rename v2/{ => internal}/coord/internal/nettest/layouts.go (100%) rename v2/{ => internal}/coord/internal/nettest/routing.go (100%) rename v2/{ => internal}/coord/internal/nettest/topology.go (97%) rename v2/{ => internal}/coord/internal/tiny/node.go (92%) rename v2/{ => internal}/coord/internal/tiny/node_test.go (100%) rename v2/{ => internal}/coord/network.go (87%) rename v2/{ => internal}/coord/network_test.go (92%) rename v2/{ => internal}/coord/query.go (67%) rename v2/{ => internal}/coord/query/iter.go (100%) rename v2/{ => internal}/coord/query/iter_test.go (97%) rename v2/{ => internal}/coord/query/node.go (100%) rename v2/{ => internal}/coord/query/pool.go (64%) rename v2/{ => internal}/coord/query/pool_test.go (75%) rename v2/{ => internal}/coord/query/query.go (65%) rename v2/{ => internal}/coord/query/query_test.go (64%) rename v2/{ => internal}/coord/routing.go (99%) rename v2/{ => internal}/coord/routing/bootstrap.go (93%) rename v2/{ => internal}/coord/routing/bootstrap_test.go (98%) rename v2/{ => internal}/coord/routing/include.go (100%) rename v2/{ => internal}/coord/routing/include_test.go (99%) rename v2/{ => internal}/coord/routing/probe.go (100%) rename v2/{ => internal}/coord/routing/probe_test.go (99%) rename v2/{ => internal}/coord/routing_test.go (97%) rename v2/{ => internal}/coord/telemetry.go (100%) diff --git a/v2/config.go b/v2/config.go index ba41447c..696b8dfe 100644 --- a/v2/config.go +++ b/v2/config.go @@ -19,8 +19,7 @@ import ( "go.uber.org/zap/exp/zapslog" "golang.org/x/exp/slog" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/routing" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/routing" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) @@ -112,8 +111,8 @@ type Config struct { // between both automatically (see ModeOpt). Mode ModeOpt - // Kademlia holds the configuration of the underlying Kademlia implementation. - Kademlia *coord.CoordinatorConfig + // Query holds the configuration used for queries managed by the DHT. + Query *QueryConfig // BucketSize determines the number of closer peers to return BucketSize int @@ -132,7 +131,7 @@ type Config struct { // [triert.TrieRT] routing table will be used. This field will be nil // in the default configuration because a routing table requires information // about the local node. - RoutingTable routing.RoutingTableCpl[kadt.Key, kadt.PeerID] + RoutingTable kadt.RoutingTable // The Backends field holds a map of key namespaces to their corresponding // backend implementation. 
For example, if we received an IPNS record, the @@ -193,7 +192,6 @@ func DefaultConfig() *Config { return &Config{ Clock: clock.New(), Mode: ModeOptAutoClient, - Kademlia: coord.DefaultCoordinatorConfig(), BucketSize: 20, // MAGIC BootstrapPeers: DefaultBootstrapPeers(), ProtocolID: ProtocolIPFS, @@ -205,6 +203,7 @@ func DefaultConfig() *Config { AddressFilter: AddrFilterPrivate, MeterProvider: otel.GetMeterProvider(), TracerProvider: otel.GetTracerProvider(), + Query: DefaultQueryConfig(), } } @@ -242,62 +241,104 @@ func (c *Config) Validate() error { return fmt.Errorf("invalid mode option: %s", c.Mode) } - if c.Kademlia == nil { - return fmt.Errorf("kademlia configuration must not be nil") + if c.Query == nil { + return &ConfigurationError{ + Component: "Config", + Err: fmt.Errorf("query configuration must not be nil"), + } } - if err := c.Kademlia.Validate(); err != nil { - return fmt.Errorf("invalid kademlia configuration: %w", err) + if err := c.Query.Validate(); err != nil { + return &ConfigurationError{ + Component: "Config", + Err: fmt.Errorf("invalid query configuration: %w", err), + } } if c.BucketSize == 0 { - return fmt.Errorf("bucket size must not be 0") + return &ConfigurationError{ + Component: "Config", + Err: fmt.Errorf("bucket size must not be 0"), + } } if len(c.BootstrapPeers) == 0 { - return fmt.Errorf("no bootstrap peer") + return &ConfigurationError{ + Component: "Config", + Err: fmt.Errorf("no bootstrap peer"), + } } if c.ProtocolID == "" { - return fmt.Errorf("protocolID must not be empty") + return &ConfigurationError{ + Component: "Config", + Err: fmt.Errorf("protocolID must not be empty"), + } } if c.Logger == nil { - return fmt.Errorf("logger must not be nil") + return &ConfigurationError{ + Component: "Config", + Err: fmt.Errorf("logger must not be nil"), + } } if c.TimeoutStreamIdle <= 0 { - return fmt.Errorf("stream idle timeout must be a positive duration") + return &ConfigurationError{ + Component: "Config", + Err: fmt.Errorf("stream idle timeout must be a positive duration"), + } } if c.ProtocolID == ProtocolIPFS && len(c.Backends) != 0 { if len(c.Backends) != 3 { - return fmt.Errorf("ipfs protocol requires exactly three backends") + return &ConfigurationError{ + Component: "Config", + Err: fmt.Errorf("ipfs protocol requires exactly three backends"), + } } if _, found := c.Backends[namespaceIPNS]; !found { - return fmt.Errorf("ipfs protocol requires an IPNS backend") + return &ConfigurationError{ + Component: "Config", + Err: fmt.Errorf("ipfs protocol requires an IPNS backend"), + } } if _, found := c.Backends[namespacePublicKey]; !found { - return fmt.Errorf("ipfs protocol requires a public key backend") + return &ConfigurationError{ + Component: "Config", + Err: fmt.Errorf("ipfs protocol requires a public key backend"), + } } if _, found := c.Backends[namespaceProviders]; !found { - return fmt.Errorf("ipfs protocol requires a providers backend") + return &ConfigurationError{ + Component: "Config", + Err: fmt.Errorf("ipfs protocol requires a providers backend"), + } } } if c.AddressFilter == nil { - return fmt.Errorf("address filter must not be nil - use AddrFilterIdentity to disable filtering") + return &ConfigurationError{ + Component: "Config", + Err: fmt.Errorf("address filter must not be nil - use AddrFilterIdentity to disable filtering"), + } } if c.MeterProvider == nil { - return fmt.Errorf("opentelemetry meter provider must not be nil") + return &ConfigurationError{ + Component: "Config", + Err: fmt.Errorf("opentelemetry meter provider must not 
be nil"), + } } if c.TracerProvider == nil { - return fmt.Errorf("opentelemetry tracer provider must not be nil") + return &ConfigurationError{ + Component: "Config", + Err: fmt.Errorf("opentelemetry tracer provider must not be nil"), + } } return nil @@ -322,3 +363,61 @@ func AddrFilterPrivate(maddrs []ma.Multiaddr) []ma.Multiaddr { func AddrFilterPublic(maddrs []ma.Multiaddr) []ma.Multiaddr { return ma.FilterAddrs(maddrs, func(maddr ma.Multiaddr) bool { return !manet.IsIPLoopback(maddr) }) } + +// QueryConfig contains the configuration options for queries managed by a [DHT]. +type QueryConfig struct { + // Concurrency defines the maximum number of in-flight queries that may be waiting for message responses at any one time. + Concurrency int + + // Timeout defines the time to wait before terminating a query that is not making progress + Timeout time.Duration + + // RequestConcurrency defines the maximum number of concurrent requests that each query may have in flight. + // The maximum number of concurrent requests is equal to [RequestConcurrency] multiplied by [Concurrency]. + RequestConcurrency int + + // RequestTimeout defines the time to wait before terminating a request to a node that has not responded. + RequestTimeout time.Duration +} + +// DefaultQueryConfig returns the default query configuration options for a DHT. +func DefaultQueryConfig() *QueryConfig { + return &QueryConfig{ + Concurrency: 3, // MAGIC + Timeout: 5 * time.Minute, // MAGIC + RequestConcurrency: 3, // MAGIC + RequestTimeout: time.Minute, // MAGIC + } +} + +// Validate checks the configuration options and returns an error if any have invalid values. +func (cfg *QueryConfig) Validate() error { + if cfg.Concurrency < 1 { + return &ConfigurationError{ + Component: "QueryConfig", + Err: fmt.Errorf("concurrency must be greater than zero"), + } + } + if cfg.Timeout < 1 { + return &ConfigurationError{ + Component: "QueryConfig", + Err: fmt.Errorf("timeout must be greater than zero"), + } + } + + if cfg.RequestConcurrency < 1 { + return &ConfigurationError{ + Component: "QueryConfig", + Err: fmt.Errorf("request concurrency must be greater than zero"), + } + } + + if cfg.RequestTimeout < 1 { + return &ConfigurationError{ + Component: "QueryConfig", + Err: fmt.Errorf("request timeout must be greater than zero"), + } + } + + return nil +} diff --git a/v2/config_test.go b/v2/config_test.go index 6787fff9..739216ab 100644 --- a/v2/config_test.go +++ b/v2/config_test.go @@ -20,15 +20,15 @@ func TestConfig_Validate(t *testing.T) { assert.Error(t, cfg.Validate()) }) - t.Run("nil Kademlia configuration", func(t *testing.T) { + t.Run("nil Query configuration", func(t *testing.T) { cfg := DefaultConfig() - cfg.Kademlia = nil + cfg.Query = nil assert.Error(t, cfg.Validate()) }) - t.Run("invalid Kademlia configuration", func(t *testing.T) { + t.Run("invalid Query configuration", func(t *testing.T) { cfg := DefaultConfig() - cfg.Kademlia.Clock = nil + cfg.Query.Concurrency = -1 assert.Error(t, cfg.Validate()) }) @@ -114,3 +114,46 @@ func TestConfig_Validate(t *testing.T) { assert.Error(t, cfg.Validate()) }) } + +func TestQueryConfig_Validate(t *testing.T) { + t.Run("default is valid", func(t *testing.T) { + cfg := DefaultQueryConfig() + assert.NoError(t, cfg.Validate()) + }) + + t.Run("concurrency positive", func(t *testing.T) { + cfg := DefaultQueryConfig() + + cfg.Concurrency = 0 + assert.Error(t, cfg.Validate()) + cfg.Concurrency = -1 + assert.Error(t, cfg.Validate()) + }) + + t.Run("timeout positive", func(t *testing.T) { + cfg := 
DefaultQueryConfig() + + cfg.Timeout = 0 + assert.Error(t, cfg.Validate()) + cfg.Timeout = -1 + assert.Error(t, cfg.Validate()) + }) + + t.Run("request concurrency positive", func(t *testing.T) { + cfg := DefaultQueryConfig() + + cfg.RequestConcurrency = 0 + assert.Error(t, cfg.Validate()) + cfg.RequestConcurrency = -1 + assert.Error(t, cfg.Validate()) + }) + + t.Run("request timeout positive", func(t *testing.T) { + cfg := DefaultQueryConfig() + + cfg.RequestTimeout = 0 + assert.Error(t, cfg.Validate()) + cfg.RequestTimeout = -1 + assert.Error(t, cfg.Validate()) + }) +} diff --git a/v2/dht.go b/v2/dht.go index 1fd857d7..0afeb408 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -16,8 +16,8 @@ import ( "github.com/plprobelab/go-kademlia/key" "golang.org/x/exp/slog" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/routing" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/routing" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) @@ -107,12 +107,16 @@ func New(h host.Host, cfg *Config) (*DHT, error) { } // instantiate a new Kademlia DHT coordinator. - coordCfg := cfg.Kademlia + coordCfg := coord.DefaultCoordinatorConfig() + coordCfg.QueryConcurrency = cfg.Query.Concurrency + coordCfg.QueryTimeout = cfg.Query.Timeout + coordCfg.RequestConcurrency = cfg.Query.RequestConcurrency + coordCfg.RequestTimeout = cfg.Query.RequestTimeout coordCfg.Clock = cfg.Clock coordCfg.MeterProvider = cfg.MeterProvider coordCfg.TracerProvider = cfg.TracerProvider - d.kad, err = coord.NewCoordinator(kadt.PeerID(d.host.ID()), &Router{host: h, ProtocolID: cfg.ProtocolID}, d.rt, coordCfg) + d.kad, err = coord.NewCoordinator(kadt.PeerID(d.host.ID()), &router{host: h, ProtocolID: cfg.ProtocolID}, d.rt, coordCfg) if err != nil { return nil, fmt.Errorf("new coordinator: %w", err) } diff --git a/v2/dht_test.go b/v2/dht_test.go index 2f45e82c..44d68bc4 100644 --- a/v2/dht_test.go +++ b/v2/dht_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) diff --git a/v2/errors.go b/v2/errors.go new file mode 100644 index 00000000..55c6f84b --- /dev/null +++ b/v2/errors.go @@ -0,0 +1,22 @@ +package dht + +import "fmt" + +// A ConfigurationError is returned when a component's configuration is found to be invalid or unusable. 
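Because the type defined below carries the failing component and wraps the underlying cause, callers can inspect validation failures with the standard errors helpers, for example:

    cfg := DefaultConfig()
    cfg.Query = nil

    err := cfg.Validate()

    var cfgErr *ConfigurationError
    if errors.As(err, &cfgErr) {
        fmt.Println(cfgErr.Component)      // "Config"
        fmt.Println(errors.Unwrap(cfgErr)) // "query configuration must not be nil"
    }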
+type ConfigurationError struct { + Component string + Err error +} + +var _ error = (*ConfigurationError)(nil) + +func (e *ConfigurationError) Error() string { + if e.Err == nil { + return fmt.Sprintf("configuration error: %s", e.Component) + } + return fmt.Sprintf("configuration error: %s: %s", e.Component, e.Err.Error()) +} + +func (e *ConfigurationError) Unwrap() error { + return e.Err +} diff --git a/v2/coord/behaviour.go b/v2/internal/coord/behaviour.go similarity index 100% rename from v2/coord/behaviour.go rename to v2/internal/coord/behaviour.go diff --git a/v2/coord/behaviour_test.go b/v2/internal/coord/behaviour_test.go similarity index 100% rename from v2/coord/behaviour_test.go rename to v2/internal/coord/behaviour_test.go diff --git a/v2/coord/coordinator.go b/v2/internal/coord/coordinator.go similarity index 78% rename from v2/coord/coordinator.go rename to v2/internal/coord/coordinator.go index 63bad31c..d3d619b2 100644 --- a/v2/coord/coordinator.go +++ b/v2/internal/coord/coordinator.go @@ -6,6 +6,7 @@ import ( "fmt" "reflect" "sync" + "sync/atomic" "time" "github.com/benbjohnson/clock" @@ -19,8 +20,8 @@ import ( "go.uber.org/zap/exp/zapslog" "golang.org/x/exp/slog" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/query" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/routing" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/routing" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) @@ -58,6 +59,15 @@ type Coordinator struct { // tele provides tracing and metric reporting capabilities tele *Telemetry + + // routingNotifierMu guards access to routingNotifier which may be changed during coordinator operation + routingNotifierMu sync.RWMutex + + // routingNotifier receives routing notifications + routingNotifier RoutingNotifier + + // lastQueryID holds the last numeric query id generated + lastQueryID atomic.Uint64 } type RoutingNotifier interface { @@ -65,8 +75,6 @@ type RoutingNotifier interface { } type CoordinatorConfig struct { - PeerstoreTTL time.Duration // duration for which a peer is kept in the peerstore - Clock clock.Clock // a clock that may replaced by a mock when testing QueryConcurrency int // the maximum number of queries that may be waiting for message responses at any one time @@ -79,8 +87,6 @@ type CoordinatorConfig struct { MeterProvider metric.MeterProvider // the meter provider to use when initialising metric instruments TracerProvider trace.TracerProvider // the tracer provider to use when initialising tracing - - RoutingNotifier RoutingNotifier // receives notifications of routing events } // Validate checks the configuration options and returns an error if any have invalid values. 
@@ -140,20 +146,12 @@ func (cfg *CoordinatorConfig) Validate() error { } } - if cfg.RoutingNotifier == nil { - return &kaderr.ConfigurationError{ - Component: "CoordinatorConfig", - Err: fmt.Errorf("routing notifier must not be nil"), - } - } - return nil } func DefaultCoordinatorConfig() *CoordinatorConfig { return &CoordinatorConfig{ Clock: clock.New(), - PeerstoreTTL: 10 * time.Minute, QueryConcurrency: 3, QueryTimeout: 5 * time.Minute, RequestConcurrency: 3, @@ -161,7 +159,6 @@ func DefaultCoordinatorConfig() *CoordinatorConfig { Logger: slog.New(zapslog.NewHandler(logging.Logger("coord").Desugar().Core())), MeterProvider: otel.GetMeterProvider(), TracerProvider: otel.GetTracerProvider(), - RoutingNotifier: nullRoutingNotifier{}, } } @@ -185,7 +182,7 @@ func NewCoordinator(self kadt.PeerID, rtr Router[kadt.Key, kadt.PeerID, *pb.Mess qpCfg.QueryConcurrency = cfg.RequestConcurrency qpCfg.RequestTimeout = cfg.RequestTimeout - qp, err := query.NewPool[kadt.Key](self, qpCfg) + qp, err := query.NewPool[kadt.Key, kadt.PeerID, *pb.Message](self, qpCfg) if err != nil { return nil, fmt.Errorf("query pool: %w", err) } @@ -197,7 +194,7 @@ func NewCoordinator(self kadt.PeerID, rtr Router[kadt.Key, kadt.PeerID, *pb.Mess bootstrapCfg.RequestConcurrency = cfg.RequestConcurrency bootstrapCfg.RequestTimeout = cfg.RequestTimeout - bootstrap, err := routing.NewBootstrap[kadt.Key](kadt.PeerID(self), bootstrapCfg) + bootstrap, err := routing.NewBootstrap(kadt.PeerID(self), bootstrapCfg) if err != nil { return nil, fmt.Errorf("bootstrap: %w", err) } @@ -245,6 +242,7 @@ func NewCoordinator(self kadt.PeerID, rtr Router[kadt.Key, kadt.PeerID, *pb.Mess networkBehaviour: networkBehaviour, routingBehaviour: routingBehaviour, queryBehaviour: queryBehaviour, + routingNotifier: nullRoutingNotifier{}, } go d.eventLoop(ctx) @@ -301,12 +299,21 @@ func (c *Coordinator) dispatchEvent(ctx context.Context, ev BehaviourEvent) { case RoutingCommand: c.routingBehaviour.Notify(ctx, ev) case RoutingNotification: - c.cfg.RoutingNotifier.Notify(ctx, ev) + c.routingNotifierMu.RLock() + rn := c.routingNotifier + c.routingNotifierMu.RUnlock() + rn.Notify(ctx, ev) default: panic(fmt.Sprintf("unexpected event: %T", ev)) } } +func (c *Coordinator) SetRoutingNotifier(rn RoutingNotifier) { + c.routingNotifierMu.Lock() + c.routingNotifier = rn + c.routingNotifierMu.Unlock() +} + // GetNode retrieves the node associated with the given node id from the DHT's local routing table. // If the node isn't found in the table, it returns ErrNodeNotFound. func (c *Coordinator) GetNode(ctx context.Context, id kadt.PeerID) (Node, error) { @@ -351,8 +358,18 @@ func (c *Coordinator) PutValue(ctx context.Context, r Value, q int) error { panic("not implemented") } -// Query traverses the DHT calling fn for each node visited. -func (c *Coordinator) Query(ctx context.Context, target kadt.Key, fn QueryFunc) (QueryStats, error) { +// QueryClosest starts a query that attempts to find the closest nodes to the target key. +// It returns the closest nodes found to the target key and statistics on the actions of the query. +// +// The supplied [QueryFunc] is called after each successful request to a node with the ID of the node, +// the response received from the find nodes request made to the node and the current query stats. 
The query +// terminates when [QueryFunc] returns an error or when the query has visited the configured minimum number +// of closest nodes (default 20) +// +// numResults specifies the minimum number of nodes to successfully contact before considering iteration complete. +// The query is considered to be exhausted when it has received responses from at least this number of nodes +// and there are no closer nodes remaining to be contacted. A default of 20 is used if this value is less than 1. +func (c *Coordinator) QueryClosest(ctx context.Context, target kadt.Key, fn QueryFunc, numResults int) ([]kadt.PeerID, QueryStats, error) { ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.Query") defer span.End() @@ -361,7 +378,7 @@ func (c *Coordinator) Query(ctx context.Context, target kadt.Key, fn QueryFunc) seeds, err := c.GetClosestNodes(ctx, target, 20) if err != nil { - return QueryStats{}, err + return nil, QueryStats{}, err } seedIDs := make([]kadt.PeerID, 0, len(seeds)) @@ -370,23 +387,79 @@ func (c *Coordinator) Query(ctx context.Context, target kadt.Key, fn QueryFunc) } waiter := NewWaiter[BehaviourEvent]() - queryID := query.QueryID("foo") // TODO: choose query ID + queryID := c.newQueryID() - cmd := &EventStartQuery{ + cmd := &EventStartFindCloserQuery{ QueryID: queryID, Target: target, KnownClosestNodes: seedIDs, Notify: waiter, + NumResults: numResults, } // queue the start of the query c.queryBehaviour.Notify(ctx, cmd) + return c.waitForQuery(ctx, queryID, waiter, fn) +} + +// QueryMessage starts a query that iterates over the closest nodes to the target key in the supplied message. +// The message is sent to each node that is visited. +// +// The supplied [QueryFunc] is called after each successful request to a node with the ID of the node, +// the response received from the find nodes request made to the node and the current query stats. The query +// terminates when [QueryFunc] returns an error or when the query has visited the configured minimum number +// of closest nodes (default 20) +// +// numResults specifies the minimum number of nodes to successfully contact before considering iteration complete. +// The query is considered to be exhausted when it has received responses from at least this number of nodes +// and there are no closer nodes remaining to be contacted. A default of 20 is used if this value is less than 1. 
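Both entry points drive the supplied [QueryFunc], which now receives the responding peer and its message rather than a Node handle. Usage mirrors the updated coordinator tests (c is a *Coordinator, target a kadt.Key):

    visited := make(map[string]int)
    qfn := func(ctx context.Context, id kadt.PeerID, msg *pb.Message, stats QueryStats) error {
        visited[id.String()]++
        return nil // returning ErrSkipRemaining would stop the query early
    }

    // closest nodes found, query statistics, and any terminal error
    closest, stats, err := c.QueryClosest(ctx, target, qfn, 20)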
+func (c *Coordinator) QueryMessage(ctx context.Context, msg *pb.Message, fn QueryFunc, numResults int) (QueryStats, error) { + ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.QueryMessage") + defer span.End() + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if numResults < 1 { + numResults = 20 + } + + seeds, err := c.GetClosestNodes(ctx, msg.Target(), numResults) + if err != nil { + return QueryStats{}, err + } + + seedIDs := make([]kadt.PeerID, 0, len(seeds)) + for _, s := range seeds { + seedIDs = append(seedIDs, kadt.PeerID(s.ID())) + } + + waiter := NewWaiter[BehaviourEvent]() + queryID := c.newQueryID() + + cmd := &EventStartMessageQuery{ + QueryID: queryID, + Target: msg.Target(), + Message: msg, + KnownClosestNodes: seedIDs, + Notify: waiter, + NumResults: numResults, + } + + // queue the start of the query + c.queryBehaviour.Notify(ctx, cmd) + + _, stats, err := c.waitForQuery(ctx, queryID, waiter, fn) + return stats, err +} + +func (c *Coordinator) waitForQuery(ctx context.Context, queryID query.QueryID, waiter *Waiter[BehaviourEvent], fn QueryFunc) ([]kadt.PeerID, QueryStats, error) { var lastStats QueryStats for { select { case <-ctx.Done(): - return lastStats, ctx.Err() + return nil, lastStats, ctx.Err() case wev := <-waiter.Chan(): ctx, ev := wev.Ctx, wev.Event switch ev := ev.(type) { @@ -403,26 +476,22 @@ func (c *Coordinator) Query(ctx context.Context, target kadt.Key, fn QueryFunc) break } - err = fn(ctx, nh, lastStats) + err = fn(ctx, nh.ID(), ev.Response, lastStats) if errors.Is(err, ErrSkipRemaining) { // done c.queryBehaviour.Notify(ctx, &EventStopQuery{QueryID: queryID}) - return lastStats, nil - } - if errors.Is(err, ErrSkipNode) { - // TODO: don't add closer nodes from this node - break + return nil, lastStats, nil } if err != nil { // user defined error that terminates the query c.queryBehaviour.Notify(ctx, &EventStopQuery{QueryID: queryID}) - return lastStats, err + return nil, lastStats, err } case *EventQueryFinished: // query is done lastStats.Exhausted = true - return lastStats, nil + return ev.ClosestNodes, lastStats, nil default: panic(fmt.Sprintf("unexpected event: %T", ev)) @@ -490,6 +559,11 @@ func (c *Coordinator) NotifyNonConnectivity(ctx context.Context, id kadt.PeerID) return nil } +func (c *Coordinator) newQueryID() query.QueryID { + next := c.lastQueryID.Add(1) + return query.QueryID(fmt.Sprintf("%016x", next)) +} + // A BufferedRoutingNotifier is a [RoutingNotifier] that buffers [RoutingNotification] events and provides methods // to expect occurrences of specific events. It is designed for use in a test environment. 
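//
// A minimal usage sketch, mirroring the tests later in this patch (c is assumed to
// be an already constructed *Coordinator):
//
//	rn := NewBufferedRoutingNotifier()
//	c.SetRoutingNotifier(rn)
//	// run queries, then assert on the buffered RoutingNotification events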
type BufferedRoutingNotifier struct { diff --git a/v2/coord/coordinator_test.go b/v2/internal/coord/coordinator_test.go similarity index 90% rename from v2/coord/coordinator_test.go rename to v2/internal/coord/coordinator_test.go index ba32444e..c267b4a0 100644 --- a/v2/coord/coordinator_test.go +++ b/v2/internal/coord/coordinator_test.go @@ -4,18 +4,16 @@ import ( "context" "log" "testing" - "time" "github.com/benbjohnson/clock" "github.com/stretchr/testify/require" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/nettest" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) -const peerstoreTTL = 10 * time.Minute - func TestConfigValidate(t *testing.T) { t.Run("default is valid", func(t *testing.T) { cfg := DefaultCoordinatorConfig() @@ -53,7 +51,7 @@ func TestConfigValidate(t *testing.T) { cfg.RequestConcurrency = 0 require.Error(t, cfg.Validate()) - cfg.QueryConcurrency = -1 + cfg.RequestConcurrency = -1 require.Error(t, cfg.Validate()) }) @@ -84,12 +82,6 @@ func TestConfigValidate(t *testing.T) { cfg.TracerProvider = nil require.Error(t, cfg.Validate()) }) - - t.Run("routing notifier not nil", func(t *testing.T) { - cfg := DefaultCoordinatorConfig() - cfg.RoutingNotifier = nil - require.Error(t, cfg.Validate()) - }) } func TestExhaustiveQuery(t *testing.T) { @@ -101,7 +93,6 @@ func TestExhaustiveQuery(t *testing.T) { ccfg := DefaultCoordinatorConfig() ccfg.Clock = clk - ccfg.PeerstoreTTL = peerstoreTTL // A (ids[0]) is looking for D (ids[3]) // A will first ask B, B will reply with C's address (and A's address) @@ -115,13 +106,13 @@ func TestExhaustiveQuery(t *testing.T) { visited := make(map[string]int) // Record the nodes as they are visited - qfn := func(ctx context.Context, node Node, stats QueryStats) error { - visited[node.ID().String()]++ + qfn := func(ctx context.Context, id kadt.PeerID, msg *pb.Message, stats QueryStats) error { + visited[id.String()]++ return nil } // Run a query to find the value - _, err = c.Query(ctx, target, qfn) + _, _, err = c.QueryClosest(ctx, target, qfn, 20) require.NoError(t, err) require.Equal(t, 3, len(visited)) @@ -140,10 +131,6 @@ func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { ccfg := DefaultCoordinatorConfig() ccfg.Clock = clk - ccfg.PeerstoreTTL = peerstoreTTL - - rn := NewBufferedRoutingNotifier() - ccfg.RoutingNotifier = rn // A (ids[0]) is looking for D (ids[3]) // A will first ask B, B will reply with C's address (and A's address) @@ -154,13 +141,16 @@ func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { log.Fatalf("unexpected error creating coordinator: %v", err) } - qfn := func(ctx context.Context, node Node, stats QueryStats) error { + rn := NewBufferedRoutingNotifier() + c.SetRoutingNotifier(rn) + + qfn := func(ctx context.Context, id kadt.PeerID, msg *pb.Message, stats QueryStats) error { return nil } // Run a query to find the value target := nodes[3].NodeID.Key() - _, err = c.Query(ctx, target, qfn) + _, _, err = c.QueryClosest(ctx, target, qfn, 20) require.NoError(t, err) // the query run by the dht should have received a response from nodes[1] with closer nodes @@ -201,15 +191,14 @@ func TestBootstrap(t *testing.T) { ccfg := DefaultCoordinatorConfig() ccfg.Clock = clk - ccfg.PeerstoreTTL = peerstoreTTL - - rn := NewBufferedRoutingNotifier() - ccfg.RoutingNotifier = rn self := kadt.PeerID(nodes[0].NodeID) d, err := 
NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg) require.NoError(t, err) + rn := NewBufferedRoutingNotifier() + d.SetRoutingNotifier(rn) + seeds := []kadt.PeerID{nodes[1].NodeID} err = d.Bootstrap(ctx, seeds) require.NoError(t, err) @@ -253,10 +242,6 @@ func TestIncludeNode(t *testing.T) { ccfg := DefaultCoordinatorConfig() ccfg.Clock = clk - ccfg.PeerstoreTTL = peerstoreTTL - - rn := NewBufferedRoutingNotifier() - ccfg.RoutingNotifier = rn candidate := nodes[len(nodes)-1].NodeID // not in nodes[0] routing table @@ -265,6 +250,8 @@ func TestIncludeNode(t *testing.T) { if err != nil { log.Fatalf("unexpected error creating dht: %v", err) } + rn := NewBufferedRoutingNotifier() + d.SetRoutingNotifier(rn) // the routing table should not contain the node yet _, err = d.GetNode(ctx, candidate) diff --git a/v2/coord/coretypes.go b/v2/internal/coord/coretypes.go similarity index 95% rename from v2/coord/coretypes.go rename to v2/internal/coord/coretypes.go index 0f72cebf..12c9ba26 100644 --- a/v2/coord/coretypes.go +++ b/v2/internal/coord/coretypes.go @@ -8,6 +8,7 @@ import ( "github.com/plprobelab/go-kademlia/kad" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) // Value is a value that may be stored in the DHT. @@ -49,7 +50,7 @@ var ( // Query stops entirely and returns that error. // // The stats argument contains statistics on the progress of the query so far. -type QueryFunc func(ctx context.Context, node Node, stats QueryStats) error +type QueryFunc func(ctx context.Context, id kadt.PeerID, resp *pb.Message, stats QueryStats) error type QueryStats struct { Start time.Time // Start is the time the query began executing. diff --git a/v2/coord/event.go b/v2/internal/coord/event.go similarity index 70% rename from v2/coord/event.go rename to v2/internal/coord/event.go index 663cfee9..a0037732 100644 --- a/v2/coord/event.go +++ b/v2/internal/coord/event.go @@ -1,8 +1,9 @@ package coord import ( - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/query" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) type BehaviourEvent interface { @@ -60,15 +61,39 @@ func (*EventOutboundGetCloserNodes) behaviourEvent() {} func (*EventOutboundGetCloserNodes) nodeHandlerRequest() {} func (*EventOutboundGetCloserNodes) networkCommand() {} -type EventStartQuery struct { +type EventOutboundSendMessage struct { + QueryID query.QueryID + To kadt.PeerID + Message *pb.Message + Notify Notify[BehaviourEvent] +} + +func (*EventOutboundSendMessage) behaviourEvent() {} +func (*EventOutboundSendMessage) nodeHandlerRequest() {} +func (*EventOutboundSendMessage) networkCommand() {} + +type EventStartMessageQuery struct { + QueryID query.QueryID + Target kadt.Key + Message *pb.Message + KnownClosestNodes []kadt.PeerID + Notify NotifyCloser[BehaviourEvent] + NumResults int // the minimum number of nodes to successfully contact before considering iteration complete +} + +func (*EventStartMessageQuery) behaviourEvent() {} +func (*EventStartMessageQuery) queryCommand() {} + +type EventStartFindCloserQuery struct { QueryID query.QueryID Target kadt.Key KnownClosestNodes []kadt.PeerID Notify NotifyCloser[BehaviourEvent] + NumResults int // the minimum number of nodes to successfully contact before considering iteration complete } -func (*EventStartQuery) behaviourEvent() {} -func (*EventStartQuery) queryCommand() {} +func (*EventStartFindCloserQuery) behaviourEvent() {} 
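+// (behaviourEvent and queryCommand are marker methods with empty bodies: they tag
+// EventStartFindCloserQuery as satisfying the BehaviourEvent and QueryCommand
+// interfaces at compile time and carry no behaviour of their own.)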
+func (*EventStartFindCloserQuery) queryCommand() {}
 
 type EventStopQuery struct {
 	QueryID query.QueryID
@@ -109,12 +134,36 @@ type EventGetCloserNodesFailure struct {
 func (*EventGetCloserNodesFailure) behaviourEvent()      {}
 func (*EventGetCloserNodesFailure) nodeHandlerResponse() {}
 
+// EventSendMessageSuccess notifies a behaviour that a SendMessage request, initiated by an
+// [EventOutboundSendMessage] event, has produced a successful response.
+type EventSendMessageSuccess struct {
+	QueryID     query.QueryID
+	To          kadt.PeerID // To is the peer that the SendMessage request was sent to.
+	Response    *pb.Message
+	CloserNodes []kadt.PeerID
+}
+
+func (*EventSendMessageSuccess) behaviourEvent()      {}
+func (*EventSendMessageSuccess) nodeHandlerResponse() {}
+
+// EventSendMessageFailure notifies a behaviour that a SendMessage request, initiated by an
+// [EventOutboundSendMessage] event, has failed to produce a valid response.
+type EventSendMessageFailure struct {
+	QueryID query.QueryID
+	To      kadt.PeerID // To is the peer that the SendMessage request was sent to.
+	Target  kadt.Key
+	Err     error
+}
+
+func (*EventSendMessageFailure) behaviourEvent()      {}
+func (*EventSendMessageFailure) nodeHandlerResponse() {}
+
 // EventQueryProgressed is emitted by the coordinator when a query has received a
 // response from a node.
 type EventQueryProgressed struct {
 	QueryID  query.QueryID
 	NodeID   kadt.PeerID
-	Response Message
+	Response *pb.Message
 	Stats    query.QueryStats
 }
 
@@ -123,8 +172,9 @@ func (*EventQueryProgressed) behaviourEvent() {}
 // EventQueryFinished is emitted by the coordinator when a query has finished, either through
 // running to completion or by being canceled.
 type EventQueryFinished struct {
-	QueryID query.QueryID
-	Stats   query.QueryStats
+	QueryID      query.QueryID
+	Stats        query.QueryStats
+	ClosestNodes []kadt.PeerID
 }
 
 func (*EventQueryFinished) behaviourEvent() {}
diff --git a/v2/coord/event_test.go b/v2/internal/coord/event_test.go
similarity index 84%
rename from v2/coord/event_test.go
rename to v2/internal/coord/event_test.go
index 2944be13..99abc2fc 100644
--- a/v2/coord/event_test.go
+++ b/v2/internal/coord/event_test.go
@@ -8,7 +8,8 @@ var (
 )
 
 var (
-	_ QueryCommand = (*EventStartQuery)(nil)
+	_ QueryCommand = (*EventStartMessageQuery)(nil)
+	_ QueryCommand = (*EventStartFindCloserQuery)(nil)
 	_ QueryCommand = (*EventStopQuery)(nil)
 )
diff --git a/v2/coord/internal/nettest/layouts.go b/v2/internal/coord/internal/nettest/layouts.go
similarity index 100%
rename from v2/coord/internal/nettest/layouts.go
rename to v2/internal/coord/internal/nettest/layouts.go
diff --git a/v2/coord/internal/nettest/routing.go b/v2/internal/coord/internal/nettest/routing.go
similarity index 100%
rename from v2/coord/internal/nettest/routing.go
rename to v2/internal/coord/internal/nettest/routing.go
diff --git a/v2/coord/internal/nettest/topology.go b/v2/internal/coord/internal/nettest/topology.go
similarity index 97%
rename from v2/coord/internal/nettest/topology.go
rename to v2/internal/coord/internal/nettest/topology.go
index 96d6380a..dda13ade 100644
--- a/v2/coord/internal/nettest/topology.go
+++ b/v2/internal/coord/internal/nettest/topology.go
@@ -8,7 +8,7 @@ import (
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/plprobelab/go-kademlia/network/address"
 
-	"github.com/libp2p/go-libp2p-kad-dht/v2/coord/routing"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/routing"
 	"github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
 	"github.com/libp2p/go-libp2p-kad-dht/v2/pb"
 )
diff --git a/v2/coord/internal/tiny/node.go 
b/v2/internal/coord/internal/tiny/node.go similarity index 92% rename from v2/coord/internal/tiny/node.go rename to v2/internal/coord/internal/tiny/node.go index 72c67887..2ad224cc 100644 --- a/v2/coord/internal/tiny/node.go +++ b/v2/internal/coord/internal/tiny/node.go @@ -12,6 +12,10 @@ type Node struct { key Key } +type Message struct { + Content string +} + var _ kad.NodeID[Key] = Node{} func NewNode(k Key) Node { diff --git a/v2/coord/internal/tiny/node_test.go b/v2/internal/coord/internal/tiny/node_test.go similarity index 100% rename from v2/coord/internal/tiny/node_test.go rename to v2/internal/coord/internal/tiny/node_test.go diff --git a/v2/coord/network.go b/v2/internal/coord/network.go similarity index 87% rename from v2/coord/network.go rename to v2/internal/coord/network.go index 72369b6f..d4087564 100644 --- a/v2/coord/network.go +++ b/v2/internal/coord/network.go @@ -9,7 +9,7 @@ import ( "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/query" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) @@ -59,6 +59,16 @@ func (b *NetworkBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { } b.nodeHandlersMu.Unlock() nh.Notify(ctx, ev) + case *EventOutboundSendMessage: + b.nodeHandlersMu.Lock() + p := kadt.PeerID(ev.To) + nh, ok := b.nodeHandlers[p] + if !ok { + nh = NewNodeHandler(p, b.rtr, b.logger, b.tracer) + b.nodeHandlers[p] = nh + } + b.nodeHandlersMu.Unlock() + nh.Notify(ctx, ev) default: panic(fmt.Sprintf("unexpected dht event: %T", ev)) } @@ -160,6 +170,26 @@ func (h *NodeHandler) send(ctx context.Context, ev NodeHandlerRequest) bool { Target: cmd.Target, CloserNodes: nodes, }) + case *EventOutboundSendMessage: + if cmd.Notify == nil { + break + } + resp, err := h.rtr.SendMessage(ctx, h.self, cmd.Message) + if err != nil { + cmd.Notify.Notify(ctx, &EventSendMessageFailure{ + QueryID: cmd.QueryID, + To: h.self, + Err: fmt.Errorf("NodeHandler: %w", err), + }) + return false + } + + cmd.Notify.Notify(ctx, &EventSendMessageSuccess{ + QueryID: cmd.QueryID, + To: h.self, + Response: resp, + CloserNodes: resp.CloserNodes(), + }) default: panic(fmt.Sprintf("unexpected command type: %T", cmd)) } diff --git a/v2/coord/network_test.go b/v2/internal/coord/network_test.go similarity index 92% rename from v2/coord/network_test.go rename to v2/internal/coord/network_test.go index 6baacf53..924565b8 100644 --- a/v2/coord/network_test.go +++ b/v2/internal/coord/network_test.go @@ -8,7 +8,7 @@ import ( "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/nettest" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" ) diff --git a/v2/coord/query.go b/v2/internal/coord/query.go similarity index 67% rename from v2/coord/query.go rename to v2/internal/coord/query.go index b8ebb982..91cbab09 100644 --- a/v2/coord/query.go +++ b/v2/internal/coord/query.go @@ -6,14 +6,15 @@ import ( "sync" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/query" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query" ) type PooledQueryBehaviour struct { - pool *query.Pool[kadt.Key, kadt.PeerID] + pool *query.Pool[kadt.Key, kadt.PeerID, *pb.Message] waiters 
map[query.QueryID]NotifyCloser[BehaviourEvent] pendingMu sync.Mutex @@ -24,7 +25,7 @@ type PooledQueryBehaviour struct { tracer trace.Tracer } -func NewPooledQueryBehaviour(pool *query.Pool[kadt.Key, kadt.PeerID], logger *slog.Logger, tracer trace.Tracer) *PooledQueryBehaviour { +func NewPooledQueryBehaviour(pool *query.Pool[kadt.Key, kadt.PeerID, *pb.Message], logger *slog.Logger, tracer trace.Tracer) *PooledQueryBehaviour { h := &PooledQueryBehaviour{ pool: pool, waiters: make(map[query.QueryID]NotifyCloser[BehaviourEvent]), @@ -44,8 +45,8 @@ func (p *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { var cmd query.PoolEvent switch ev := ev.(type) { - case *EventStartQuery: - cmd = &query.EventPoolAddQuery[kadt.Key, kadt.PeerID]{ + case *EventStartFindCloserQuery: + cmd = &query.EventPoolAddFindCloserQuery[kadt.Key, kadt.PeerID]{ QueryID: ev.QueryID, Target: ev.Target, KnownClosestNodes: ev.KnownClosestNodes, @@ -53,6 +54,16 @@ func (p *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { if ev.Notify != nil { p.waiters[ev.QueryID] = ev.Notify } + case *EventStartMessageQuery: + cmd = &query.EventPoolAddQuery[kadt.Key, kadt.PeerID, *pb.Message]{ + QueryID: ev.QueryID, + Target: ev.Target, + Message: ev.Message, + KnownClosestNodes: ev.KnownClosestNodes, + } + if ev.Notify != nil { + p.waiters[ev.QueryID] = ev.Notify + } case *EventStopQuery: cmd = &query.EventPoolStopQuery{ @@ -60,8 +71,6 @@ func (p *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { } case *EventGetCloserNodesSuccess: - // TODO: add addresses for discovered nodes in DHT - for _, info := range ev.CloserNodes { // TODO: do this after advancing pool p.pending = append(p.pending, &EventAddNode{ @@ -77,7 +86,7 @@ func (p *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { // Stats: stats, }) } - cmd = &query.EventPoolFindCloserResponse[kadt.Key, kadt.PeerID]{ + cmd = &query.EventPoolNodeResponse[kadt.Key, kadt.PeerID]{ NodeID: ev.To, QueryID: ev.QueryID, CloserNodes: ev.CloserNodes, @@ -88,7 +97,38 @@ func (p *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { ev.To, }) - cmd = &query.EventPoolFindCloserFailure[kadt.Key, kadt.PeerID]{ + cmd = &query.EventPoolNodeFailure[kadt.Key, kadt.PeerID]{ + NodeID: ev.To, + QueryID: ev.QueryID, + Error: ev.Err, + } + case *EventSendMessageSuccess: + for _, info := range ev.CloserNodes { + // TODO: do this after advancing pool + p.pending = append(p.pending, &EventAddNode{ + NodeID: info, + }) + } + waiter, ok := p.waiters[ev.QueryID] + if ok { + waiter.Notify(ctx, &EventQueryProgressed{ + NodeID: ev.To, + QueryID: ev.QueryID, + Response: ev.Response, + }) + } + cmd = &query.EventPoolNodeResponse[kadt.Key, kadt.PeerID]{ + NodeID: ev.To, + QueryID: ev.QueryID, + CloserNodes: ev.CloserNodes, + } + case *EventSendMessageFailure: + // queue an event that will notify the routing behaviour of a failed node + p.pending = append(p.pending, &EventNotifyNonConnectivity{ + ev.To, + }) + + cmd = &query.EventPoolNodeFailure[kadt.Key, kadt.PeerID]{ NodeID: ev.To, QueryID: ev.QueryID, Error: ev.Err, @@ -162,16 +202,24 @@ func (p *PooledQueryBehaviour) advancePool(ctx context.Context, ev query.PoolEve Target: st.Target, Notify: p, }, true + case *query.StatePoolSendMessage[kadt.Key, kadt.PeerID, *pb.Message]: + return &EventOutboundSendMessage{ + QueryID: st.QueryID, + To: st.NodeID, + Message: st.Message, + Notify: p, + }, true case *query.StatePoolWaitingAtCapacity: // nothing to do except wait for 
message response or timeout case *query.StatePoolWaitingWithCapacity: // nothing to do except wait for message response or timeout - case *query.StatePoolQueryFinished: + case *query.StatePoolQueryFinished[kadt.Key, kadt.PeerID]: waiter, ok := p.waiters[st.QueryID] if ok { waiter.Notify(ctx, &EventQueryFinished{ - QueryID: st.QueryID, - Stats: st.Stats, + QueryID: st.QueryID, + Stats: st.Stats, + ClosestNodes: st.ClosestNodes, }) waiter.Close() } diff --git a/v2/coord/query/iter.go b/v2/internal/coord/query/iter.go similarity index 100% rename from v2/coord/query/iter.go rename to v2/internal/coord/query/iter.go diff --git a/v2/coord/query/iter_test.go b/v2/internal/coord/query/iter_test.go similarity index 97% rename from v2/coord/query/iter_test.go rename to v2/internal/coord/query/iter_test.go index d5d02de9..cb987349 100644 --- a/v2/coord/query/iter_test.go +++ b/v2/internal/coord/query/iter_test.go @@ -7,7 +7,7 @@ import ( "github.com/plprobelab/go-kademlia/key" "github.com/stretchr/testify/require" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/tiny" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/tiny" ) var ( diff --git a/v2/coord/query/node.go b/v2/internal/coord/query/node.go similarity index 100% rename from v2/coord/query/node.go rename to v2/internal/coord/query/node.go diff --git a/v2/coord/query/pool.go b/v2/internal/coord/query/pool.go similarity index 64% rename from v2/coord/query/pool.go rename to v2/internal/coord/query/pool.go index 2fffc706..a94566cb 100644 --- a/v2/coord/query/pool.go +++ b/v2/internal/coord/query/pool.go @@ -12,11 +12,13 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) -type Pool[K kad.Key[K], N kad.NodeID[K]] struct { +type Message interface{} + +type Pool[K kad.Key[K], N kad.NodeID[K], M Message] struct { // self is the node id of the system the pool is running on self N - queries []*Query[K, N] - queryIndex map[QueryID]*Query[K, N] + queries []*Query[K, N, M] + queryIndex map[QueryID]*Query[K, N, M] // cfg is a copy of the optional configuration supplied to the pool cfg PoolConfig @@ -92,23 +94,23 @@ func DefaultPoolConfig() *PoolConfig { } } -func NewPool[K kad.Key[K], N kad.NodeID[K]](self N, cfg *PoolConfig) (*Pool[K, N], error) { +func NewPool[K kad.Key[K], N kad.NodeID[K], M Message](self N, cfg *PoolConfig) (*Pool[K, N, M], error) { if cfg == nil { cfg = DefaultPoolConfig() } else if err := cfg.Validate(); err != nil { return nil, err } - return &Pool[K, N]{ + return &Pool[K, N, M]{ self: self, cfg: *cfg, - queries: make([]*Query[K, N], 0), - queryIndex: make(map[QueryID]*Query[K, N]), + queries: make([]*Query[K, N, M], 0), + queryIndex: make(map[QueryID]*Query[K, N, M]), }, nil } // Advance advances the state of the pool by attempting to advance one of its queries -func (p *Pool[K, N]) Advance(ctx context.Context, ev PoolEvent) PoolState { +func (p *Pool[K, N, M]) Advance(ctx context.Context, ev PoolEvent) PoolState { ctx, span := tele.StartSpan(ctx, "Pool.Advance") defer span.End() @@ -120,8 +122,10 @@ func (p *Pool[K, N]) Advance(ctx context.Context, ev PoolEvent) PoolState { eventQueryID := InvalidQueryID switch tev := ev.(type) { - case *EventPoolAddQuery[K, N]: - p.addQuery(ctx, tev.QueryID, tev.Target, tev.KnownClosestNodes) + case *EventPoolAddFindCloserQuery[K, N]: + p.addFindCloserQuery(ctx, tev.QueryID, tev.Target, tev.KnownClosestNodes, tev.NumResults) + case *EventPoolAddQuery[K, N, M]: + p.addQuery(ctx, tev.QueryID, tev.Target, tev.Message, tev.KnownClosestNodes, tev.NumResults) // TODO: 
return error as state case *EventPoolStopQuery: if qry, ok := p.queryIndex[tev.QueryID]; ok { @@ -131,9 +135,9 @@ func (p *Pool[K, N]) Advance(ctx context.Context, ev PoolEvent) PoolState { } eventQueryID = qry.id } - case *EventPoolFindCloserResponse[K, N]: + case *EventPoolNodeResponse[K, N]: if qry, ok := p.queryIndex[tev.QueryID]; ok { - state, terminal := p.advanceQuery(ctx, qry, &EventQueryFindCloserResponse[K, N]{ + state, terminal := p.advanceQuery(ctx, qry, &EventQueryNodeResponse[K, N]{ NodeID: tev.NodeID, CloserNodes: tev.CloserNodes, }) @@ -142,9 +146,9 @@ func (p *Pool[K, N]) Advance(ctx context.Context, ev PoolEvent) PoolState { } eventQueryID = qry.id } - case *EventPoolFindCloserFailure[K, N]: + case *EventPoolNodeFailure[K, N]: if qry, ok := p.queryIndex[tev.QueryID]; ok { - state, terminal := p.advanceQuery(ctx, qry, &EventQueryFindCloserFailure[K, N]{ + state, terminal := p.advanceQuery(ctx, qry, &EventQueryNodeFailure[K, N]{ NodeID: tev.NodeID, Error: tev.Error, }) @@ -170,7 +174,7 @@ func (p *Pool[K, N]) Advance(ctx context.Context, ev PoolEvent) PoolState { continue } - state, terminal := p.advanceQuery(ctx, qry, nil) + state, terminal := p.advanceQuery(ctx, qry, &EventQueryPoll{}) if terminal { return state } @@ -188,7 +192,7 @@ func (p *Pool[K, N]) Advance(ctx context.Context, ev PoolEvent) PoolState { return &StatePoolIdle{} } -func (p *Pool[K, N]) advanceQuery(ctx context.Context, qry *Query[K, N], qev QueryEvent) (PoolState, bool) { +func (p *Pool[K, N, M]) advanceQuery(ctx context.Context, qry *Query[K, N, M], qev QueryEvent) (PoolState, bool) { state := qry.Advance(ctx, qev) switch st := state.(type) { case *StateQueryFindCloser[K, N]: @@ -199,11 +203,20 @@ func (p *Pool[K, N]) advanceQuery(ctx context.Context, qry *Query[K, N], qev Que NodeID: st.NodeID, Target: st.Target, }, true - case *StateQueryFinished: - p.removeQuery(qry.id) - return &StatePoolQueryFinished{ + case *StateQuerySendMessage[K, N, M]: + p.queriesInFlight++ + return &StatePoolSendMessage[K, N, M]{ QueryID: st.QueryID, Stats: st.Stats, + NodeID: st.NodeID, + Message: st.Message, + }, true + case *StateQueryFinished[K, N]: + p.removeQuery(qry.id) + return &StatePoolQueryFinished[K, N]{ + QueryID: st.QueryID, + Stats: st.Stats, + ClosestNodes: st.ClosestNodes, }, true case *StateQueryWaitingAtCapacity: elapsed := p.cfg.Clock.Since(qry.stats.Start) @@ -229,7 +242,7 @@ func (p *Pool[K, N]) advanceQuery(ctx context.Context, qry *Query[K, N], qev Que return nil, false } -func (p *Pool[K, N]) removeQuery(queryID QueryID) { +func (p *Pool[K, N, M]) removeQuery(queryID QueryID) { for i := range p.queries { if p.queries[i].id != queryID { continue @@ -245,18 +258,49 @@ func (p *Pool[K, N]) removeQuery(queryID QueryID) { // addQuery adds a query to the pool, returning the new query id // TODO: remove target argument and use msg.Target -func (p *Pool[K, N]) addQuery(ctx context.Context, queryID QueryID, target K, knownClosestNodes []N) error { +func (p *Pool[K, N, M]) addQuery(ctx context.Context, queryID QueryID, target K, msg M, knownClosestNodes []N, numResults int) error { if _, exists := p.queryIndex[queryID]; exists { return fmt.Errorf("query id already in use") } iter := NewClosestNodesIter[K, N](target) - qryCfg := DefaultQueryConfig[K]() + qryCfg := DefaultQueryConfig() qryCfg.Clock = p.cfg.Clock qryCfg.Concurrency = p.cfg.QueryConcurrency qryCfg.RequestTimeout = p.cfg.RequestTimeout - qry, err := NewQuery[K, N](p.self, queryID, target, iter, knownClosestNodes, qryCfg) + if numResults > 0 { + 
qryCfg.NumResults = numResults
+	}
+
+	qry, err := NewQuery[K, N, M](p.self, queryID, target, msg, iter, knownClosestNodes, qryCfg)
+	if err != nil {
+		return fmt.Errorf("new query: %w", err)
+	}
+
+	p.queries = append(p.queries, qry)
+	p.queryIndex[queryID] = qry
+
+	return nil
+}
+
+// addFindCloserQuery adds a find closer query to the pool, returning the new query id
+func (p *Pool[K, N, M]) addFindCloserQuery(ctx context.Context, queryID QueryID, target K, knownClosestNodes []N, numResults int) error {
+	if _, exists := p.queryIndex[queryID]; exists {
+		return fmt.Errorf("query id already in use")
+	}
+	iter := NewClosestNodesIter[K, N](target)
+
+	qryCfg := DefaultQueryConfig()
+	qryCfg.Clock = p.cfg.Clock
+	qryCfg.Concurrency = p.cfg.QueryConcurrency
+	qryCfg.RequestTimeout = p.cfg.RequestTimeout
+
+	if numResults > 0 {
+		qryCfg.NumResults = numResults
+	}
+
+	qry, err := NewFindCloserQuery[K, N, M](p.self, queryID, target, iter, knownClosestNodes, qryCfg)
 	if err != nil {
 		return fmt.Errorf("new query: %w", err)
 	}
@@ -284,6 +328,14 @@ type StatePoolFindCloser[K kad.Key[K], N kad.NodeID[K]] struct {
 	Stats   QueryStats
 }
 
+// StatePoolSendMessage indicates that a pool query wants to send a message to a node.
+type StatePoolSendMessage[K kad.Key[K], N kad.NodeID[K], M Message] struct {
+	QueryID QueryID
+	NodeID  N // the node to send the message to
+	Message M
+	Stats   QueryStats
+}
+
 // StatePoolWaitingAtCapacity indicates that at least one query is waiting for results and the pool has reached
 // its maximum number of concurrent queries.
 type StatePoolWaitingAtCapacity struct{}
@@ -293,9 +345,10 @@ type StatePoolWaitingWithCapacity struct{}
 
 // StatePoolQueryFinished indicates that a query has finished.
-type StatePoolQueryFinished struct {
-	QueryID QueryID
-	Stats   QueryStats
+type StatePoolQueryFinished[K kad.Key[K], N kad.NodeID[K]] struct {
+	QueryID      QueryID
+	Stats        QueryStats
+	ClosestNodes []N
 }
 
 // StatePoolQueryTimeout indicates that a query has timed out.
@@ -305,23 +358,34 @@ type StatePoolQueryTimeout struct {
 }
 
 // poolState() ensures that only Pool states can be assigned to the PoolState interface.
-func (*StatePoolIdle) poolState()                {}
-func (*StatePoolFindCloser[K, N]) poolState()    {}
-func (*StatePoolWaitingAtCapacity) poolState()   {}
-func (*StatePoolWaitingWithCapacity) poolState() {}
-func (*StatePoolQueryFinished) poolState()       {}
-func (*StatePoolQueryTimeout) poolState()        {}
+func (*StatePoolIdle) poolState()                 {}
+func (*StatePoolFindCloser[K, N]) poolState()     {}
+func (*StatePoolSendMessage[K, N, M]) poolState() {}
+func (*StatePoolWaitingAtCapacity) poolState()    {}
+func (*StatePoolWaitingWithCapacity) poolState()  {}
+func (*StatePoolQueryFinished[K, N]) poolState()  {}
+func (*StatePoolQueryTimeout) poolState()         {}
 
 // PoolEvent is an event intended to advance the state of a pool.
 type PoolEvent interface {
 	poolEvent()
 }
 
-// EventPoolAddQuery is an event that attempts to add a new query
-type EventPoolAddQuery[K kad.Key[K], N kad.NodeID[K]] struct {
+// EventPoolAddFindCloserQuery is an event that attempts to add a new query that finds closer nodes to a target key.
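+//
+// For illustration (a hedged sketch), the pool tests later in this patch submit it
+// directly to Advance; NumResults is optional and zero selects the default:
+//
+//	state := p.Advance(ctx, &EventPoolAddFindCloserQuery[tiny.Key, tiny.Node]{
+//		QueryID:           queryID,
+//		Target:            target,
+//		KnownClosestNodes: []tiny.Node{a},
+//	})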
+type EventPoolAddFindCloserQuery[K kad.Key[K], N kad.NodeID[K]] struct { + QueryID QueryID // the id to use for the new query + Target K // the target key for the query + KnownClosestNodes []N // an initial set of close nodes the query should use + NumResults int // the minimum number of nodes to successfully contact before considering iteration complete +} + +// EventPoolAddQuery is an event that attempts to add a new query that sends a message. +type EventPoolAddQuery[K kad.Key[K], N kad.NodeID[K], M Message] struct { QueryID QueryID // the id to use for the new query Target K // the target key for the query + Message M // message to be sent to each node KnownClosestNodes []N // an initial set of close nodes the query should use + NumResults int // the minimum number of nodes to successfully contact before considering iteration complete } // EventPoolStopQuery notifies a [Pool] to stop a query. @@ -329,15 +393,15 @@ type EventPoolStopQuery struct { QueryID QueryID // the id of the query that should be stopped } -// EventPoolFindCloserResponse notifies a [Pool] that an attempt to find closer nodes has received a successful response. -type EventPoolFindCloserResponse[K kad.Key[K], N kad.NodeID[K]] struct { +// EventPoolNodeResponse notifies a [Pool] that an attempt to contact a node has received a successful response. +type EventPoolNodeResponse[K kad.Key[K], N kad.NodeID[K]] struct { QueryID QueryID // the id of the query that sent the message NodeID N // the node the message was sent to CloserNodes []N // the closer nodes sent by the node } -// EventPoolFindCloserFailure notifies a [Pool] that an attempt to find closer nodes has failed. -type EventPoolFindCloserFailure[K kad.Key[K], N kad.NodeID[K]] struct { +// EventPoolNodeFailure notifies a [Pool] that an attempt to contact a node has failed. +type EventPoolNodeFailure[K kad.Key[K], N kad.NodeID[K]] struct { QueryID QueryID // the id of the query that sent the message NodeID N // the node the message was sent to Error error // the error that caused the failure, if any @@ -347,8 +411,9 @@ type EventPoolFindCloserFailure[K kad.Key[K], N kad.NodeID[K]] struct { type EventPoolPoll struct{} // poolEvent() ensures that only events accepted by a [Pool] can be assigned to the [PoolEvent] interface. 
-func (*EventPoolAddQuery[K, N]) poolEvent() {} +func (*EventPoolAddQuery[K, N, M]) poolEvent() {} +func (*EventPoolAddFindCloserQuery[K, N]) poolEvent() {} func (*EventPoolStopQuery) poolEvent() {} -func (*EventPoolFindCloserResponse[K, N]) poolEvent() {} -func (*EventPoolFindCloserFailure[K, N]) poolEvent() {} +func (*EventPoolNodeResponse[K, N]) poolEvent() {} +func (*EventPoolNodeFailure[K, N]) poolEvent() {} func (*EventPoolPoll) poolEvent() {} diff --git a/v2/coord/query/pool_test.go b/v2/internal/coord/query/pool_test.go similarity index 75% rename from v2/coord/query/pool_test.go rename to v2/internal/coord/query/pool_test.go index 2f6ab26f..d54c6d23 100644 --- a/v2/coord/query/pool_test.go +++ b/v2/internal/coord/query/pool_test.go @@ -8,7 +8,7 @@ import ( "github.com/plprobelab/go-kademlia/key" "github.com/stretchr/testify/require" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/tiny" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/tiny" ) func TestPoolConfigValidate(t *testing.T) { @@ -71,7 +71,7 @@ func TestPoolStartsIdle(t *testing.T) { cfg.Clock = clk self := tiny.NewNode(0) - p, err := NewPool[tiny.Key](self, cfg) + p, err := NewPool[tiny.Key, tiny.Node, tiny.Message](self, cfg) require.NoError(t, err) state := p.Advance(ctx, &EventPoolPoll{}) @@ -85,21 +85,21 @@ func TestPoolStopWhenNoQueries(t *testing.T) { cfg.Clock = clk self := tiny.NewNode(0) - p, err := NewPool[tiny.Key](self, cfg) + p, err := NewPool[tiny.Key, tiny.Node, tiny.Message](self, cfg) require.NoError(t, err) state := p.Advance(ctx, &EventPoolPoll{}) require.IsType(t, &StatePoolIdle{}, state) } -func TestPoolAddQueryStartsIfCapacity(t *testing.T) { +func TestPoolAddFindCloserQueryStartsIfCapacity(t *testing.T) { ctx := context.Background() clk := clock.NewMock() cfg := DefaultPoolConfig() cfg.Clock = clk self := tiny.NewNode(0) - p, err := NewPool[tiny.Key](self, cfg) + p, err := NewPool[tiny.Key, tiny.Node, tiny.Message](self, cfg) require.NoError(t, err) target := tiny.Key(0b00000001) @@ -108,7 +108,7 @@ func TestPoolAddQueryStartsIfCapacity(t *testing.T) { queryID := QueryID("test") // first thing the new pool should do is start the query - state := p.Advance(ctx, &EventPoolAddQuery[tiny.Key, tiny.Node]{ + state := p.Advance(ctx, &EventPoolAddFindCloserQuery[tiny.Key, tiny.Node]{ QueryID: queryID, Target: target, KnownClosestNodes: []tiny.Node{a}, @@ -132,14 +132,55 @@ func TestPoolAddQueryStartsIfCapacity(t *testing.T) { require.IsType(t, &StatePoolWaitingWithCapacity{}, state) } -func TestPoolMessageResponse(t *testing.T) { +func TestPoolAddQueryStartsIfCapacity(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultPoolConfig() + cfg.Clock = clk + + self := tiny.NewNode(0) + p, err := NewPool[tiny.Key, tiny.Node, tiny.Message](self, cfg) + require.NoError(t, err) + + target := tiny.Key(0b00000001) + a := tiny.NewNode(0b00000100) // 4 + + queryID := QueryID("test") + msg := tiny.Message{Content: "msg"} + // first thing the new pool should do is start the query + state := p.Advance(ctx, &EventPoolAddQuery[tiny.Key, tiny.Node, tiny.Message]{ + QueryID: queryID, + Target: target, + Message: msg, + KnownClosestNodes: []tiny.Node{a}, + }) + require.IsType(t, &StatePoolSendMessage[tiny.Key, tiny.Node, tiny.Message]{}, state) + + // the query should attempt to contact the node it was given + st := state.(*StatePoolSendMessage[tiny.Key, tiny.Node, tiny.Message]) + + // the query should be the one just added + require.Equal(t, queryID, st.QueryID) + + 
// the query should attempt to contact the node it was given + require.Equal(t, a, st.NodeID) + + // with the correct message + require.Equal(t, msg, st.Message) + + // now the pool reports that it is waiting + state = p.Advance(ctx, &EventPoolPoll{}) + require.IsType(t, &StatePoolWaitingWithCapacity{}, state) +} + +func TestPoolNodeResponse(t *testing.T) { ctx := context.Background() clk := clock.NewMock() cfg := DefaultPoolConfig() cfg.Clock = clk self := tiny.NewNode(0) - p, err := NewPool[tiny.Key](self, cfg) + p, err := NewPool[tiny.Key, tiny.Node, tiny.Message](self, cfg) require.NoError(t, err) target := tiny.Key(0b00000001) @@ -148,7 +189,7 @@ func TestPoolMessageResponse(t *testing.T) { queryID := QueryID("test") // first thing the new pool should do is start the query - state := p.Advance(ctx, &EventPoolAddQuery[tiny.Key, tiny.Node]{ + state := p.Advance(ctx, &EventPoolAddFindCloserQuery[tiny.Key, tiny.Node]{ QueryID: queryID, Target: target, KnownClosestNodes: []tiny.Node{a}, @@ -161,15 +202,15 @@ func TestPoolMessageResponse(t *testing.T) { require.Equal(t, a, st.NodeID) // notify query that node was contacted successfully, but no closer nodes - state = p.Advance(ctx, &EventPoolFindCloserResponse[tiny.Key, tiny.Node]{ + state = p.Advance(ctx, &EventPoolNodeResponse[tiny.Key, tiny.Node]{ QueryID: queryID, NodeID: a, }) // pool should respond that query has finished - require.IsType(t, &StatePoolQueryFinished{}, state) + require.IsType(t, &StatePoolQueryFinished[tiny.Key, tiny.Node]{}, state) - stf := state.(*StatePoolQueryFinished) + stf := state.(*StatePoolQueryFinished[tiny.Key, tiny.Node]) require.Equal(t, queryID, stf.QueryID) require.Equal(t, 1, stf.Stats.Requests) require.Equal(t, 1, stf.Stats.Success) @@ -183,7 +224,7 @@ func TestPoolPrefersRunningQueriesOverNewOnes(t *testing.T) { cfg.Concurrency = 2 // allow two queries to run concurrently self := tiny.NewNode(0) - p, err := NewPool[tiny.Key](self, cfg) + p, err := NewPool[tiny.Key, tiny.Node, tiny.Message](self, cfg) require.NoError(t, err) target := tiny.Key(0b00000001) @@ -194,7 +235,7 @@ func TestPoolPrefersRunningQueriesOverNewOnes(t *testing.T) { // Add the first query queryID1 := QueryID("1") - state := p.Advance(ctx, &EventPoolAddQuery[tiny.Key, tiny.Node]{ + state := p.Advance(ctx, &EventPoolAddFindCloserQuery[tiny.Key, tiny.Node]{ QueryID: queryID1, Target: target, KnownClosestNodes: []tiny.Node{a, b, c, d}, @@ -208,7 +249,7 @@ func TestPoolPrefersRunningQueriesOverNewOnes(t *testing.T) { // Add the second query queryID2 := QueryID("2") - state = p.Advance(ctx, &EventPoolAddQuery[tiny.Key, tiny.Node]{ + state = p.Advance(ctx, &EventPoolAddFindCloserQuery[tiny.Key, tiny.Node]{ QueryID: queryID2, Target: target, KnownClosestNodes: []tiny.Node{a, b, c, d}, @@ -235,7 +276,7 @@ func TestPoolPrefersRunningQueriesOverNewOnes(t *testing.T) { require.Equal(t, a, st.NodeID) // notify first query that node was contacted successfully, but no closer nodes - state = p.Advance(ctx, &EventPoolFindCloserResponse[tiny.Key, tiny.Node]{ + state = p.Advance(ctx, &EventPoolNodeResponse[tiny.Key, tiny.Node]{ QueryID: queryID1, NodeID: a, }) @@ -247,7 +288,7 @@ func TestPoolPrefersRunningQueriesOverNewOnes(t *testing.T) { require.Equal(t, d, st.NodeID) // notify first query that next node was contacted successfully, but no closer nodes - state = p.Advance(ctx, &EventPoolFindCloserResponse[tiny.Key, tiny.Node]{ + state = p.Advance(ctx, &EventPoolNodeResponse[tiny.Key, tiny.Node]{ QueryID: queryID1, NodeID: b, }) @@ -268,7 +309,7 @@ 
func TestPoolRespectsConcurrency(t *testing.T) { cfg.QueryConcurrency = 1 // allow each query to have a single request in flight self := tiny.NewNode(0) - p, err := NewPool[tiny.Key](self, cfg) + p, err := NewPool[tiny.Key, tiny.Node, tiny.Message](self, cfg) require.NoError(t, err) target := tiny.Key(0b00000001) @@ -276,7 +317,7 @@ func TestPoolRespectsConcurrency(t *testing.T) { // Add the first query queryID1 := QueryID("1") - state := p.Advance(ctx, &EventPoolAddQuery[tiny.Key, tiny.Node]{ + state := p.Advance(ctx, &EventPoolAddFindCloserQuery[tiny.Key, tiny.Node]{ QueryID: queryID1, Target: target, KnownClosestNodes: []tiny.Node{a}, @@ -290,7 +331,7 @@ func TestPoolRespectsConcurrency(t *testing.T) { // Add the second query queryID2 := QueryID("2") - state = p.Advance(ctx, &EventPoolAddQuery[tiny.Key, tiny.Node]{ + state = p.Advance(ctx, &EventPoolAddFindCloserQuery[tiny.Key, tiny.Node]{ QueryID: queryID2, Target: target, KnownClosestNodes: []tiny.Node{a}, @@ -304,7 +345,7 @@ func TestPoolRespectsConcurrency(t *testing.T) { // Add a third query queryID3 := QueryID("3") - state = p.Advance(ctx, &EventPoolAddQuery[tiny.Key, tiny.Node]{ + state = p.Advance(ctx, &EventPoolAddFindCloserQuery[tiny.Key, tiny.Node]{ QueryID: queryID3, Target: target, KnownClosestNodes: []tiny.Node{a}, @@ -314,14 +355,14 @@ func TestPoolRespectsConcurrency(t *testing.T) { require.IsType(t, &StatePoolWaitingAtCapacity{}, state) // notify first query that next node was contacted successfully, but no closer nodes - state = p.Advance(ctx, &EventPoolFindCloserResponse[tiny.Key, tiny.Node]{ + state = p.Advance(ctx, &EventPoolNodeResponse[tiny.Key, tiny.Node]{ QueryID: queryID1, NodeID: a, }) // first query is out of nodes so it has finished - require.IsType(t, &StatePoolQueryFinished{}, state) - stf := state.(*StatePoolQueryFinished) + require.IsType(t, &StatePoolQueryFinished[tiny.Key, tiny.Node]{}, state) + stf := state.(*StatePoolQueryFinished[tiny.Key, tiny.Node]) require.Equal(t, queryID1, stf.QueryID) // advancing pool again allows query 3 to start diff --git a/v2/coord/query/query.go b/v2/internal/coord/query/query.go similarity index 65% rename from v2/coord/query/query.go rename to v2/internal/coord/query/query.go index 9b0d87eb..b0003a83 100644 --- a/v2/coord/query/query.go +++ b/v2/internal/coord/query/query.go @@ -27,7 +27,7 @@ type QueryStats struct { } // QueryConfig specifies optional configuration for a Query -type QueryConfig[K kad.Key[K]] struct { +type QueryConfig struct { Concurrency int // the maximum number of concurrent requests that may be in flight NumResults int // the minimum number of nodes to successfully contact before considering iteration complete RequestTimeout time.Duration // the timeout for contacting a single node @@ -35,7 +35,7 @@ type QueryConfig[K kad.Key[K]] struct { } // Validate checks the configuration options and returns an error if any have invalid values. -func (cfg *QueryConfig[K]) Validate() error { +func (cfg *QueryConfig) Validate() error { if cfg.Clock == nil { return &kaderr.ConfigurationError{ Component: "QueryConfig", @@ -65,8 +65,8 @@ func (cfg *QueryConfig[K]) Validate() error { // DefaultQueryConfig returns the default configuration options for a Query. 
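// For illustration, the tests in this patch build their configs like this (clk is
// a mock clock, an assumption of this sketch):
//
//	cfg := DefaultQueryConfig()
//	cfg.Clock = clk
//	cfg.Concurrency = 2
//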
// Options may be overridden before passing to NewQuery
-func DefaultQueryConfig[K kad.Key[K]]() *QueryConfig[K] {
-	return &QueryConfig[K]{
+func DefaultQueryConfig() *QueryConfig {
+	return &QueryConfig{
 		Concurrency:    3,
 		NumResults:     20,
 		RequestTimeout: time.Minute,
@@ -74,27 +74,44 @@
 	}
 }
 
-type Query[K kad.Key[K], N kad.NodeID[K]] struct {
+type Query[K kad.Key[K], N kad.NodeID[K], M Message] struct {
 	self N
 	id   QueryID
 
 	// cfg is a copy of the optional configuration supplied to the query
-	cfg QueryConfig[K]
+	cfg QueryConfig
 
-	iter   NodeIter[K, N]
-	target K
-	stats  QueryStats
+	iter       NodeIter[K, N]
+	target     K
+	msg        M
+	findCloser bool
+	stats      QueryStats
 
 	// finished indicates that the query has completed its work or has been stopped.
 	finished bool
 
+	// targetNodes is the set of responsive nodes thought to be closest to the target.
+	// It is populated once the query has been marked as finished.
+	// This will contain up to [QueryConfig.NumResults] nodes.
+	targetNodes []N
+
 	// inFlight is the number of requests in flight, will be <= concurrency
 	inFlight int
 }
 
-func NewQuery[K kad.Key[K], N kad.NodeID[K]](self N, id QueryID, target K, iter NodeIter[K, N], knownClosestNodes []N, cfg *QueryConfig[K]) (*Query[K, N], error) {
+func NewFindCloserQuery[K kad.Key[K], N kad.NodeID[K], M Message](self N, id QueryID, target K, iter NodeIter[K, N], knownClosestNodes []N, cfg *QueryConfig) (*Query[K, N, M], error) {
+	var empty M
+	q, err := NewQuery[K, N, M](self, id, target, empty, iter, knownClosestNodes, cfg)
+	if err != nil {
+		return nil, err
+	}
+	q.findCloser = true
+	return q, nil
+}
+
+func NewQuery[K kad.Key[K], N kad.NodeID[K], M Message](self N, id QueryID, target K, msg M, iter NodeIter[K, N], knownClosestNodes []N, cfg *QueryConfig) (*Query[K, N, M], error) {
 	if cfg == nil {
-		cfg = DefaultQueryConfig[K]()
+		cfg = DefaultQueryConfig()
 	} else if err := cfg.Validate(); err != nil {
 		return nil, err
 	}
@@ -110,16 +127,17 @@
 	}
 
-	return &Query[K, N]{
+	return &Query[K, N, M]{
 		self:   self,
 		id:     id,
 		cfg:    *cfg,
+		msg:    msg,
 		iter:   iter,
 		target: target,
 	}, nil
 }
 
-func (q *Query[K, N]) Advance(ctx context.Context, ev QueryEvent) (out QueryState) {
+func (q *Query[K, N, M]) Advance(ctx context.Context, ev QueryEvent) (out QueryState) {
 	ctx, span := tele.StartSpan(ctx, "Query.Advance", trace.WithAttributes(tele.AttrInEvent(ev)))
 	defer func() {
 		span.SetAttributes(tele.AttrOutEvent(out))
@@ -127,26 +145,29 @@
 	}()
 
 	if q.finished {
-		return &StateQueryFinished{
-			QueryID: q.id,
-			Stats:   q.stats,
+		return &StateQueryFinished[K, N]{
+			QueryID:      q.id,
+			Stats:        q.stats,
+			ClosestNodes: q.targetNodes,
 		}
 	}
 
 	switch tev := ev.(type) {
 	case *EventQueryCancel:
-		q.markFinished()
-		return &StateQueryFinished{
-			QueryID: q.id,
-			Stats:   q.stats,
+		q.markFinished(ctx)
+		return &StateQueryFinished[K, N]{
+			QueryID:      q.id,
+			Stats:        q.stats,
+			ClosestNodes: q.targetNodes,
 		}
-	case *EventQueryFindCloserResponse[K, N]:
-		q.onMessageResponse(ctx, tev.NodeID, tev.CloserNodes)
-	case *EventQueryFindCloserFailure[K, N]:
+	case *EventQueryNodeResponse[K, N]:
+		q.onNodeResponse(ctx, tev.NodeID, tev.CloserNodes)
+	case *EventQueryNodeFailure[K, N]:
 		span.RecordError(tev.Error)
-		q.onMessageFailure(ctx, tev.NodeID)
-	case nil:
-		// TEMPORARY: no event to process
+		q.onNodeFailure(ctx, tev.NodeID)
+	case *EventQueryPoll:
+
// no event to process + default: panic(fmt.Sprintf("unexpected event: %T", tev)) } @@ -191,10 +212,11 @@ func (q *Query[K, N]) Advance(ctx context.Context, ev QueryEvent) (out QueryStat // If the iterator is not progressing then it doesn't expect any more nodes to be added to the list. // If it has contacted at least NumResults nodes successfully then the iteration is done. if !progressing && successes >= q.cfg.NumResults { - q.markFinished() - returnState = &StateQueryFinished{ - QueryID: q.id, - Stats: q.stats, + q.markFinished(ctx) + returnState = &StateQueryFinished[K, N]{ + QueryID: q.id, + Stats: q.stats, + ClosestNodes: q.targetNodes, } return true } @@ -208,11 +230,21 @@ func (q *Query[K, N]) Advance(ctx context.Context, ev QueryEvent) (out QueryStat if q.stats.Start.IsZero() { q.stats.Start = q.cfg.Clock.Now() } - returnState = &StateQueryFindCloser[K, N]{ - NodeID: ni.NodeID, - QueryID: q.id, - Stats: q.stats, - Target: q.target, + + if q.findCloser { + returnState = &StateQueryFindCloser[K, N]{ + NodeID: ni.NodeID, + QueryID: q.id, + Stats: q.stats, + Target: q.target, + } + } else { + returnState = &StateQuerySendMessage[K, N, M]{ + NodeID: ni.NodeID, + QueryID: q.id, + Stats: q.stats, + Message: q.msg, + } } return true } @@ -248,22 +280,36 @@ func (q *Query[K, N]) Advance(ctx context.Context, ev QueryEvent) (out QueryStat // The iterator is finished because all available nodes have been contacted // and the iterator is not waiting for any more results. - q.markFinished() - return &StateQueryFinished{ - QueryID: q.id, - Stats: q.stats, + q.markFinished(ctx) + return &StateQueryFinished[K, N]{ + QueryID: q.id, + Stats: q.stats, + ClosestNodes: q.targetNodes, } } -func (q *Query[K, N]) markFinished() { +func (q *Query[K, N, M]) markFinished(ctx context.Context) { q.finished = true if q.stats.End.IsZero() { q.stats.End = q.cfg.Clock.Now() } + + q.targetNodes = make([]N, 0, q.cfg.NumResults) + + q.iter.Each(ctx, func(ctx context.Context, ni *NodeStatus[K, N]) bool { + switch ni.State.(type) { + case *StateNodeSucceeded: + q.targetNodes = append(q.targetNodes, ni.NodeID) + if len(q.targetNodes) >= q.cfg.NumResults { + return true + } + } + return false + }) } -// onMessageResponse processes the result of a successful response received from a node. -func (q *Query[K, N]) onMessageResponse(ctx context.Context, node N, closer []N) { +// onNodeResponse processes the result of a successful response received from a node. +func (q *Query[K, N, M]) onNodeResponse(ctx context.Context, node N, closer []N) { ni, found := q.iter.Find(node.Key()) if !found { // got a rogue message @@ -303,8 +349,8 @@ func (q *Query[K, N]) onMessageResponse(ctx context.Context, node N, closer []N) ni.State = &StateNodeSucceeded{} } -// onMessageFailure processes the result of a failed attempt to contact a node. -func (q *Query[K, N]) onMessageFailure(ctx context.Context, node N) { +// onNodeFailure processes the result of a failed attempt to contact a node. +func (q *Query[K, N, M]) onNodeFailure(ctx context.Context, node N) { ni, found := q.iter.Find(node.Key()) if !found { // got a rogue message @@ -338,9 +384,10 @@ type QueryState interface { } // StateQueryFinished indicates that the [Query] has finished. 
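// The ClosestNodes field of the finished state carries the query's targetNodes,
// which markFinished fills with the closest responsive nodes seen by the query
// (at most [QueryConfig.NumResults] entries).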
-type StateQueryFinished struct {
-	QueryID QueryID
-	Stats   QueryStats
+type StateQueryFinished[K kad.Key[K], N kad.NodeID[K]] struct {
+	QueryID      QueryID
+	Stats        QueryStats
+	ClosestNodes []N // contains the closest nodes to the target key that were found
 }
 
 // StateQueryFindCloser indicates that the [Query] wants to send a find closer nodes message to a node.
@@ -351,6 +398,14 @@ type StateQueryFindCloser[K kad.Key[K], N kad.NodeID[K]] struct {
 	Stats   QueryStats
 }
 
+// StateQuerySendMessage indicates that the [Query] wants to send a message to a node.
+type StateQuerySendMessage[K kad.Key[K], N kad.NodeID[K], M Message] struct {
+	QueryID QueryID
+	NodeID  N // the node to send the message to
+	Message M
+	Stats   QueryStats
+}
+
 // StateQueryWaitingAtCapacity indicates that the [Query] is waiting for results and is at capacity.
 type StateQueryWaitingAtCapacity struct {
 	QueryID QueryID
@@ -364,10 +419,11 @@ type StateQueryWaitingWithCapacity struct {
 }
 
 // queryState() ensures that only [Query] states can be assigned to a QueryState.
-func (*StateQueryFinished) queryState()            {}
-func (*StateQueryFindCloser[K, N]) queryState()    {}
-func (*StateQueryWaitingAtCapacity) queryState()   {}
-func (*StateQueryWaitingWithCapacity) queryState() {}
+func (*StateQueryFinished[K, N]) queryState()       {}
+func (*StateQueryFindCloser[K, N]) queryState()     {}
+func (*StateQuerySendMessage[K, N, M]) queryState() {}
+func (*StateQueryWaitingAtCapacity) queryState()    {}
+func (*StateQueryWaitingWithCapacity) queryState()  {}
 
 type QueryEvent interface {
 	queryEvent()
 }
 
// EventQueryCancel notifies a query to stop all work and enter the finished state.
 type EventQueryCancel struct{}
 
-// EventQueryFindCloserResponse notifies a [Query] that an attempt to find closer nodes has received a successful response.
-type EventQueryFindCloserResponse[K kad.Key[K], N kad.NodeID[K]] struct {
+// EventQueryNodeResponse notifies a [Query] that an attempt to contact a node has received a successful response.
+type EventQueryNodeResponse[K kad.Key[K], N kad.NodeID[K]] struct {
 	NodeID      N   // the node the message was sent to
 	CloserNodes []N // the closer nodes sent by the node
 }
 
-// EventQueryFindCloserFailure notifies a [Query] that an attempt to find closer nodes has failed.
-type EventQueryFindCloserFailure[K kad.Key[K], N kad.NodeID[K]] struct {
+// EventQueryNodeFailure notifies a [Query] that an attempt to contact a node has failed.
+type EventQueryNodeFailure[K kad.Key[K], N kad.NodeID[K]] struct {
 	NodeID N     // the node the message was sent to
 	Error  error // the error that caused the failure, if any
 }
 
+// EventQueryPoll is an event that signals a [Query] that it can perform housekeeping work.
+type EventQueryPoll struct{}
+
 // queryEvent() ensures that only events accepted by [Query] can be assigned to a [QueryEvent].
-func (*EventQueryCancel) queryEvent() {} -func (*EventQueryFindCloserResponse[K, N]) queryEvent() {} -func (*EventQueryFindCloserFailure[K, N]) queryEvent() {} +func (*EventQueryCancel) queryEvent() {} +func (*EventQueryNodeResponse[K, N]) queryEvent() {} +func (*EventQueryNodeFailure[K, N]) queryEvent() {} +func (*EventQueryPoll) queryEvent() {} diff --git a/v2/coord/query/query_test.go b/v2/internal/coord/query/query_test.go similarity index 64% rename from v2/coord/query/query_test.go rename to v2/internal/coord/query/query_test.go index 49564dcd..6cb1d9d1 100644 --- a/v2/coord/query/query_test.go +++ b/v2/internal/coord/query/query_test.go @@ -9,23 +9,23 @@ import ( "github.com/plprobelab/go-kademlia/key" "github.com/stretchr/testify/require" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/tiny" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/tiny" ) func TestQueryConfigValidate(t *testing.T) { t.Run("default is valid", func(t *testing.T) { - cfg := DefaultQueryConfig[tiny.Key]() + cfg := DefaultQueryConfig() require.NoError(t, cfg.Validate()) }) t.Run("clock is not nil", func(t *testing.T) { - cfg := DefaultQueryConfig[tiny.Key]() + cfg := DefaultQueryConfig() cfg.Clock = nil require.Error(t, cfg.Validate()) }) t.Run("request timeout positive", func(t *testing.T) { - cfg := DefaultQueryConfig[tiny.Key]() + cfg := DefaultQueryConfig() cfg.RequestTimeout = 0 require.Error(t, cfg.Validate()) cfg.RequestTimeout = -1 @@ -33,7 +33,7 @@ func TestQueryConfigValidate(t *testing.T) { }) t.Run("concurrency positive", func(t *testing.T) { - cfg := DefaultQueryConfig[tiny.Key]() + cfg := DefaultQueryConfig() cfg.Concurrency = 0 require.Error(t, cfg.Validate()) cfg.Concurrency = -1 @@ -41,7 +41,7 @@ func TestQueryConfigValidate(t *testing.T) { }) t.Run("num results positive", func(t *testing.T) { - cfg := DefaultQueryConfig[tiny.Key]() + cfg := DefaultQueryConfig() cfg.NumResults = 0 require.Error(t, cfg.Validate()) cfg.NumResults = -1 @@ -62,17 +62,17 @@ func TestQueryMessagesNode(t *testing.T) { iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) - cfg := DefaultQueryConfig[tiny.Key]() + cfg := DefaultQueryConfig() cfg.Clock = clk queryID := QueryID("test") self := tiny.NewNode(0) - qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) // first thing the new query should do is request to send a message to the node - state := qry.Advance(ctx, nil) + state := qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) // check that we are messaging the correct node with the right message @@ -85,14 +85,14 @@ func TestQueryMessagesNode(t *testing.T) { require.Equal(t, 0, st.Stats.Success) // advancing now reports that the query is waiting for a response but its underlying query still has capacity - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryWaitingWithCapacity{}, state) stw := state.(*StateQueryWaitingWithCapacity) require.Equal(t, 1, stw.Stats.Requests) require.Equal(t, 0, st.Stats.Success) } -func TestQueryMessagesNearest(t *testing.T) { +func TestQueryFindCloserNearest(t *testing.T) { ctx := context.Background() target := tiny.Key(0b00000011) @@ -111,17 +111,17 @@ func TestQueryMessagesNearest(t *testing.T) { iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) - cfg := 
DefaultQueryConfig[tiny.Key]()
+	cfg := DefaultQueryConfig()
 	cfg.Clock = clk
 
 	queryID := QueryID("test")
 
 	self := tiny.NewNode(0)
-	qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg)
+	qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg)
 	require.NoError(t, err)
 
 	// first thing the new query should do is message the nearest node
-	state := qry.Advance(ctx, nil)
+	state := qry.Advance(ctx, &EventQueryPoll{})
 	require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state)
 
 	// check that we are contacting the nearest node first
@@ -142,26 +142,26 @@ func TestQueryCancelFinishesQuery(t *testing.T) {
 
 	iter := NewClosestNodesIter[tiny.Key, tiny.Node](target)
 
-	cfg := DefaultQueryConfig[tiny.Key]()
+	cfg := DefaultQueryConfig()
 	cfg.Clock = clk
 
 	queryID := QueryID("test")
 
 	self := tiny.NewNode(0)
-	qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg)
+	qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg)
 	require.NoError(t, err)
 
 	// first thing the new query should do is request to send a message to the node
-	state := qry.Advance(ctx, nil)
+	state := qry.Advance(ctx, &EventQueryPoll{})
 	require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state)
 
 	clk.Add(time.Second)
 
 	// cancel the query
 	state = qry.Advance(ctx, &EventQueryCancel{})
-	require.IsType(t, &StateQueryFinished{}, state)
+	require.IsType(t, &StateQueryFinished[tiny.Key, tiny.Node]{}, state)
 
-	stf := state.(*StateQueryFinished)
+	stf := state.(*StateQueryFinished[tiny.Key, tiny.Node])
 	require.Equal(t, 1, stf.Stats.Requests)
 
 	// no successful responses were received before query was cancelled
@@ -185,20 +185,20 @@ func TestQueryNoClosest(t *testing.T) {
 	iter := NewClosestNodesIter[tiny.Key, tiny.Node](target)
 
 	clk := clock.NewMock()
-	cfg := DefaultQueryConfig[tiny.Key]()
+	cfg := DefaultQueryConfig()
 	cfg.Clock = clk
 
 	queryID := QueryID("test")
 
 	self := tiny.NewNode(0)
-	qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg)
+	qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg)
 	require.NoError(t, err)
 
 	// query is finished because there were no nodes to contact
-	state := qry.Advance(ctx, nil)
-	require.IsType(t, &StateQueryFinished{}, state)
+	state := qry.Advance(ctx, &EventQueryPoll{})
+	require.IsType(t, &StateQueryFinished[tiny.Key, tiny.Node]{}, state)
 
-	stf := state.(*StateQueryFinished)
+	stf := state.(*StateQueryFinished[tiny.Key, tiny.Node])
 
 	// no requests were made
 	require.Equal(t, 0, stf.Stats.Requests)
@@ -228,32 +228,32 @@ func TestQueryWaitsAtCapacity(t *testing.T) {
 
 	iter := NewClosestNodesIter[tiny.Key, tiny.Node](target)
 
-	cfg := DefaultQueryConfig[tiny.Key]()
+	cfg := DefaultQueryConfig()
 	cfg.Clock = clk
 	cfg.Concurrency = 2
 
 	queryID := QueryID("test")
 
 	self := tiny.NewNode(0)
-	qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg)
+	qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg)
 	require.NoError(t, err)
 
 	// first thing the new query should do is request to send a message to the node
-	state := qry.Advance(ctx, nil)
+	state := qry.Advance(ctx, &EventQueryPoll{})
 	require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state)
 	st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node])
 	require.Equal(t, a, st.NodeID)
 	require.Equal(t, 1, 
st.Stats.Requests) // advancing sends the message to the next node - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, b, st.NodeID) require.Equal(t, 2, st.Stats.Requests) // advancing now reports that the query is waiting at capacity since there are 2 messages in flight - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryWaitingAtCapacity{}, state) stw := state.(*StateQueryWaitingAtCapacity) @@ -281,7 +281,7 @@ func TestQueryTimedOutNodeMakesCapacity(t *testing.T) { iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) - cfg := DefaultQueryConfig[tiny.Key]() + cfg := DefaultQueryConfig() cfg.Clock = clk cfg.RequestTimeout = 3 * time.Minute cfg.Concurrency = len(knownNodes) - 1 // one less than the number of initial nodes @@ -289,11 +289,11 @@ func TestQueryTimedOutNodeMakesCapacity(t *testing.T) { queryID := QueryID("test") self := tiny.NewNode(0) - qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) // first thing the new query should do is contact the nearest node - state := qry.Advance(ctx, nil) + state := qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, a, st.NodeID) @@ -306,7 +306,7 @@ func TestQueryTimedOutNodeMakesCapacity(t *testing.T) { clk.Add(time.Minute) // while the query has capacity the query should contact the next nearest node - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, b, st.NodeID) @@ -319,7 +319,7 @@ func TestQueryTimedOutNodeMakesCapacity(t *testing.T) { clk.Add(time.Minute) // while the query has capacity the query should contact the second nearest node - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, c, st.NodeID) @@ -332,7 +332,7 @@ func TestQueryTimedOutNodeMakesCapacity(t *testing.T) { clk.Add(time.Minute) // the query should be at capacity - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryWaitingAtCapacity{}, state) stwa := state.(*StateQueryWaitingAtCapacity) require.Equal(t, 3, stwa.Stats.Requests) @@ -343,7 +343,7 @@ func TestQueryTimedOutNodeMakesCapacity(t *testing.T) { clk.Add(time.Minute) // the first node request should have timed out, making capacity for the last node to attempt connection - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, d, st.NodeID) @@ -357,7 +357,7 @@ func TestQueryTimedOutNodeMakesCapacity(t *testing.T) { clk.Add(time.Minute) // advancing now makes more capacity - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryWaitingWithCapacity{}, state) stww := 
state.(*StateQueryWaitingWithCapacity) @@ -366,7 +366,7 @@ func TestQueryTimedOutNodeMakesCapacity(t *testing.T) { require.Equal(t, 2, stww.Stats.Failure) } -func TestQueryMessageResponseMakesCapacity(t *testing.T) { +func TestQueryFindCloserResponseMakesCapacity(t *testing.T) { ctx := context.Background() target := tiny.Key(0b00000001) @@ -387,18 +387,18 @@ func TestQueryMessageResponseMakesCapacity(t *testing.T) { iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) - cfg := DefaultQueryConfig[tiny.Key]() + cfg := DefaultQueryConfig() cfg.Clock = clk cfg.Concurrency = len(knownNodes) - 1 // one less than the number of initial nodes queryID := QueryID("test") self := tiny.NewNode(0) - qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) // first thing the new query should do is contact the nearest node - state := qry.Advance(ctx, nil) + state := qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, a, st.NodeID) @@ -408,7 +408,7 @@ func TestQueryMessageResponseMakesCapacity(t *testing.T) { require.Equal(t, 0, stwm.Stats.Failure) // while the query has capacity the query should contact the next nearest node - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, b, st.NodeID) @@ -418,7 +418,7 @@ func TestQueryMessageResponseMakesCapacity(t *testing.T) { require.Equal(t, 0, stwm.Stats.Failure) // while the query has capacity the query should contact the second nearest node - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, c, st.NodeID) @@ -428,11 +428,11 @@ func TestQueryMessageResponseMakesCapacity(t *testing.T) { require.Equal(t, 0, stwm.Stats.Failure) // the query should be at capacity - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryWaitingAtCapacity{}, state) // notify query that first node was contacted successfully, now node d can be contacted - state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{NodeID: a}) + state = qry.Advance(ctx, &EventQueryNodeResponse[tiny.Key, tiny.Node]{NodeID: a}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, d, st.NodeID) @@ -442,7 +442,7 @@ func TestQueryMessageResponseMakesCapacity(t *testing.T) { require.Equal(t, 0, stwm.Stats.Failure) // the query should be at capacity again - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryWaitingAtCapacity{}, state) stwa := state.(*StateQueryWaitingAtCapacity) require.Equal(t, 4, stwa.Stats.Requests) @@ -471,28 +471,28 @@ func TestQueryCloserNodesAreAddedToIteration(t *testing.T) { iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) - cfg := DefaultQueryConfig[tiny.Key]() + cfg := DefaultQueryConfig() cfg.Clock = clk cfg.Concurrency = 2 queryID := QueryID("test") self := tiny.NewNode(0) - qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, 
target, iter, knownNodes, cfg) + qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) // first thing the new query should do is contact the first node - state := qry.Advance(ctx, nil) + state := qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, d, st.NodeID) // advancing reports query has capacity - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryWaitingWithCapacity{}, state) // notify query that first node was contacted successfully, with closer nodes - state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{ + state = qry.Advance(ctx, &EventQueryNodeResponse[tiny.Key, tiny.Node]{ NodeID: d, CloserNodes: []tiny.Node{ b, @@ -527,34 +527,34 @@ func TestQueryCloserNodesIgnoresDuplicates(t *testing.T) { iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) - cfg := DefaultQueryConfig[tiny.Key]() + cfg := DefaultQueryConfig() cfg.Clock = clk cfg.Concurrency = 2 queryID := QueryID("test") self := tiny.NewNode(0) - qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) // first thing the new query should do is contact the first node - state := qry.Advance(ctx, nil) + state := qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, a, st.NodeID) // next the query attempts to contact second nearest node - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, d, st.NodeID) // advancing reports query has no capacity - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryWaitingAtCapacity{}, state) // notify query that second node was contacted successfully, with closer nodes - state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{ + state = qry.Advance(ctx, &EventQueryNodeResponse[tiny.Key, tiny.Node]{ NodeID: d, CloserNodes: []tiny.Node{ b, @@ -581,27 +581,27 @@ func TestQueryCancelFinishesIteration(t *testing.T) { iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) - cfg := DefaultQueryConfig[tiny.Key]() + cfg := DefaultQueryConfig() cfg.Clock = clk cfg.Concurrency = 2 queryID := QueryID("test") self := tiny.NewNode(0) - qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) // first thing the new query should do is contact the first node - state := qry.Advance(ctx, nil) + state := qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, a, st.NodeID) // cancel the query so it is now finished state = qry.Advance(ctx, &EventQueryCancel{}) - require.IsType(t, &StateQueryFinished{}, state) + require.IsType(t, &StateQueryFinished[tiny.Key, tiny.Node]{}, state) - stf := 
state.(*StateQueryFinished) + stf := state.(*StateQueryFinished[tiny.Key, tiny.Node]) require.Equal(t, 0, stf.Stats.Success) } @@ -619,43 +619,43 @@ func TestQueryFinishedIgnoresLaterEvents(t *testing.T) { iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) - cfg := DefaultQueryConfig[tiny.Key]() + cfg := DefaultQueryConfig() cfg.Clock = clk cfg.Concurrency = 2 queryID := QueryID("test") self := tiny.NewNode(0) - qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) // first thing the new query should do is contact the first node - state := qry.Advance(ctx, nil) + state := qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, b, st.NodeID) // cancel the query so it is now finished state = qry.Advance(ctx, &EventQueryCancel{}) - require.IsType(t, &StateQueryFinished{}, state) + require.IsType(t, &StateQueryFinished[tiny.Key, tiny.Node]{}, state) // no successes - stf := state.(*StateQueryFinished) + stf := state.(*StateQueryFinished[tiny.Key, tiny.Node]) require.Equal(t, 1, stf.Stats.Requests) require.Equal(t, 0, stf.Stats.Success) require.Equal(t, 0, stf.Stats.Failure) // notify query that second node was contacted successfully, with closer nodes - state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{ + state = qry.Advance(ctx, &EventQueryNodeResponse[tiny.Key, tiny.Node]{ NodeID: b, CloserNodes: []tiny.Node{a}, }) // query remains finished - require.IsType(t, &StateQueryFinished{}, state) + require.IsType(t, &StateQueryFinished[tiny.Key, tiny.Node]{}, state) // still no successes since contact message was after query had been cancelled - stf = state.(*StateQueryFinished) + stf = state.(*StateQueryFinished[tiny.Key, tiny.Node]) require.Equal(t, 1, stf.Stats.Requests) require.Equal(t, 0, stf.Stats.Success) require.Equal(t, 0, stf.Stats.Failure) @@ -676,18 +676,18 @@ func TestQueryWithCloserIterIgnoresMessagesFromUnknownNodes(t *testing.T) { iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) - cfg := DefaultQueryConfig[tiny.Key]() + cfg := DefaultQueryConfig() cfg.Clock = clk cfg.Concurrency = 2 queryID := QueryID("test") self := tiny.NewNode(0) - qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) // first thing the new query should do is contact the first node - state := qry.Advance(ctx, nil) + state := qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, c, st.NodeID) @@ -697,7 +697,7 @@ func TestQueryWithCloserIterIgnoresMessagesFromUnknownNodes(t *testing.T) { require.Equal(t, 0, stwm.Stats.Failure) // notify query that second node was contacted successfully, with closer nodes - state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{ + state = qry.Advance(ctx, &EventQueryNodeResponse[tiny.Key, tiny.Node]{ NodeID: b, CloserNodes: []tiny.Node{a}, }) @@ -727,7 +727,7 @@ func TestQueryWithCloserIterFinishesWhenNumResultsReached(t *testing.T) { iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) - cfg := DefaultQueryConfig[tiny.Key]() + cfg := 
DefaultQueryConfig() cfg.Clock = clk cfg.Concurrency = 4 cfg.NumResults = 2 @@ -735,23 +735,23 @@ func TestQueryWithCloserIterFinishesWhenNumResultsReached(t *testing.T) { queryID := QueryID("test") self := tiny.NewNode(0) - qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) // contact first node - state := qry.Advance(ctx, nil) + state := qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, a, st.NodeID) // contact second node - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, b, st.NodeID) // notify query that first node was contacted successfully - state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{ + state = qry.Advance(ctx, &EventQueryNodeResponse[tiny.Key, tiny.Node]{ NodeID: a, }) @@ -761,12 +761,15 @@ func TestQueryWithCloserIterFinishesWhenNumResultsReached(t *testing.T) { require.Equal(t, c, st.NodeID) // notify query that second node was contacted successfully - state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{ + state = qry.Advance(ctx, &EventQueryNodeResponse[tiny.Key, tiny.Node]{ NodeID: b, }) // query has finished since it contacted the NumResults closest nodes - require.IsType(t, &StateQueryFinished{}, state) + require.IsType(t, &StateQueryFinished[tiny.Key, tiny.Node]{}, state) + + stf := state.(*StateQueryFinished[tiny.Key, tiny.Node]) + require.Equal(t, 2, len(stf.ClosestNodes)) } func TestQueryWithCloserIterContinuesUntilNumResultsReached(t *testing.T) { @@ -784,7 +787,7 @@ func TestQueryWithCloserIterContinuesUntilNumResultsReached(t *testing.T) { iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) - cfg := DefaultQueryConfig[tiny.Key]() + cfg := DefaultQueryConfig() cfg.Clock = clk cfg.Concurrency = 4 cfg.NumResults = 2 @@ -792,18 +795,18 @@ func TestQueryWithCloserIterContinuesUntilNumResultsReached(t *testing.T) { queryID := QueryID("test") self := tiny.NewNode(0) - qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) // contact first node - state := qry.Advance(ctx, nil) + state := qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, c, st.NodeID) // notify query that node was contacted successfully and tell it about // a closer one - state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{ + state = qry.Advance(ctx, &EventQueryNodeResponse[tiny.Key, tiny.Node]{ NodeID: c, CloserNodes: []tiny.Node{b}, }) @@ -815,7 +818,7 @@ func TestQueryWithCloserIterContinuesUntilNumResultsReached(t *testing.T) { // notify query that node was contacted successfully and tell it about // a closer one - state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{ + state = qry.Advance(ctx, &EventQueryNodeResponse[tiny.Key, tiny.Node]{ NodeID: b, CloserNodes: []tiny.Node{a}, }) @@ -828,14 +831,14 @@ func 
TestQueryWithCloserIterContinuesUntilNumResultsReached(t *testing.T) { require.Equal(t, a, st.NodeID) // notify query that second node was contacted successfully - state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{ + state = qry.Advance(ctx, &EventQueryNodeResponse[tiny.Key, tiny.Node]{ NodeID: a, }) // query has finished since it contacted the NumResults closest nodes - require.IsType(t, &StateQueryFinished{}, state) + require.IsType(t, &StateQueryFinished[tiny.Key, tiny.Node]{}, state) - stf := state.(*StateQueryFinished) + stf := state.(*StateQueryFinished[tiny.Key, tiny.Node]) require.Equal(t, 3, stf.Stats.Success) } @@ -857,50 +860,50 @@ func TestQueryNotContactedMakesCapacity(t *testing.T) { iter := NewSequentialIter[tiny.Key, tiny.Node]() clk := clock.NewMock() - cfg := DefaultQueryConfig[tiny.Key]() + cfg := DefaultQueryConfig() cfg.Clock = clk cfg.Concurrency = len(knownNodes) - 1 // one less than the number of initial nodes queryID := QueryID("test") self := tiny.NewNode(0) - qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) // first thing the new query should do is contact the nearest node - state := qry.Advance(ctx, nil) + state := qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, a, st.NodeID) // while the query has capacity the query should contact the next nearest node - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, b, st.NodeID) // while the query has capacity the query should contact the second nearest node - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, c, st.NodeID) // the query should be at capacity - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryWaitingAtCapacity{}, state) // notify query that first node was not contacted, now node d can be contacted - state = qry.Advance(ctx, &EventQueryFindCloserFailure[tiny.Key, tiny.Node]{NodeID: a}) + state = qry.Advance(ctx, &EventQueryNodeFailure[tiny.Key, tiny.Node]{NodeID: a}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, d, st.NodeID) // the query should be at capacity again - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryWaitingAtCapacity{}, state) } -func TestQueryAllNotContactedFinishes(t *testing.T) { +func TestFindCloserQueryAllNotContactedFinishes(t *testing.T) { ctx := context.Background() target := tiny.Key(0b00000001) @@ -915,47 +918,47 @@ func TestQueryAllNotContactedFinishes(t *testing.T) { iter := NewSequentialIter[tiny.Key, tiny.Node]() - cfg := DefaultQueryConfig[tiny.Key]() + cfg := DefaultQueryConfig() cfg.Clock = clk cfg.Concurrency = len(knownNodes) // allow all to be contacted at once queryID := QueryID("test") self := tiny.NewNode(0) - qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + 
qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) // first thing the new query should do is contact the nearest node - state := qry.Advance(ctx, nil) + state := qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) // while the query has capacity the query should contact the next nearest node - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) // while the query has capacity the query should contact the third nearest node - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) // the query should be at capacity - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryWaitingAtCapacity{}, state) // notify query that first node was not contacted - state = qry.Advance(ctx, &EventQueryFindCloserFailure[tiny.Key, tiny.Node]{NodeID: a}) + state = qry.Advance(ctx, &EventQueryNodeFailure[tiny.Key, tiny.Node]{NodeID: a}) require.IsType(t, &StateQueryWaitingWithCapacity{}, state) // notify query that second node was not contacted - state = qry.Advance(ctx, &EventQueryFindCloserFailure[tiny.Key, tiny.Node]{NodeID: b}) + state = qry.Advance(ctx, &EventQueryNodeFailure[tiny.Key, tiny.Node]{NodeID: b}) require.IsType(t, &StateQueryWaitingWithCapacity{}, state) // notify query that third node was not contacted - state = qry.Advance(ctx, &EventQueryFindCloserFailure[tiny.Key, tiny.Node]{NodeID: c}) + state = qry.Advance(ctx, &EventQueryNodeFailure[tiny.Key, tiny.Node]{NodeID: c}) // query has finished since it contacted all possible nodes - require.IsType(t, &StateQueryFinished{}, state) + require.IsType(t, &StateQueryFinished[tiny.Key, tiny.Node]{}, state) - stf := state.(*StateQueryFinished) + stf := state.(*StateQueryFinished[tiny.Key, tiny.Node]) require.Equal(t, 0, stf.Stats.Success) } @@ -973,7 +976,7 @@ func TestQueryAllContactedFinishes(t *testing.T) { iter := NewSequentialIter[tiny.Key, tiny.Node]() - cfg := DefaultQueryConfig[tiny.Key]() + cfg := DefaultQueryConfig() cfg.Clock = clk cfg.Concurrency = len(knownNodes) // allow all to be contacted at once cfg.NumResults = len(knownNodes) + 1 // one more than the size of the network @@ -981,41 +984,41 @@ func TestQueryAllContactedFinishes(t *testing.T) { queryID := QueryID("test") self := tiny.NewNode(0) - qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) // first thing the new query should do is contact the nearest node - state := qry.Advance(ctx, nil) + state := qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) // while the query has capacity the query should contact the next nearest node - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) // while the query has capacity the query should contact the third nearest node - state = qry.Advance(ctx, nil) + state = qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) // the query should be at capacity - state = qry.Advance(ctx, nil) + state = 
qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryWaitingAtCapacity{}, state) // notify query that first node was contacted successfully, but no closer nodes - state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{NodeID: a}) + state = qry.Advance(ctx, &EventQueryNodeResponse[tiny.Key, tiny.Node]{NodeID: a}) require.IsType(t, &StateQueryWaitingWithCapacity{}, state) // notify query that second node was contacted successfully, but no closer nodes - state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{NodeID: b}) + state = qry.Advance(ctx, &EventQueryNodeResponse[tiny.Key, tiny.Node]{NodeID: b}) require.IsType(t, &StateQueryWaitingWithCapacity{}, state) // notify query that third node was contacted successfully, but no closer nodes - state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{NodeID: c}) + state = qry.Advance(ctx, &EventQueryNodeResponse[tiny.Key, tiny.Node]{NodeID: c}) // query has finished since it contacted all possible nodes, even though it didn't // reach the desired NumResults - require.IsType(t, &StateQueryFinished{}, state) + require.IsType(t, &StateQueryFinished[tiny.Key, tiny.Node]{}, state) - stf := state.(*StateQueryFinished) + stf := state.(*StateQueryFinished[tiny.Key, tiny.Node]) require.Equal(t, 3, stf.Stats.Success) } @@ -1033,34 +1036,273 @@ func TestQueryNeverMessagesSelf(t *testing.T) { iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) - cfg := DefaultQueryConfig[tiny.Key]() + cfg := DefaultQueryConfig() cfg.Clock = clk cfg.Concurrency = 2 queryID := QueryID("test") self := a - qry, err := NewQuery[tiny.Key, tiny.Node](self, queryID, target, iter, knownNodes, cfg) + qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) require.NoError(t, err) // first thing the new query should do is contact the first node - state := qry.Advance(ctx, nil) + state := qry.Advance(ctx, &EventQueryPoll{}) require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) st := state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) require.Equal(t, b, st.NodeID) // notify query that first node was contacted successfully, with closer nodes - state = qry.Advance(ctx, &EventQueryFindCloserResponse[tiny.Key, tiny.Node]{ + state = qry.Advance(ctx, &EventQueryNodeResponse[tiny.Key, tiny.Node]{ NodeID: b, CloserNodes: []tiny.Node{a}, }) // query is finished since it can't contact self - require.IsType(t, &StateQueryFinished{}, state) + require.IsType(t, &StateQueryFinished[tiny.Key, tiny.Node]{}, state) // one successful message - stf := state.(*StateQueryFinished) + stf := state.(*StateQueryFinished[tiny.Key, tiny.Node]) require.Equal(t, 1, stf.Stats.Requests) require.Equal(t, 1, stf.Stats.Success) require.Equal(t, 0, stf.Stats.Failure) } + +func TestQueryMessagesNearest(t *testing.T) { + ctx := context.Background() + + target := tiny.Key(0b00000011) + far := tiny.NewNode(0b11011011) + near := tiny.NewNode(0b00000110) + + // ensure near is nearer to target than far is + require.Less(t, target.Xor(near.Key()), target.Xor(far.Key())) + + // knownNodes are in "random" order with furthest before nearest + knownNodes := []tiny.Node{ + far, + near, + } + clk := clock.NewMock() + + iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) + + cfg := DefaultQueryConfig() + cfg.Clock = clk + + queryID := QueryID("test") + + self := tiny.NewNode(0) + msg := tiny.Message{Content: "msg"} + qry, err := NewQuery[tiny.Key, tiny.Node, tiny.Message](self, 
queryID, target, msg, iter, knownNodes, cfg) + require.NoError(t, err) + + // first thing the new query should do is message the nearest node + state := qry.Advance(ctx, &EventQueryPoll{}) + require.IsType(t, &StateQuerySendMessage[tiny.Key, tiny.Node, tiny.Message]{}, state) + + // check that we are contacting the nearest node first + st := state.(*StateQuerySendMessage[tiny.Key, tiny.Node, tiny.Message]) + require.Equal(t, near, st.NodeID) +} + +func TestQueryMessageResponseMakesCapacity(t *testing.T) { + ctx := context.Background() + + target := tiny.Key(0b00000001) + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00001000) // 8 + c := tiny.NewNode(0b00010000) // 16 + d := tiny.NewNode(0b00100000) // 32 + + // ensure the order of the known nodes + require.True(t, target.Xor(a.Key()).Compare(target.Xor(b.Key())) == -1) + require.True(t, target.Xor(b.Key()).Compare(target.Xor(c.Key())) == -1) + require.True(t, target.Xor(c.Key()).Compare(target.Xor(d.Key())) == -1) + + // knownNodes are in "random" order + knownNodes := []tiny.Node{b, c, a, d} + + clk := clock.NewMock() + + iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) + + cfg := DefaultQueryConfig() + cfg.Clock = clk + cfg.Concurrency = len(knownNodes) - 1 // one less than the number of initial nodes + + queryID := QueryID("test") + + self := tiny.NewNode(0) + msg := tiny.Message{Content: "msg"} + qry, err := NewQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, msg, iter, knownNodes, cfg) + require.NoError(t, err) + + // first thing the new query should do is contact the nearest node + state := qry.Advance(ctx, &EventQueryPoll{}) + require.IsType(t, &StateQuerySendMessage[tiny.Key, tiny.Node, tiny.Message]{}, state) + st := state.(*StateQuerySendMessage[tiny.Key, tiny.Node, tiny.Message]) + require.Equal(t, a, st.NodeID) + require.Equal(t, 1, st.Stats.Requests) + require.Equal(t, 0, st.Stats.Success) + require.Equal(t, 0, st.Stats.Failure) + + // while the query has capacity the query should contact the next nearest node + state = qry.Advance(ctx, &EventQueryPoll{}) + require.IsType(t, &StateQuerySendMessage[tiny.Key, tiny.Node, tiny.Message]{}, state) + st = state.(*StateQuerySendMessage[tiny.Key, tiny.Node, tiny.Message]) + require.Equal(t, b, st.NodeID) + require.Equal(t, 2, st.Stats.Requests) + require.Equal(t, 0, st.Stats.Success) + require.Equal(t, 0, st.Stats.Failure) + + // while the query has capacity the query should contact the second nearest node + state = qry.Advance(ctx, &EventQueryPoll{}) + require.IsType(t, &StateQuerySendMessage[tiny.Key, tiny.Node, tiny.Message]{}, state) + st = state.(*StateQuerySendMessage[tiny.Key, tiny.Node, tiny.Message]) + require.Equal(t, c, st.NodeID) + require.Equal(t, 3, st.Stats.Requests) + require.Equal(t, 0, st.Stats.Success) + require.Equal(t, 0, st.Stats.Failure) + + // the query should be at capacity + state = qry.Advance(ctx, &EventQueryPoll{}) + require.IsType(t, &StateQueryWaitingAtCapacity{}, state) + + // notify query that first node was contacted successfully, now node d can be contacted + state = qry.Advance(ctx, &EventQueryNodeResponse[tiny.Key, tiny.Node]{NodeID: a}) + require.IsType(t, &StateQuerySendMessage[tiny.Key, tiny.Node, tiny.Message]{}, state) + st = state.(*StateQuerySendMessage[tiny.Key, tiny.Node, tiny.Message]) + require.Equal(t, d, st.NodeID) + require.Equal(t, 4, st.Stats.Requests) + require.Equal(t, 1, st.Stats.Success) + require.Equal(t, 0, st.Stats.Failure) + + // the query should be at capacity again + state = 
qry.Advance(ctx, &EventQueryPoll{}) + require.IsType(t, &StateQueryWaitingAtCapacity{}, state) + stwa := state.(*StateQueryWaitingAtCapacity) + require.Equal(t, 4, stwa.Stats.Requests) + require.Equal(t, 1, stwa.Stats.Success) + require.Equal(t, 0, stwa.Stats.Failure) +} + +func TestQueryAllNotContactedFinishes(t *testing.T) { + ctx := context.Background() + + target := tiny.Key(0b00000001) + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00001000) // 8 + c := tiny.NewNode(0b00010000) // 16 + + // knownNodes are in "random" order + knownNodes := []tiny.Node{a, b, c} + + clk := clock.NewMock() + + iter := NewSequentialIter[tiny.Key, tiny.Node]() + + cfg := DefaultQueryConfig() + cfg.Clock = clk + cfg.Concurrency = len(knownNodes) // allow all to be contacted at once + + queryID := QueryID("test") + + self := tiny.NewNode(0) + msg := tiny.Message{Content: "msg"} + qry, err := NewQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, msg, iter, knownNodes, cfg) + require.NoError(t, err) + + // first thing the new query should do is contact the nearest node + state := qry.Advance(ctx, &EventQueryPoll{}) + require.IsType(t, &StateQuerySendMessage[tiny.Key, tiny.Node, tiny.Message]{}, state) + + // while the query has capacity the query should contact the next nearest node + state = qry.Advance(ctx, &EventQueryPoll{}) + require.IsType(t, &StateQuerySendMessage[tiny.Key, tiny.Node, tiny.Message]{}, state) + + // while the query has capacity the query should contact the third nearest node + state = qry.Advance(ctx, &EventQueryPoll{}) + require.IsType(t, &StateQuerySendMessage[tiny.Key, tiny.Node, tiny.Message]{}, state) + + // the query should be at capacity + state = qry.Advance(ctx, &EventQueryPoll{}) + require.IsType(t, &StateQueryWaitingAtCapacity{}, state) + + // notify query that first node was not contacted + state = qry.Advance(ctx, &EventQueryNodeFailure[tiny.Key, tiny.Node]{NodeID: a}) + require.IsType(t, &StateQueryWaitingWithCapacity{}, state) + + // notify query that second node was not contacted + state = qry.Advance(ctx, &EventQueryNodeFailure[tiny.Key, tiny.Node]{NodeID: b}) + require.IsType(t, &StateQueryWaitingWithCapacity{}, state) + + // notify query that third node was not contacted + state = qry.Advance(ctx, &EventQueryNodeFailure[tiny.Key, tiny.Node]{NodeID: c}) + + // query has finished since it contacted all possible nodes + require.IsType(t, &StateQueryFinished[tiny.Key, tiny.Node]{}, state) + + stf := state.(*StateQueryFinished[tiny.Key, tiny.Node]) + require.Equal(t, 0, stf.Stats.Success) + require.Equal(t, 3, stf.Stats.Failure) +} + +func TestFindCloserQueryIncludesPartialClosestNodesWhenCancelled(t *testing.T) { + ctx := context.Background() + + target := tiny.Key(0b00000001) + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00001000) // 8 + c := tiny.NewNode(0b00010000) // 16 + d := tiny.NewNode(0b00100000) // 32 + + // four known nodes to start with + knownNodes := []tiny.Node{a, b, c, d} + + clk := clock.NewMock() + + iter := NewClosestNodesIter[tiny.Key, tiny.Node](target) + + cfg := DefaultQueryConfig() + cfg.Clock = clk + cfg.Concurrency = 4 + cfg.NumResults = 4 + + queryID := QueryID("test") + + self := tiny.NewNode(0) + qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) + require.NoError(t, err) + + // contact first node + state := qry.Advance(ctx, &EventQueryPoll{}) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st := 
state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, a, st.NodeID) + + // contact second node + state = qry.Advance(ctx, &EventQueryPoll{}) + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, b, st.NodeID) + + // notify query that first node was contacted successfully + state = qry.Advance(ctx, &EventQueryNodeResponse[tiny.Key, tiny.Node]{ + NodeID: a, + }) + + // query attempts to contact third node + require.IsType(t, &StateQueryFindCloser[tiny.Key, tiny.Node]{}, state) + st = state.(*StateQueryFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, c, st.NodeID) + + // cancel query + state = qry.Advance(ctx, &EventQueryCancel{}) + + // query has finished + require.IsType(t, &StateQueryFinished[tiny.Key, tiny.Node]{}, state) + + stf := state.(*StateQueryFinished[tiny.Key, tiny.Node]) + require.Equal(t, 1, len(stf.ClosestNodes)) +} diff --git a/v2/coord/routing.go b/v2/internal/coord/routing.go similarity index 99% rename from v2/coord/routing.go rename to v2/internal/coord/routing.go index 1c34bca8..832bfa64 100644 --- a/v2/coord/routing.go +++ b/v2/internal/coord/routing.go @@ -9,7 +9,7 @@ import ( "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/routing" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/routing" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) diff --git a/v2/coord/routing/bootstrap.go b/v2/internal/coord/routing/bootstrap.go similarity index 93% rename from v2/coord/routing/bootstrap.go rename to v2/internal/coord/routing/bootstrap.go index e4b9d452..914f9615 100644 --- a/v2/coord/routing/bootstrap.go +++ b/v2/internal/coord/routing/bootstrap.go @@ -11,7 +11,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/query" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query" "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) @@ -20,7 +20,7 @@ type Bootstrap[K kad.Key[K], N kad.NodeID[K]] struct { self N // qry is the query used by the bootstrap process - qry *query.Query[K, N] + qry *query.Query[K, N, any] // cfg is a copy of the optional configuration supplied to the Bootstrap cfg BootstrapConfig[K] @@ -101,29 +101,29 @@ func (b *Bootstrap[K, N]) Advance(ctx context.Context, ev BootstrapEvent) Bootst // TODO: ignore start event if query is already in progress iter := query.NewClosestNodesIter[K, N](b.self.Key()) - qryCfg := query.DefaultQueryConfig[K]() + qryCfg := query.DefaultQueryConfig() qryCfg.Clock = b.cfg.Clock qryCfg.Concurrency = b.cfg.RequestConcurrency qryCfg.RequestTimeout = b.cfg.RequestTimeout queryID := query.QueryID("bootstrap") - qry, err := query.NewQuery[K, N](b.self, queryID, b.self.Key(), iter, tev.KnownClosestNodes, qryCfg) + qry, err := query.NewFindCloserQuery[K, N, any](b.self, queryID, b.self.Key(), iter, tev.KnownClosestNodes, qryCfg) if err != nil { // TODO: don't panic panic(err) } b.qry = qry - return b.advanceQuery(ctx, nil) + return b.advanceQuery(ctx, &query.EventQueryPoll{}) case *EventBootstrapFindCloserResponse[K, N]: - return b.advanceQuery(ctx, &query.EventQueryFindCloserResponse[K, N]{ + return b.advanceQuery(ctx, &query.EventQueryNodeResponse[K, N]{ NodeID: tev.NodeID, CloserNodes: tev.CloserNodes, }) case *EventBootstrapFindCloserFailure[K, N]: span.RecordError(tev.Error) - return b.advanceQuery(ctx, &query.EventQueryFindCloserFailure[K, N]{ + return b.advanceQuery(ctx, 
&query.EventQueryNodeFailure[K, N]{ NodeID: tev.NodeID, Error: tev.Error, }) @@ -135,7 +135,7 @@ func (b *Bootstrap[K, N]) Advance(ctx context.Context, ev BootstrapEvent) Bootst } if b.qry != nil { - return b.advanceQuery(ctx, nil) + return b.advanceQuery(ctx, &query.EventQueryPoll{}) } return &StateBootstrapIdle{} @@ -154,7 +154,7 @@ func (b *Bootstrap[K, N]) advanceQuery(ctx context.Context, qev query.QueryEvent NodeID: st.NodeID, Target: st.Target, } - case *query.StateQueryFinished: + case *query.StateQueryFinished[K, N]: span.SetAttributes(attribute.String("out_state", "StateBootstrapFinished")) return &StateBootstrapFinished{ Stats: st.Stats, diff --git a/v2/coord/routing/bootstrap_test.go b/v2/internal/coord/routing/bootstrap_test.go similarity index 98% rename from v2/coord/routing/bootstrap_test.go rename to v2/internal/coord/routing/bootstrap_test.go index df1364df..70c8b6f0 100644 --- a/v2/coord/routing/bootstrap_test.go +++ b/v2/internal/coord/routing/bootstrap_test.go @@ -8,8 +8,8 @@ import ( "github.com/plprobelab/go-kademlia/key" "github.com/stretchr/testify/require" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/tiny" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/query" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/tiny" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query" ) func TestBootstrapConfigValidate(t *testing.T) { diff --git a/v2/coord/routing/include.go b/v2/internal/coord/routing/include.go similarity index 100% rename from v2/coord/routing/include.go rename to v2/internal/coord/routing/include.go diff --git a/v2/coord/routing/include_test.go b/v2/internal/coord/routing/include_test.go similarity index 99% rename from v2/coord/routing/include_test.go rename to v2/internal/coord/routing/include_test.go index a788e8d5..a565521a 100644 --- a/v2/coord/routing/include_test.go +++ b/v2/internal/coord/routing/include_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/benbjohnson/clock" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/tiny" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/tiny" "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/routing/simplert" "github.com/stretchr/testify/require" diff --git a/v2/coord/routing/probe.go b/v2/internal/coord/routing/probe.go similarity index 100% rename from v2/coord/routing/probe.go rename to v2/internal/coord/routing/probe.go diff --git a/v2/coord/routing/probe_test.go b/v2/internal/coord/routing/probe_test.go similarity index 99% rename from v2/coord/routing/probe_test.go rename to v2/internal/coord/routing/probe_test.go index e07d6445..e97ddce3 100644 --- a/v2/coord/routing/probe_test.go +++ b/v2/internal/coord/routing/probe_test.go @@ -11,7 +11,7 @@ import ( "github.com/plprobelab/go-kademlia/routing/simplert" "github.com/stretchr/testify/require" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/tiny" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/tiny" ) var _ heap.Interface = (*nodeValuePendingList[tiny.Key, tiny.Node])(nil) diff --git a/v2/coord/routing_test.go b/v2/internal/coord/routing_test.go similarity index 97% rename from v2/coord/routing_test.go rename to v2/internal/coord/routing_test.go index 2b07d6d1..c789c9dc 100644 --- a/v2/coord/routing_test.go +++ b/v2/internal/coord/routing_test.go @@ -12,9 +12,9 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/exp/slog" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest" - 
"github.com/libp2p/go-libp2p-kad-dht/v2/coord/query" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/routing" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/nettest" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/routing" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) diff --git a/v2/coord/telemetry.go b/v2/internal/coord/telemetry.go similarity index 100% rename from v2/coord/telemetry.go rename to v2/internal/coord/telemetry.go diff --git a/v2/kadt/kadt.go b/v2/kadt/kadt.go index f87057a8..2ad4bbef 100644 --- a/v2/kadt/kadt.go +++ b/v2/kadt/kadt.go @@ -1,7 +1,4 @@ // Package kadt contains the kademlia types for interacting with go-kademlia. -// It would be nicer to have these types in the top-level DHT package; however, -// we also need these types in, e.g., the pb package to let the -// [pb.Message] type conform to certain interfaces. package kadt import ( @@ -73,3 +70,15 @@ func (ai AddrInfo) Addresses() []ma.Multiaddr { copy(addrs, ai.Info.Addrs) return addrs } + +// RoutingTable is a mapping between [Key] and [PeerID] and provides methods to interact with the mapping +// and find PeerIDs close to a particular Key. +type RoutingTable interface { + kad.RoutingTable[Key, PeerID] + + // Cpl returns the longest common prefix length the supplied key shares with the table's key. + Cpl(kk Key) int + + // CplSize returns the number of nodes in the table whose longest common prefix with the table's key is of length cpl. + CplSize(cpl int) int +} diff --git a/v2/query_test.go b/v2/query_test.go index 3fa63336..86ea55c2 100644 --- a/v2/query_test.go +++ b/v2/query_test.go @@ -5,7 +5,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) diff --git a/v2/router.go b/v2/router.go index 14db2cd9..70bd69ca 100644 --- a/v2/router.go +++ b/v2/router.go @@ -13,12 +13,12 @@ import ( "github.com/libp2p/go-msgio/pbio" "google.golang.org/protobuf/proto" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) -type Router struct { +type router struct { host host.Host // ProtocolID represents the DHT [protocol] we can query with and respond to. // @@ -26,7 +26,7 @@ type Router struct { ProtocolID protocol.ID } -var _ coord.Router[kadt.Key, kadt.PeerID, *pb.Message] = (*Router)(nil) +var _ coord.Router[kadt.Key, kadt.PeerID, *pb.Message] = (*router)(nil) func FindKeyRequest(k kadt.Key) *pb.Message { marshalledKey, _ := k.MarshalBinary() @@ -36,7 +36,7 @@ func FindKeyRequest(k kadt.Key) *pb.Message { } } -func (r *Router) SendMessage(ctx context.Context, to kadt.PeerID, req *pb.Message) (*pb.Message, error) { +func (r *router) SendMessage(ctx context.Context, to kadt.PeerID, req *pb.Message) (*pb.Message, error) { // TODO: what to do with addresses in peer.AddrInfo? 
if len(r.host.Peerstore().Addrs(peer.ID(to))) == 0 { return nil, fmt.Errorf("no address for peer %s", to) @@ -79,7 +79,7 @@ func (r *Router) SendMessage(ctx context.Context, to kadt.PeerID, req *pb.Messag return &protoResp, err } -func (r *Router) GetClosestNodes(ctx context.Context, to kadt.PeerID, target kadt.Key) ([]kadt.PeerID, error) { +func (r *router) GetClosestNodes(ctx context.Context, to kadt.PeerID, target kadt.Key) ([]kadt.PeerID, error) { resp, err := r.SendMessage(ctx, to, FindKeyRequest(target)) if err != nil { return nil, err @@ -88,7 +88,7 @@ func (r *Router) GetClosestNodes(ctx context.Context, to kadt.PeerID, target kad return resp.CloserNodes(), nil } -func (r *Router) addToPeerStore(ctx context.Context, ai peer.AddrInfo, ttl time.Duration) error { +func (r *router) addToPeerStore(ctx context.Context, ai peer.AddrInfo, ttl time.Duration) error { // Don't add addresses for self or our connected peers. We have better ones. if ai.ID == r.host.ID() || r.host.Network().Connectedness(ai.ID) == network.Connected { return nil diff --git a/v2/routing.go b/v2/routing.go index bfb76805..569e66b9 100644 --- a/v2/routing.go +++ b/v2/routing.go @@ -8,8 +8,9 @@ import ( "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" record "github.com/libp2p/go-libp2p-record" recpb "github.com/libp2p/go-libp2p-record/pb" "github.com/libp2p/go-libp2p/core/network" @@ -40,25 +41,25 @@ func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { target := kadt.PeerID(id) - var foundNode coord.Node - fn := func(ctx context.Context, node coord.Node, stats coord.QueryStats) error { - if peer.ID(node.ID()) == id { - foundNode = node + var foundPeer peer.ID + fn := func(ctx context.Context, visited kadt.PeerID, msg *pb.Message, stats coord.QueryStats) error { + if peer.ID(visited) == id { + foundPeer = peer.ID(visited) return coord.ErrSkipRemaining } return nil } - _, err := d.kad.Query(ctx, target.Key(), fn) + _, _, err := d.kad.QueryClosest(ctx, target.Key(), fn, 20) if err != nil { return peer.AddrInfo{}, fmt.Errorf("failed to run query: %w", err) } - if foundNode == nil { + if foundPeer == "" { return peer.AddrInfo{}, fmt.Errorf("peer record not found") } - return d.host.Peerstore().PeerInfo(peer.ID(foundNode.ID())), nil + return d.host.Peerstore().PeerInfo(foundPeer), nil } func (d *DHT) Provide(ctx context.Context, c cid.Cid, brdcst bool) error { @@ -151,14 +152,44 @@ func (d *DHT) GetValue(ctx context.Context, key string, option ...routing.Option defer span.End() v, err := d.getValueLocal(ctx, key) - if err != nil { + if err == nil { return v, nil } if !errors.Is(err, ds.ErrNotFound) { return nil, fmt.Errorf("get value locally: %w", err) } - panic("implement me") + req := &pb.Message{ + Type: pb.Message_GET_VALUE, + Key: []byte(key), + } + + // TODO: quorum + var value []byte + fn := func(ctx context.Context, id kadt.PeerID, resp *pb.Message, stats coord.QueryStats) error { + if resp == nil { + return nil + } + + if resp.GetType() != pb.Message_GET_VALUE { + return nil + } + + if string(resp.GetKey()) != key { + return nil + } + + value = resp.GetRecord().GetValue() + + return coord.ErrSkipRemaining + } + + _, err = d.kad.QueryMessage(ctx, req, fn, d.cfg.BucketSize) + if err != nil { + return nil, fmt.Errorf("failed to run query: %w", err) + } + + return value, nil } // 
getValueLocal retrieves a value from the local datastore without querying the network. diff --git a/v2/routing_test.go b/v2/routing_test.go index 5204ae48..ec80da31 100644 --- a/v2/routing_test.go +++ b/v2/routing_test.go @@ -44,8 +44,6 @@ func TestGetSetValueLocal(t *testing.T) { } func TestGetValueOnePeer(t *testing.T) { - t.Skip("not implemented yet") - ctx := kadtest.CtxShort(t) top := NewTopology(t) local := top.AddServer(nil) diff --git a/v2/topology_test.go b/v2/topology_test.go index 1af5353f..189b494e 100644 --- a/v2/topology_test.go +++ b/v2/topology_test.go @@ -7,7 +7,7 @@ import ( "github.com/benbjohnson/clock" "github.com/libp2p/go-libp2p" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" @@ -55,12 +55,12 @@ func (t *Topology) AddServer(cfg *Config) *DHT { } cfg.Mode = ModeOptServer - rn := coord.NewBufferedRoutingNotifier() - cfg.Kademlia.RoutingNotifier = rn - d, err := New(h, cfg) require.NoError(t.tb, err) + rn := coord.NewBufferedRoutingNotifier() + d.kad.SetRoutingNotifier(rn) + // add at least 1 entry in the routing table so the server will pass connectivity checks fillRoutingTable(t.tb, d, 1) require.NotEmpty(t.tb, d.rt.NearestNodes(kadt.PeerID(d.host.ID()).Key(), 1)) @@ -97,12 +97,12 @@ func (t *Topology) AddClient(cfg *Config) *DHT { } cfg.Mode = ModeOptClient - rn := coord.NewBufferedRoutingNotifier() - cfg.Kademlia.RoutingNotifier = rn - d, err := New(h, cfg) require.NoError(t.tb, err) + rn := coord.NewBufferedRoutingNotifier() + d.kad.SetRoutingNotifier(rn) + t.tb.Cleanup(func() { if err = d.Close(); err != nil { t.tb.Logf("unexpected error when closing dht: %s", err) From 74ffa67a668746a6ffdeb1b850fb5e19b12527f7 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Fri, 22 Sep 2023 13:27:29 +0200 Subject: [PATCH 52/64] Add broadcast state machine for storing records in the DHT (#930) Co-authored-by: Ian Davis <18375+iand@users.noreply.github.com> --- v2/dht.go | 9 - v2/dht_test.go | 4 +- v2/go.mod | 4 +- v2/handlers.go | 4 +- v2/internal/coord/behaviour.go | 4 - v2/internal/coord/brdcst.go | 230 +++++++++++++ v2/internal/coord/brdcst/brdcst.go | 143 ++++++++ v2/internal/coord/brdcst/brdcst_test.go | 35 ++ v2/internal/coord/brdcst/config.go | 74 +++++ v2/internal/coord/brdcst/config_test.go | 44 +++ v2/internal/coord/brdcst/doc.go | 5 + v2/internal/coord/brdcst/followup.go | 252 ++++++++++++++ v2/internal/coord/brdcst/pool.go | 347 ++++++++++++++++++++ v2/internal/coord/brdcst/pool_test.go | 301 +++++++++++++++++ v2/internal/coord/brdcst_events.go | 34 ++ v2/internal/coord/coordinator.go | 131 ++++++-- v2/internal/coord/coordinator_test.go | 8 +- v2/internal/coord/{ => coordt}/coretypes.go | 11 +- v2/internal/coord/event.go | 31 +- v2/internal/coord/network.go | 37 ++- v2/internal/coord/query.go | 35 +- v2/internal/coord/query/pool.go | 67 ++-- v2/internal/coord/query/pool_test.go | 67 ++-- v2/internal/coord/query/query.go | 25 +- v2/internal/coord/query/query_test.go | 45 +-- v2/internal/coord/routing.go | 16 +- v2/internal/coord/routing/bootstrap.go | 5 +- v2/internal/coord/routing/bootstrap_test.go | 8 +- v2/internal/coord/routing/include_test.go | 3 +- v2/internal/coord/routing_test.go | 13 +- v2/kadt/kadt.go | 57 +++- v2/pb/msg.aux.go | 29 +- v2/pb/msg.aux_test.go | 23 ++ v2/query_test.go | 6 +- v2/router.go | 23 +- v2/routing.go | 77 ++++- v2/routing_test.go | 36 +- 37 files 
changed, 1992 insertions(+), 251 deletions(-) create mode 100644 v2/internal/coord/brdcst.go create mode 100644 v2/internal/coord/brdcst/brdcst.go create mode 100644 v2/internal/coord/brdcst/brdcst_test.go create mode 100644 v2/internal/coord/brdcst/config.go create mode 100644 v2/internal/coord/brdcst/config_test.go create mode 100644 v2/internal/coord/brdcst/doc.go create mode 100644 v2/internal/coord/brdcst/followup.go create mode 100644 v2/internal/coord/brdcst/pool.go create mode 100644 v2/internal/coord/brdcst/pool_test.go create mode 100644 v2/internal/coord/brdcst_events.go rename v2/internal/coord/{ => coordt}/coretypes.go (92%) diff --git a/v2/dht.go b/v2/dht.go index 0afeb408..1dbcfecc 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -2,7 +2,6 @@ package dht import ( "context" - "crypto/sha256" "fmt" "io" "sync" @@ -13,7 +12,6 @@ import ( "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" - "github.com/plprobelab/go-kademlia/key" "golang.org/x/exp/slog" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord" @@ -339,13 +337,6 @@ func (d *DHT) AddAddresses(ctx context.Context, ais []peer.AddrInfo, ttl time.Du return d.kad.AddNodes(ctx, ids) } -// newSHA256Key returns a [kadt.KadKey] that conforms to the [kad.Key] interface by -// SHA256 hashing the given bytes and wrapping them in a [kadt.KadKey]. -func newSHA256Key(data []byte) kadt.Key { - h := sha256.Sum256(data) - return key.NewKey256(h[:]) -} - // typedBackend returns the backend at the given namespace. It is casted to the // provided type. If the namespace doesn't exist or the type cast failed, this // function returns an error. Can't be a method on [DHT] because of the generic diff --git a/v2/dht_test.go b/v2/dht_test.go index 44d68bc4..0ee635df 100644 --- a/v2/dht_test.go +++ b/v2/dht_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) @@ -82,7 +82,7 @@ func TestAddAddresses(t *testing.T) { // local routing table should not contain the node _, err := local.kad.GetNode(ctx, kadt.PeerID(remote.host.ID())) - require.ErrorIs(t, err, coord.ErrNodeNotFound) + require.ErrorIs(t, err, coordt.ErrNodeNotFound) remoteAddrInfo := peer.AddrInfo{ ID: remote.host.ID(), diff --git a/v2/go.mod b/v2/go.mod index cd8a0748..5eb07d61 100644 --- a/v2/go.mod +++ b/v2/go.mod @@ -15,6 +15,8 @@ require ( github.com/libp2p/go-msgio v0.3.0 github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-multiaddr v0.11.0 + github.com/multiformats/go-multihash v0.2.3 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/plprobelab/go-kademlia v0.0.0-20230913171354-443ec1f56080 github.com/prometheus/client_golang v1.16.0 // indirect github.com/stretchr/testify v1.8.4 @@ -84,14 +86,12 @@ require ( github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-multibase v0.2.0 // indirect github.com/multiformats/go-multicodec v0.9.0 // indirect - github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-multistream v0.4.1 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/nxadm/tail v1.4.8 // indirect github.com/onsi/ginkgo/v2 v2.11.0 // indirect github.com/opencontainers/runtime-spec v1.1.0 // indirect 
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect diff --git a/v2/handlers.go b/v2/handlers.go index 5b8536f3..74e55a2b 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -110,7 +110,7 @@ func (d *DHT) handleGetValue(ctx context.Context, remote peer.ID, req *pb.Messag resp := &pb.Message{ Type: pb.Message_GET_VALUE, Key: req.GetKey(), - CloserPeers: d.closerPeers(ctx, remote, newSHA256Key(req.GetKey())), + CloserPeers: d.closerPeers(ctx, remote, kadt.NewKey(req.GetKey())), } ns, path, err := record.SplitKey(k) // get namespace (prefix of the key) @@ -226,7 +226,7 @@ func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Me resp := &pb.Message{ Type: pb.Message_GET_PROVIDERS, Key: k, - CloserPeers: d.closerPeers(ctx, remote, newSHA256Key(k)), + CloserPeers: d.closerPeers(ctx, remote, kadt.NewKey(k)), ProviderPeers: pbProviders, } diff --git a/v2/internal/coord/behaviour.go b/v2/internal/coord/behaviour.go index aa69917f..74609944 100644 --- a/v2/internal/coord/behaviour.go +++ b/v2/internal/coord/behaviour.go @@ -37,10 +37,6 @@ type Behaviour[I BehaviourEvent, O BehaviourEvent] interface { Perform(ctx context.Context) (O, bool) } -type SM[E any, S any] interface { - Advance(context.Context, E) S -} - type WorkQueueFunc[E BehaviourEvent] func(context.Context, E) bool // WorkQueue is buffered queue of work to be performed. diff --git a/v2/internal/coord/brdcst.go b/v2/internal/coord/brdcst.go new file mode 100644 index 00000000..0c4dadfc --- /dev/null +++ b/v2/internal/coord/brdcst.go @@ -0,0 +1,230 @@ +package coord + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/trace" + "golang.org/x/exp/slog" + + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/brdcst" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" +) + +type PooledBroadcastBehaviour struct { + pool coordt.StateMachine[brdcst.PoolEvent, brdcst.PoolState] + waiters map[coordt.QueryID]NotifyCloser[BehaviourEvent] + + pendingMu sync.Mutex + pending []BehaviourEvent + ready chan struct{} + + logger *slog.Logger + tracer trace.Tracer +} + +var _ Behaviour[BehaviourEvent, BehaviourEvent] = (*PooledBroadcastBehaviour)(nil) + +func NewPooledBroadcastBehaviour(brdcstPool *brdcst.Pool[kadt.Key, kadt.PeerID, *pb.Message], logger *slog.Logger, tracer trace.Tracer) *PooledBroadcastBehaviour { + b := &PooledBroadcastBehaviour{ + pool: brdcstPool, + waiters: make(map[coordt.QueryID]NotifyCloser[BehaviourEvent]), + ready: make(chan struct{}, 1), + logger: logger.With("behaviour", "pooledBroadcast"), + tracer: tracer, + } + return b +} + +func (b *PooledBroadcastBehaviour) Ready() <-chan struct{} { + return b.ready +} + +func (b *PooledBroadcastBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { + ctx, span := b.tracer.Start(ctx, "PooledBroadcastBehaviour.Notify") + defer span.End() + + b.pendingMu.Lock() + defer b.pendingMu.Unlock() + + var cmd brdcst.PoolEvent + switch ev := ev.(type) { + case *EventStartBroadcast: + cmd = &brdcst.EventPoolStartBroadcast[kadt.Key, kadt.PeerID, *pb.Message]{ + QueryID: ev.QueryID, + Target: ev.Target, + Message: ev.Message, + Seed: ev.Seed, + Config: ev.Config, + } + if ev.Notify != nil { + 
b.waiters[ev.QueryID] = ev.Notify + } + + case *EventGetCloserNodesSuccess: + for _, info := range ev.CloserNodes { + b.pending = append(b.pending, &EventAddNode{ + NodeID: info, + }) + } + + waiter, ok := b.waiters[ev.QueryID] + if ok { + waiter.Notify(ctx, &EventQueryProgressed{ + NodeID: ev.To, + QueryID: ev.QueryID, + }) + } + + cmd = &brdcst.EventPoolGetCloserNodesSuccess[kadt.Key, kadt.PeerID]{ + NodeID: ev.To, + QueryID: ev.QueryID, + Target: ev.Target, + CloserNodes: ev.CloserNodes, + } + + case *EventGetCloserNodesFailure: + // queue an event that will notify the routing behaviour of a failed node + b.pending = append(b.pending, &EventNotifyNonConnectivity{ + ev.To, + }) + + cmd = &brdcst.EventPoolGetCloserNodesFailure[kadt.Key, kadt.PeerID]{ + NodeID: ev.To, + QueryID: ev.QueryID, + Target: ev.Target, + Error: ev.Err, + } + + case *EventSendMessageSuccess: + for _, info := range ev.CloserNodes { + b.pending = append(b.pending, &EventAddNode{ + NodeID: info, + }) + } + waiter, ok := b.waiters[ev.QueryID] + if ok { + waiter.Notify(ctx, &EventQueryProgressed{ + NodeID: ev.To, + QueryID: ev.QueryID, + Response: ev.Response, + }) + } + // TODO: How do we know it's a StoreRecord response? + cmd = &brdcst.EventPoolStoreRecordSuccess[kadt.Key, kadt.PeerID, *pb.Message]{ + QueryID: ev.QueryID, + NodeID: ev.To, + Request: ev.Request, + Response: ev.Response, + } + + case *EventSendMessageFailure: + // queue an event that will notify the routing behaviour of a failed node + b.pending = append(b.pending, &EventNotifyNonConnectivity{ + ev.To, + }) + + // TODO: How do we know it's a StoreRecord response? + cmd = &brdcst.EventPoolStoreRecordFailure[kadt.Key, kadt.PeerID, *pb.Message]{ + NodeID: ev.To, + QueryID: ev.QueryID, + Request: ev.Request, + Error: ev.Err, + } + + case *EventStopQuery: + cmd = &brdcst.EventPoolStopBroadcast{ + QueryID: ev.QueryID, + } + } + + // attempt to advance the broadcast pool + ev, ok := b.advancePool(ctx, cmd) + if ok { + b.pending = append(b.pending, ev) + } + if len(b.pending) > 0 { + select { + case b.ready <- struct{}{}: + default: + } + } +} + +func (b *PooledBroadcastBehaviour) Perform(ctx context.Context) (BehaviourEvent, bool) { + ctx, span := b.tracer.Start(ctx, "PooledBroadcastBehaviour.Perform") + defer span.End() + + // No inbound work can be done until Perform is complete + b.pendingMu.Lock() + defer b.pendingMu.Unlock() + + for { + // drain queued events first. 
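+		// events queued by Notify are returned before the pool is advanced, so
+		// that responses reach their waiters before new work is generated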
+		if len(b.pending) > 0 {
+			var ev BehaviourEvent
+			ev, b.pending = b.pending[0], b.pending[1:]
+
+			if len(b.pending) > 0 {
+				select {
+				case b.ready <- struct{}{}:
+				default:
+				}
+			}
+			return ev, true
+		}
+
+		ev, ok := b.advancePool(ctx, &brdcst.EventPoolPoll{})
+		if ok {
+			return ev, true
+		}
+
+		// finally check if any pending events were accumulated in the meantime
+		if len(b.pending) == 0 {
+			return nil, false
+		}
+	}
+}
+
+func (b *PooledBroadcastBehaviour) advancePool(ctx context.Context, ev brdcst.PoolEvent) (out BehaviourEvent, term bool) {
+	ctx, span := b.tracer.Start(ctx, "PooledBroadcastBehaviour.advancePool", trace.WithAttributes(tele.AttrInEvent(ev)))
+	defer func() {
+		span.SetAttributes(tele.AttrOutEvent(out))
+		span.End()
+	}()
+
+	pstate := b.pool.Advance(ctx, ev)
+	switch st := pstate.(type) {
+	case *brdcst.StatePoolIdle:
+		// nothing to do
+	case *brdcst.StatePoolFindCloser[kadt.Key, kadt.PeerID]:
+		return &EventOutboundGetCloserNodes{
+			QueryID: st.QueryID,
+			To:      st.NodeID,
+			Target:  st.Target,
+			Notify:  b,
+		}, true
+	case *brdcst.StatePoolStoreRecord[kadt.Key, kadt.PeerID, *pb.Message]:
+		return &EventOutboundSendMessage{
+			QueryID: st.QueryID,
+			To:      st.NodeID,
+			Message: st.Message,
+			Notify:  b,
+		}, true
+	case *brdcst.StatePoolBroadcastFinished[kadt.Key, kadt.PeerID]:
+		waiter, ok := b.waiters[st.QueryID]
+		if ok {
+			waiter.Notify(ctx, &EventBroadcastFinished{
+				QueryID:   st.QueryID,
+				Contacted: st.Contacted,
+				Errors:    st.Errors,
+			})
+			waiter.Close()
+		}
+	}
+
+	return nil, false
+}
diff --git a/v2/internal/coord/brdcst/brdcst.go b/v2/internal/coord/brdcst/brdcst.go
new file mode 100644
index 00000000..5d16b973
--- /dev/null
+++ b/v2/internal/coord/brdcst/brdcst.go
@@ -0,0 +1,143 @@
+package brdcst
+
+import (
+	"github.com/plprobelab/go-kademlia/kad"
+
+	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt"
+)
+
+// BroadcastState must be implemented by all states that a [Broadcast] state
+// machine can reach. There are multiple different broadcast state machines,
+// and they all have in common that they "emit" a [BroadcastState] and accept
+// a [BroadcastEvent]. Recall, states are basically the "events" that a state
+// machine emits which other state machines or behaviours could react upon.
+type BroadcastState interface {
+	broadcastState()
+}
+
+// StateBroadcastFindCloser indicates to the broadcast [Pool] or any other upper
+// layer that a [Broadcast] state machine wants to query the given node (NodeID)
+// for closer nodes to the target key (Target).
+type StateBroadcastFindCloser[K kad.Key[K], N kad.NodeID[K]] struct {
+	QueryID coordt.QueryID // the id of the broadcast operation that wants to send the message
+	NodeID  N              // the node to send the message to
+	Target  K              // the key that the query wants to find closer nodes for
+}
+
+// StateBroadcastStoreRecord indicates to the broadcast [Pool] or any other
+// upper layer that a [Broadcast] state machine wants to store a record using
+// the given Message with the given NodeID.
+type StateBroadcastStoreRecord[K kad.Key[K], N kad.NodeID[K], M coordt.Message] struct {
+	QueryID coordt.QueryID // the id of the broadcast operation that wants to send the message
+	NodeID  N              // the node to send the message to
+	Message M              // the message the broadcast behaviour wants to send
+}
+
+// StateBroadcastWaiting indicates that a [Broadcast] state machine is waiting
+// for network I/O to finish. It means the state machine isn't idle, but that
+// there are operations in-flight that it is waiting on to finish.
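+// Upper layers usually treat this state as a no-op: there is nothing to
+// schedule until one of the in-flight responses arrives.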
+type StateBroadcastWaiting struct {
+	QueryID coordt.QueryID // the id of the broadcast operation that is waiting
+}
+
+// StateBroadcastFinished indicates that a [Broadcast] state machine has
+// finished its operation. During that operation, all nodes in Contacted have
+// been contacted to store the record. The Contacted slice does not contain
+// the nodes we have queried to find the closest nodes to the target key - only
+// the ones that we eventually contacted to store the record. The Errors map
+// maps the string representation of any node N in the Contacted slice to a
+// potential error struct that contains the original Node and error. In the best
+// case, this Errors map is empty.
+type StateBroadcastFinished[K kad.Key[K], N kad.NodeID[K]] struct {
+	QueryID   coordt.QueryID      // the id of the broadcast operation that has finished
+	Contacted []N                 // all nodes we contacted to store the record (successful or not)
+	Errors    map[string]struct { // any error that occurred for any node that we contacted
+		Node N     // a node from the Contacted slice
+		Err  error // the error that happened when contacting that Node
+	}
+}
+
+// StateBroadcastIdle means that a [Broadcast] state machine has finished all of
+// its work. This state will be emitted if the state machine is polled to
+// advance its state but has already finished its operation. The last meaningful
+// state will be [StateBroadcastFinished]. Being idle is different from waiting
+// for network I/O to finish (see [StateBroadcastWaiting]).
+type StateBroadcastIdle struct{}
+
+func (*StateBroadcastFindCloser[K, N]) broadcastState()     {}
+func (*StateBroadcastStoreRecord[K, N, M]) broadcastState() {}
+func (*StateBroadcastWaiting) broadcastState()              {}
+func (*StateBroadcastFinished[K, N]) broadcastState()       {}
+func (*StateBroadcastIdle) broadcastState()                 {}
+
+// BroadcastEvent is an event intended to advance the state of a [Broadcast]
+// state machine. [Broadcast] state machines only operate on events that
+// implement this interface. An "Event" is the opposite of a "State." An "Event"
+// flows into the state machine and a "State" flows out of it.
+//
+// Currently, there are the [FollowUp] and [Optimistic] state machines.
+type BroadcastEvent interface {
+	broadcastEvent()
+}
+
+// EventBroadcastPoll is an event that signals a [Broadcast] state machine that
+// it can perform housekeeping work such as time out queries.
+type EventBroadcastPoll struct{}
+
+// EventBroadcastStart is an event that instructs a broadcast state machine to
+// start the operation.
+type EventBroadcastStart[K kad.Key[K], N kad.NodeID[K]] struct {
+	Target K   // the key we want to store the record for
+	Seed   []N // the closest nodes we know so far and from where we start the operation
+}
+
+// EventBroadcastStop notifies a [Broadcast] state machine to stop the
+// operation. This includes stopping all in-flight queries.
+type EventBroadcastStop struct{}
+
+// EventBroadcastNodeResponse notifies a [Broadcast] state machine that a remote
+// node (NodeID) has successfully responded with closer nodes (CloserNodes) to
+// the Target key that's stored on the [Broadcast] state machine.
+type EventBroadcastNodeResponse[K kad.Key[K], N kad.NodeID[K]] struct {
+	NodeID      N   // the node the message was sent to and that replied
+	CloserNodes []N // the closer nodes sent by the node
+}
+
+// EventBroadcastNodeFailure notifies a [Broadcast] state machine that a remote
+// node (NodeID) has failed responding with closer nodes to the target key.
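+// Upper layers may, in addition, report such a node as non-connective to the
+// routing behaviour (see [coord.PooledBroadcastBehaviour]).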
+type EventBroadcastNodeFailure[K kad.Key[K], N kad.NodeID[K]] struct {
+	NodeID N     // the node the message was sent to and that failed to reply
+	Error  error // the error that caused the failure, if any
+}
+
+// EventBroadcastStoreRecordSuccess notifies a [Broadcast] state
+// machine that storing a record with a remote node (NodeID) was successful. The
+// message that was sent is held in Request, and the returned value is contained
+// in Response. However, in the case of the Amino DHT, nodes do not respond with
+// a confirmation, so Response will always be nil. Check out
+// [pb.Message.ExpectResponse] for information about which requests should
+// receive a response.
+type EventBroadcastStoreRecordSuccess[K kad.Key[K], N kad.NodeID[K], M coordt.Message] struct {
+	NodeID   N // the node the message was sent to
+	Request  M // the message that was sent to the remote node
+	Response M // the reply we got from the remote node (nil in many cases of the Amino DHT)
+}
+
+// EventBroadcastStoreRecordFailure notifies a [Broadcast] state
+// machine that storing a record with a remote node (NodeID) has failed. The
+// message that was sent is held in Request, and the error will be in Error.
+type EventBroadcastStoreRecordFailure[K kad.Key[K], N kad.NodeID[K], M coordt.Message] struct {
+	NodeID  N     // the node the message was sent to
+	Request M     // the message that was sent to the remote node
+	Error   error // the error that caused the failure, if any
+}
+
+// broadcastEvent() ensures that only events accepted by a [Broadcast] state
+// machine can be assigned to the [BroadcastEvent] interface.
+func (*EventBroadcastStop) broadcastEvent()                        {}
+func (*EventBroadcastPoll) broadcastEvent()                        {}
+func (*EventBroadcastStart[K, N]) broadcastEvent()                 {}
+func (*EventBroadcastNodeResponse[K, N]) broadcastEvent()          {}
+func (*EventBroadcastNodeFailure[K, N]) broadcastEvent()           {}
+func (*EventBroadcastStoreRecordSuccess[K, N, M]) broadcastEvent() {}
+func (*EventBroadcastStoreRecordFailure[K, N, M]) broadcastEvent() {}
diff --git a/v2/internal/coord/brdcst/brdcst_test.go b/v2/internal/coord/brdcst/brdcst_test.go
new file mode 100644
index 00000000..baf1d4bc
--- /dev/null
+++ b/v2/internal/coord/brdcst/brdcst_test.go
@@ -0,0 +1,35 @@
+package brdcst
+
+import (
+	"testing"
+
+	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/tiny"
+)
+
+func TestBroadcastState_interface_conformance(t *testing.T) {
+	states := []BroadcastState{
+		&StateBroadcastIdle{},
+		&StateBroadcastWaiting{},
+		&StateBroadcastStoreRecord[tiny.Key, tiny.Node, tiny.Message]{},
+		&StateBroadcastFindCloser[tiny.Key, tiny.Node]{},
+		&StateBroadcastFinished[tiny.Key, tiny.Node]{},
+	}
+	for _, st := range states {
+		st.broadcastState() // drives test coverage
+	}
+}
+
+func TestBroadcastEvent_interface_conformance(t *testing.T) {
+	events := []BroadcastEvent{
+		&EventBroadcastStop{},
+		&EventBroadcastPoll{},
+		&EventBroadcastStart[tiny.Key, tiny.Node]{},
+		&EventBroadcastNodeResponse[tiny.Key, tiny.Node]{},
+		&EventBroadcastNodeFailure[tiny.Key, tiny.Node]{},
+		&EventBroadcastStoreRecordSuccess[tiny.Key, tiny.Node, tiny.Message]{},
+		&EventBroadcastStoreRecordFailure[tiny.Key, tiny.Node, tiny.Message]{},
+	}
+	for _, ev := range events {
+		ev.broadcastEvent() // drives test coverage
+	}
+}
diff --git a/v2/internal/coord/brdcst/config.go b/v2/internal/coord/brdcst/config.go
new file mode 100644
index 00000000..4d6d425b
--- /dev/null
+++ b/v2/internal/coord/brdcst/config.go
@@ -0,0 +1,74 @@
+package brdcst
+
+import (
"fmt" + + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query" +) + +// ConfigPool specifies the configuration for a broadcast [Pool]. +type ConfigPool struct { + pCfg *query.PoolConfig +} + +// Validate checks the configuration options and returns an error if any have +// invalid values. +func (cfg *ConfigPool) Validate() error { + if cfg.pCfg == nil { + return fmt.Errorf("query pool config must not be nil") + } + + return nil +} + +// DefaultConfigPool returns the default configuration options for a Pool. +// Options may be overridden before passing to NewPool +func DefaultConfigPool() *ConfigPool { + return &ConfigPool{ + pCfg: query.DefaultPoolConfig(), + } +} + +// Config is an interface that all broadcast configurations must implement. +// Because we have multiple ways of broadcasting records to the network, like +// [FollowUp] or [Optimistic], the [EventPoolStartBroadcast] has a configuration +// field that depending on the concrete type of [Config] initializes the +// respective state machine. Then the broadcast operation will performed based +// on the encoded rules in that state machine. +type Config interface { + broadcastConfig() +} + +func (c *ConfigFollowUp) broadcastConfig() {} +func (c *ConfigOptimistic) broadcastConfig() {} + +// ConfigFollowUp specifies the configuration for the [FollowUp] state machine. +type ConfigFollowUp struct{} + +// Validate checks the configuration options and returns an error if any have +// invalid values. +func (c *ConfigFollowUp) Validate() error { + return nil +} + +// DefaultConfigFollowUp returns the default configuration options for the +// [FollowUp] state machine. +func DefaultConfigFollowUp() *ConfigFollowUp { + return &ConfigFollowUp{} +} + +// ConfigOptimistic specifies the configuration for the [Optimistic] state +// machine. +type ConfigOptimistic struct{} + +// Validate checks the configuration options and returns an error if any have +// invalid values. +func (c *ConfigOptimistic) Validate() error { + return nil +} + +// DefaultConfigOptimistic returns the default configuration options for the +// [Optimistic] state machine. 
+func DefaultConfigOptimistic() *ConfigOptimistic {
+	return &ConfigOptimistic{}
+}
diff --git a/v2/internal/coord/brdcst/config_test.go b/v2/internal/coord/brdcst/config_test.go
new file mode 100644
index 00000000..68447a1f
--- /dev/null
+++ b/v2/internal/coord/brdcst/config_test.go
@@ -0,0 +1,44 @@
+package brdcst
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestConfigPool_Validate(t *testing.T) {
+	t.Run("default is valid", func(t *testing.T) {
+		cfg := DefaultConfigPool()
+		assert.NoError(t, cfg.Validate())
+	})
+
+	t.Run("nil pool config", func(t *testing.T) {
+		cfg := DefaultConfigPool()
+		cfg.pCfg = nil
+		assert.Error(t, cfg.Validate())
+	})
+}
+
+func TestConfigFollowUp_Validate(t *testing.T) {
+	t.Run("default is valid", func(t *testing.T) {
+		cfg := DefaultConfigFollowUp()
+		assert.NoError(t, cfg.Validate())
+	})
+}
+
+func TestConfigOptimistic_Validate(t *testing.T) {
+	t.Run("default is valid", func(t *testing.T) {
+		cfg := DefaultConfigOptimistic()
+		assert.NoError(t, cfg.Validate())
+	})
+}
+
+func TestConfig_interface_conformance(t *testing.T) {
+	configs := []Config{
+		&ConfigFollowUp{},
+		&ConfigOptimistic{},
+	}
+	for _, c := range configs {
+		c.broadcastConfig() // drives test coverage
+	}
+}
diff --git a/v2/internal/coord/brdcst/doc.go b/v2/internal/coord/brdcst/doc.go
new file mode 100644
index 00000000..847849a0
--- /dev/null
+++ b/v2/internal/coord/brdcst/doc.go
@@ -0,0 +1,5 @@
+/*
+Package brdcst contains state machines that implement algorithms for
+broadcasting records into the DHT network.
+*/
+package brdcst
diff --git a/v2/internal/coord/brdcst/followup.go b/v2/internal/coord/brdcst/followup.go
new file mode 100644
index 00000000..27d14d30
--- /dev/null
+++ b/v2/internal/coord/brdcst/followup.go
@@ -0,0 +1,252 @@
+package brdcst
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/plprobelab/go-kademlia/kad"
+	"go.opentelemetry.io/otel/trace"
+
+	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/tele"
+)
+
+// FollowUp is a [Broadcast] state machine and encapsulates the logic around
+// doing a "classic" put operation. This mimics the algorithm employed in the
+// original go-libp2p-kad-dht v1 code base. It first queries the closest nodes
+// to a certain target key and, once they have been discovered, it "follows up"
+// by storing the record with these closest nodes.
+type FollowUp[K kad.Key[K], N kad.NodeID[K], M coordt.Message] struct {
+	// the unique ID for this broadcast operation
+	queryID coordt.QueryID
+
+	// a struct holding configuration options
+	cfg *ConfigFollowUp
+
+	// a reference to the query pool in which the "get closer nodes" queries
+	// will be spawned. This pool is governed by the broadcast [Pool].
+	// Unfortunately, having a reference here breaks the hierarchy but it makes
+	// the logic much easier to implement.
+	pool *query.Pool[K, N, M]
+
+	// the message that we will send to the closest nodes in the follow-up phase
+	msg M
+
+	// the closest nodes to the target key. This will be filled after the query
+	// for the closest nodes has finished (when the query pool emits a
+	// [query.StatePoolQueryFinished] event).
+	closest []N
+
+	// nodes we still need to store records with. This map will be filled with
+	// all the closest nodes after the query has finished.
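+	// This map, like the waiting, success, and failed maps below, is keyed by
+	// the string representation of the nodes (N.String()).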
+ todo map[string]N + + // nodes we have contacted to store the record but haven't heard a response yet + waiting map[string]N + + // nodes that successfully hold the record for us + success map[string]N + + // nodes that failed to hold the record for us + failed map[string]struct { + Node N + Err error + } +} + +// NewFollowUp initializes a new [FollowUp] struct. +func NewFollowUp[K kad.Key[K], N kad.NodeID[K], M coordt.Message](qid coordt.QueryID, pool *query.Pool[K, N, M], msg M, cfg *ConfigFollowUp) *FollowUp[K, N, M] { + return &FollowUp[K, N, M]{ + queryID: qid, + cfg: cfg, + pool: pool, + msg: msg, + todo: map[string]N{}, + waiting: map[string]N{}, + success: map[string]N{}, + failed: map[string]struct { + Node N + Err error + }{}, + } +} + +// Advance advances the state of the [FollowUp] [Broadcast] state machine. It +// first handles the event by mapping it to a potential event for the query +// pool. If the [BroadcastEvent] maps to a [query.PoolEvent], it gets forwarded +// to the query pool and handled in [FollowUp.advancePool]. If it doesn't map to +// a query pool event, we check if there are any nodes we should contact to hold +// the record for us and emit that instruction instead. Similarly, if we're +// waiting on responses or are completely finished, we return that as well. +func (f *FollowUp[K, N, M]) Advance(ctx context.Context, ev BroadcastEvent) (out BroadcastState) { + ctx, span := tele.StartSpan(ctx, "FollowUp.Advance", trace.WithAttributes(tele.AttrInEvent(ev))) + defer func() { + span.SetAttributes(tele.AttrOutEvent(out)) + span.End() + }() + + pev := f.handleEvent(ctx, ev) + if pev != nil { + if state, terminal := f.advancePool(ctx, pev); terminal { + return state + } + } + + _, isStopEvent := ev.(*EventBroadcastStop) + if isStopEvent { + for _, n := range f.todo { + delete(f.todo, n.String()) + f.failed[n.String()] = struct { + Node N + Err error + }{Node: n, Err: fmt.Errorf("cancelled")} + } + + for _, n := range f.waiting { + delete(f.waiting, n.String()) + f.failed[n.String()] = struct { + Node N + Err error + }{Node: n, Err: fmt.Errorf("cancelled")} + } + } + + for k, n := range f.todo { + delete(f.todo, k) + f.waiting[k] = n + return &StateBroadcastStoreRecord[K, N, M]{ + QueryID: f.queryID, + NodeID: n, + Message: f.msg, + } + } + + if len(f.waiting) > 0 { + return &StateBroadcastWaiting{} + } + + if isStopEvent || (len(f.todo) == 0 && len(f.closest) != 0) { + return &StateBroadcastFinished[K, N]{ + QueryID: f.queryID, + Contacted: f.closest, + Errors: f.failed, + } + } + + return &StateBroadcastIdle{} +} + +// handleEvent receives a [BroadcastEvent] and returns the corresponding query +// pool event ([query.PoolEvent]). Some [BroadcastEvent] events don't map to +// a query pool event, in which case this method handles that event and returns +// nil. 
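+// As a side effect, it keeps the follow-up bookkeeping (waiting, success,
+// failed) up to date for store-record responses.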
+func (f *FollowUp[K, N, M]) handleEvent(ctx context.Context, ev BroadcastEvent) (out query.PoolEvent) {
+	_, span := tele.StartSpan(ctx, "FollowUp.handleEvent", trace.WithAttributes(tele.AttrInEvent(ev)))
+	defer func() {
+		span.SetAttributes(tele.AttrOutEvent(out))
+		span.End()
+	}()
+
+	switch ev := ev.(type) {
+	case *EventBroadcastStart[K, N]:
+		return &query.EventPoolAddFindCloserQuery[K, N]{
+			QueryID: f.queryID,
+			Target:  ev.Target,
+			Seed:    ev.Seed,
+		}
+	case *EventBroadcastStop:
+		if f.isQueryDone() {
+			return nil
+		}
+
+		return &query.EventPoolStopQuery{
+			QueryID: f.queryID,
+		}
+	case *EventBroadcastNodeResponse[K, N]:
+		return &query.EventPoolNodeResponse[K, N]{
+			QueryID:     f.queryID,
+			NodeID:      ev.NodeID,
+			CloserNodes: ev.CloserNodes,
+		}
+	case *EventBroadcastNodeFailure[K, N]:
+		return &query.EventPoolNodeFailure[K, N]{
+			QueryID: f.queryID,
+			NodeID:  ev.NodeID,
+			Error:   ev.Error,
+		}
+	case *EventBroadcastStoreRecordSuccess[K, N, M]:
+		delete(f.waiting, ev.NodeID.String())
+		f.success[ev.NodeID.String()] = ev.NodeID
+	case *EventBroadcastStoreRecordFailure[K, N, M]:
+		delete(f.waiting, ev.NodeID.String())
+		f.failed[ev.NodeID.String()] = struct {
+			Node N
+			Err  error
+		}{Node: ev.NodeID, Err: ev.Error}
+	case *EventBroadcastPoll:
+		// nothing to do here; pass the poll on to the query pool
+		return &query.EventPoolPoll{}
+	default:
+		panic(fmt.Sprintf("unexpected event: %T", ev))
+	}
+
+	return nil
+}
+
+// advancePool advances the query pool with the given query pool event that was
+// returned by [FollowUp.handleEvent]. The additional boolean value indicates
+// whether the returned [BroadcastState] is valid and should be returned to the
+// caller; if it is false, the state should be ignored.
+func (f *FollowUp[K, N, M]) advancePool(ctx context.Context, ev query.PoolEvent) (out BroadcastState, term bool) {
+	ctx, span := tele.StartSpan(ctx, "FollowUp.advancePool", trace.WithAttributes(tele.AttrInEvent(ev)))
+	defer func() {
+		span.SetAttributes(tele.AttrOutEvent(out))
+		span.End()
+	}()
+
+	state := f.pool.Advance(ctx, ev)
+	switch st := state.(type) {
+	case *query.StatePoolFindCloser[K, N]:
+		return &StateBroadcastFindCloser[K, N]{
+			QueryID: st.QueryID,
+			NodeID:  st.NodeID,
+			Target:  st.Target,
+		}, true
+	case *query.StatePoolWaitingAtCapacity:
+		return &StateBroadcastWaiting{
+			QueryID: f.queryID,
+		}, true
+	case *query.StatePoolWaitingWithCapacity:
+		return &StateBroadcastWaiting{
+			QueryID: f.queryID,
+		}, true
+	case *query.StatePoolQueryFinished[K, N]:
+		f.closest = st.ClosestNodes
+
+		for _, n := range st.ClosestNodes {
+			f.todo[n.String()] = n
+		}
+
+	case *query.StatePoolQueryTimeout:
+		return &StateBroadcastFinished[K, N]{
+			QueryID:   f.queryID,
+			Contacted: make([]N, 0),
+			Errors: map[string]struct {
+				Node N
+				Err  error
+			}{},
+		}, true
+	case *query.StatePoolIdle:
+		// nothing to do
+	default:
+		panic(fmt.Sprintf("unexpected pool state: %T", st))
+	}
+
+	return nil, false
+}
+
+// isQueryDone returns true if the query phase of the DHT walk has finished.
+// This is indicated by the fact that the [FollowUp.closest] slice is filled.
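+// Once that is the case, an [EventBroadcastStop] no longer needs to be
+// forwarded to the query pool (see handleEvent).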
+func (f *FollowUp[K, N, M]) isQueryDone() bool {
+	return len(f.closest) != 0
+}
diff --git a/v2/internal/coord/brdcst/pool.go b/v2/internal/coord/brdcst/pool.go
new file mode 100644
index 00000000..bba83dad
--- /dev/null
+++ b/v2/internal/coord/brdcst/pool.go
@@ -0,0 +1,347 @@
+package brdcst
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/plprobelab/go-kademlia/kad"
+	"go.opentelemetry.io/otel/trace"
+
+	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/tele"
+)
+
+// Broadcast is a type alias for a specific kind of state machine that any
+// kind of broadcast strategy state machine must implement. Currently, there
+// are the [FollowUp] and [Optimistic] state machines.
+type Broadcast = coordt.StateMachine[BroadcastEvent, BroadcastState]
+
+// Pool is a [coordt.StateMachine] that manages all running broadcast
+// operations. In the future it could limit the number of concurrent operations,
+// but right now it is just keeping track of all running broadcasts. The
+// referenced [query.Pool] is passed down to the respective broadcast state
+// machines. This is not nice because it breaks the hierarchy but makes things
+// way easier.
+//
+// Conceptually, a broadcast consists of finding the closest nodes to a certain
+// key and then storing the record with them. There are a few different
+// strategies that can be applied. For now, these are the [FollowUp] and the [Optimistic]
+// strategies. In the future, we also want to support [Reprovide Sweep].
+// However, this requires a different type of query as we are not looking for
+// the closest nodes but rather enumerating the keyspace. In any case, this
+// broadcast [Pool] would keep track of all running broadcasts.
+//
+// [Reprovide Sweep]: https://www.notion.so/pl-strflt/DHT-Reprovide-Sweep-3108adf04e9d4086bafb727b17ae033d?pvs=4
+type Pool[K kad.Key[K], N kad.NodeID[K], M coordt.Message] struct {
+	qp  *query.Pool[K, N, M]         // the query pool of "get closer peers" queries
+	bcs map[coordt.QueryID]Broadcast // all currently running broadcast operations
+	cfg ConfigPool                   // cfg is a copy of the optional configuration supplied to the Pool
+}
+
+// NewPool initializes a new broadcast pool. If cfg is nil, the
+// [DefaultConfigPool] will be used. Each broadcast pool creates its own query
+// pool ([query.Pool]). A query pool limits the number of concurrent queries
+// and already exists "stand-alone" beneath the [coord.PooledQueryBehaviour].
+// We are initializing a new one in here because:
+//  1. it allows us to apply different limits to either broadcast or ordinary
+//     "get closer nodes" queries
+//  2. the query pool logic will stay simpler
+//  3. we don't need to cross-communicate from the broadcast to the query pool
+func NewPool[K kad.Key[K], N kad.NodeID[K], M coordt.Message](self N, cfg *ConfigPool) (*Pool[K, N, M], error) {
+	if cfg == nil {
+		cfg = DefaultConfigPool()
+	} else if err := cfg.Validate(); err != nil {
+		return nil, fmt.Errorf("validate pool config: %w", err)
+	}
+
+	qp, err := query.NewPool[K, N, M](self, cfg.pCfg)
+	if err != nil {
+		return nil, fmt.Errorf("new query pool: %w", err)
+	}
+
+	return &Pool[K, N, M]{
+		qp:  qp,
+		bcs: map[coordt.QueryID]Broadcast{},
+		cfg: *cfg,
+	}, nil
+}
+
+// Advance advances the state of the broadcast [Pool]. It first handles the
+// event by extracting the broadcast state machine that should handle it from
+// the [Pool.bcs] map and constructing the correct event for that broadcast
+// state machine. If the state machine wasn't found (which shouldn't happen) or
+// there is no corresponding broadcast event ([EventPoolPoll], for example), it
+// does nothing for that event and instead tries to advance the other broadcast
+// state machines.
+func (p *Pool[K, N, M]) Advance(ctx context.Context, ev PoolEvent) (out PoolState) {
+	ctx, span := tele.StartSpan(ctx, "Pool.Advance", trace.WithAttributes(tele.AttrInEvent(ev)))
+	defer func() {
+		span.SetAttributes(tele.AttrOutEvent(out))
+		span.End()
+	}()
+
+	sm, bev := p.handleEvent(ctx, ev)
+	if sm != nil && bev != nil {
+		if state, terminal := p.advanceBroadcast(ctx, sm, bev); terminal {
+			return state
+		}
+	}
+
+	// advance other state machines until we have reached a terminal state in any
+	for _, bsm := range p.bcs {
+		if sm == bsm {
+			continue
+		}
+
+		state, terminal := p.advanceBroadcast(ctx, bsm, &EventBroadcastPoll{})
+		if terminal {
+			return state
+		}
+	}
+
+	return &StatePoolIdle{}
+}
+
+// handleEvent receives a broadcast [PoolEvent] and returns the corresponding
+// broadcast state machine ([FollowUp] or [Optimistic]) plus the event for that
+// state machine. If any return parameter is nil, either the pool event was for
+// an unknown query or the event doesn't need to be forwarded to the state
+// machine.
+func (p *Pool[K, N, M]) handleEvent(ctx context.Context, ev PoolEvent) (sm Broadcast, out BroadcastEvent) {
+	_, span := tele.StartSpan(ctx, "Pool.handleEvent", trace.WithAttributes(tele.AttrInEvent(ev)))
+	defer func() {
+		span.SetAttributes(tele.AttrOutEvent(out))
+		span.End()
+	}()
+
+	switch ev := ev.(type) {
+	case *EventPoolStartBroadcast[K, N, M]:
+		// first initialize the state machine for the desired broadcast strategy
+		switch cfg := ev.Config.(type) {
+		case *ConfigFollowUp:
+			p.bcs[ev.QueryID] = NewFollowUp(ev.QueryID, p.qp, ev.Message, cfg)
+		case *ConfigOptimistic:
+			panic("implement me")
+		}
+
+		// start the new state machine
+		return p.bcs[ev.QueryID], &EventBroadcastStart[K, N]{
+			Target: ev.Target,
+			Seed:   ev.Seed,
+		}
+
+	case *EventPoolStopBroadcast:
+		return p.bcs[ev.QueryID], &EventBroadcastStop{}
+
+	case *EventPoolGetCloserNodesSuccess[K, N]:
+		return p.bcs[ev.QueryID], &EventBroadcastNodeResponse[K, N]{
+			NodeID:      ev.NodeID,
+			CloserNodes: ev.CloserNodes,
+		}
+
+	case *EventPoolGetCloserNodesFailure[K, N]:
+		return p.bcs[ev.QueryID], &EventBroadcastNodeFailure[K, N]{
+			NodeID: ev.NodeID,
+			Error:  ev.Error,
+		}
+
+	case *EventPoolStoreRecordSuccess[K, N, M]:
+		return p.bcs[ev.QueryID], &EventBroadcastStoreRecordSuccess[K, N, M]{
+			NodeID:   ev.NodeID,
+			Request:  ev.Request,
+			Response: ev.Response,
+		}
+
+	case *EventPoolStoreRecordFailure[K, N, M]:
+		return p.bcs[ev.QueryID], &EventBroadcastStoreRecordFailure[K, N, M]{
+			NodeID:  ev.NodeID,
+			Request: ev.Request,
+			Error:   ev.Error,
+		}
+
+	case *EventPoolPoll:
+		// no event to process
+
+	default:
+		panic(fmt.Sprintf("unexpected event: %T", ev))
+	}
+
+	return nil, nil
+}
+
+// advanceBroadcast advances the given broadcast state machine ([FollowUp] or
+// [Optimistic]) and returns the new [Pool] state ([PoolState]). The additional
+// boolean value indicates whether the returned [PoolState] is valid and should
+// be returned to the caller; if it is false, the state should be ignored.
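+// A false value means the state machine did not produce an actionable state
+// for this event and the caller should keep advancing the other broadcasts.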
+func (p *Pool[K, N, M]) advanceBroadcast(ctx context.Context, sm Broadcast, bev BroadcastEvent) (PoolState, bool) { + ctx, span := tele.StartSpan(ctx, "Pool.advanceBroadcast", trace.WithAttributes(tele.AttrInEvent(bev))) + defer span.End() + + state := sm.Advance(ctx, bev) + switch st := state.(type) { + case *StateBroadcastFindCloser[K, N]: + return &StatePoolFindCloser[K, N]{ + QueryID: st.QueryID, + NodeID: st.NodeID, + Target: st.Target, + }, true + case *StateBroadcastWaiting: + return &StatePoolWaiting{}, true + case *StateBroadcastStoreRecord[K, N, M]: + return &StatePoolStoreRecord[K, N, M]{ + QueryID: st.QueryID, + NodeID: st.NodeID, + Message: st.Message, + }, true + case *StateBroadcastFinished[K, N]: + delete(p.bcs, st.QueryID) + return &StatePoolBroadcastFinished[K, N]{ + QueryID: st.QueryID, + Contacted: st.Contacted, + Errors: st.Errors, + }, true + } + + return nil, false +} + +// PoolState must be implemented by all states that a [Pool] can reach. States +// are basically the events that the [Pool] emits that other state machines or +// behaviours could react upon. +type PoolState interface { + poolState() +} + +// StatePoolFindCloser indicates to the broadcast behaviour that a broadcast +// state machine and indirectly the broadcast pool wants to query the given node +// (NodeID) for closer nodes to the target key (Target). +type StatePoolFindCloser[K kad.Key[K], N kad.NodeID[K]] struct { + QueryID coordt.QueryID // the id of the broadcast operation that wants to send the message + Target K // the key that the query wants to find closer nodes for + NodeID N // the node to send the message to +} + +// StatePoolWaiting indicates that the broadcast [Pool] is waiting for network +// I/O to finish. It means the [Pool] isn't idle, but there are operations +// in-flight that it is waiting on to finish. +type StatePoolWaiting struct{} + +// StatePoolStoreRecord indicates to the upper layer that the broadcast [Pool] +// wants to store a record using the given Message with the given NodeID. The +// network behaviour should take over and notify the [coord.PooledBroadcastBehaviour] +// about updates. +type StatePoolStoreRecord[K kad.Key[K], N kad.NodeID[K], M coordt.Message] struct { + QueryID coordt.QueryID // the id of the broadcast operation that wants to send the message + NodeID N // the node to send the message to + Message M // the message that should be sent to the remote node +} + +// StatePoolBroadcastFinished indicates that the broadcast operation with the +// id QueryID has finished. During that operation, all nodes in Contacted have +// been contacted to store the record. The Contacted slice does not contain +// the nodes we have queried to find the closest nodes to the target key - only +// the ones that we eventually contacted to store the record. The Errors map +// maps the string representation of any node N in the Contacted slice to a +// potential error struct that contains the original Node and error. In the best +// case, this Errors map is empty. 
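+// A minimal sketch of how a consumer could inspect the result (the variable
+// name "finished" is an assumption, not part of this API):
+//
+//	for _, n := range finished.Contacted {
+//		if fail, ok := finished.Errors[n.String()]; ok {
+//			fmt.Printf("storing with %s failed: %v\n", n, fail.Err)
+//		}
+//	}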
+type StatePoolBroadcastFinished[K kad.Key[K], N kad.NodeID[K]] struct {
+	QueryID   coordt.QueryID      // the id of the broadcast operation that has finished
+	Contacted []N                 // all nodes we contacted to store the record (successful or not)
+	Errors    map[string]struct { // any error that occurred for any node that we contacted
+		Node N     // a node from the Contacted slice
+		Err  error // the error that happened when contacting that Node
+	}
+}
+
+// StatePoolIdle means that the broadcast [Pool] is not managing any broadcast
+// operations at this time.
+type StatePoolIdle struct{}
+
+// poolState() ensures that only [PoolState]s can be returned by advancing the
+// [Pool] state machine.
+func (*StatePoolFindCloser[K, N]) poolState()           {}
+func (*StatePoolWaiting) poolState()                    {}
+func (*StatePoolStoreRecord[K, N, M]) poolState()       {}
+func (*StatePoolBroadcastFinished[K, N]) poolState()    {}
+func (*StatePoolIdle) poolState()                       {}
+
+// PoolEvent is an event intended to advance the state of the broadcast [Pool]
+// state machine. The [Pool] state machine only operates on events that
+// implement this interface. An "Event" is the opposite of a "State." An "Event"
+// flows into the state machine and a "State" flows out of it.
+type PoolEvent interface {
+	poolEvent()
+}
+
+// EventPoolPoll is an event that signals the broadcast [Pool] state machine
+// that it can perform housekeeping work such as time out queries.
+type EventPoolPoll struct{}
+
+// EventPoolStartBroadcast is an event that attempts to start a new broadcast
+// operation. This is the entry point.
+type EventPoolStartBroadcast[K kad.Key[K], N kad.NodeID[K], M coordt.Message] struct {
+	QueryID coordt.QueryID // the unique ID for this operation
+	Target  K              // the key we want to store the record for
+	Message M              // the message that we want to send to the closest peers (this encapsulates the payload we want to store)
+	Seed    []N            // the closest nodes we know so far and from where we start the operation
+	Config  Config         // the configuration for this operation. Most importantly, this defines the broadcast strategy ([FollowUp] or [Optimistic])
+}
+
+// EventPoolStopBroadcast notifies the broadcast [Pool] to stop a broadcast
+// operation.
+type EventPoolStopBroadcast struct {
+	QueryID coordt.QueryID // the id of the broadcast operation that should be stopped
+}
+
+// EventPoolGetCloserNodesSuccess notifies a [Pool] that a remote node (NodeID)
+// has successfully responded with closer nodes (CloserNodes) to the Target key
+// for the broadcast operation with the given id (QueryID).
+type EventPoolGetCloserNodesSuccess[K kad.Key[K], N kad.NodeID[K]] struct {
+	QueryID     coordt.QueryID // the id of the broadcast operation that this response belongs to
+	NodeID      N              // the node the message was sent to and that replied
+	Target      K              // the key we are searching closer nodes for
+	CloserNodes []N            // the closer nodes sent by the node NodeID
+}
+
+// EventPoolGetCloserNodesFailure notifies a [Pool] that a remote node (NodeID)
+// has failed responding with closer nodes to the Target key for the broadcast
+// operation with the given id (QueryID).
+type EventPoolGetCloserNodesFailure[K kad.Key[K], N kad.NodeID[K]] struct {
+	QueryID coordt.QueryID // the id of the query that sent the message
+	NodeID  N              // the node the message was sent to and that failed to reply
+	Target  K              // the key we are searching closer nodes for
+	Error   error          // the error that caused the failure, if any
+}
+
+// EventPoolStoreRecordSuccess notifies the broadcast [Pool] that storing a record
+// with a remote node (NodeID) was successful. The message that was sent is held
+// in Request, and the returned value is contained in Response. However, in the
+// case of the Amino DHT, nodes do not respond with a confirmation, so Response
+// will always be nil. Check out [pb.Message.ExpectResponse] for information
+// about which requests should receive a response.
+type EventPoolStoreRecordSuccess[K kad.Key[K], N kad.NodeID[K], M coordt.Message] struct {
+	QueryID  coordt.QueryID // the id of the query that sent the message
+	NodeID   N              // the node the message was sent to
+	Request  M              // the message that was sent to the remote node
+	Response M              // the reply we got from the remote node (nil in many cases of the Amino DHT)
+}
+
+// EventPoolStoreRecordFailure notifies the broadcast [Pool] that storing a record
+// with a remote node (NodeID) has failed. The message that was sent is held
+// in Request, and the error will be in Error.
+type EventPoolStoreRecordFailure[K kad.Key[K], N kad.NodeID[K], M coordt.Message] struct {
+	QueryID coordt.QueryID // the id of the query that sent the message
+	NodeID  N              // the node the message was sent to
+	Request M              // the message that was sent to the remote node
+	Error   error          // the error that caused the failure
+}
+
+// poolEvent() ensures that only events accepted by a broadcast [Pool] can be
+// assigned to the [PoolEvent] interface.
+func (*EventPoolStopBroadcast) poolEvent()                  {}
+func (*EventPoolPoll) poolEvent()                           {}
+func (*EventPoolStartBroadcast[K, N, M]) poolEvent()        {}
+func (*EventPoolGetCloserNodesSuccess[K, N]) poolEvent()    {}
+func (*EventPoolGetCloserNodesFailure[K, N]) poolEvent()    {}
+func (*EventPoolStoreRecordSuccess[K, N, M]) poolEvent()    {}
+func (*EventPoolStoreRecordFailure[K, N, M]) poolEvent()    {}
diff --git a/v2/internal/coord/brdcst/pool_test.go b/v2/internal/coord/brdcst/pool_test.go
new file mode 100644
index 00000000..f9404f3a
--- /dev/null
+++ b/v2/internal/coord/brdcst/pool_test.go
@@ -0,0 +1,301 @@
+package brdcst
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/plprobelab/go-kademlia/key"
+	"github.com/stretchr/testify/require"
+
+	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/tiny"
+)
+
+// Assert that Pool implements the common state machine interface
+var _ coordt.StateMachine[PoolEvent, PoolState] = (*Pool[tiny.Key, tiny.Node, tiny.Message])(nil)
+
+func TestPoolStopWhenNoQueries(t *testing.T) {
+	ctx := context.Background()
+	cfg := DefaultConfigPool()
+
+	self := tiny.NewNode(0)
+
+	p, err := NewPool[tiny.Key, tiny.Node, tiny.Message](self, cfg)
+	require.NoError(t, err)
+
+	state := p.Advance(ctx, &EventPoolPoll{})
+	require.IsType(t, &StatePoolIdle{}, state)
+}
+
+func TestPool_FollowUp_lifecycle(t *testing.T) {
+	// This test attempts to cover the whole lifecycle of
+	// a follow-up broadcast operation.
+	//
+	// We have a network of three peers: a, b, and c.
+	// First, we query all three while peer c fails to respond.
+	// Second, we store the record with the remaining a and b, while b fails to respond.
+
+	ctx := context.Background()
+	cfg := DefaultConfigPool()
+
+	self := tiny.NewNode(0)
+
+	p, err := NewPool[tiny.Key, tiny.Node, tiny.Message](self, cfg)
+	require.NoError(t, err)
+
+	msg := tiny.Message{Content: "store this"}
+	target := tiny.Key(0b00000001)
+	a := tiny.NewNode(0b00000100) // 4
+	b := tiny.NewNode(0b00000011) // 3
+	c := tiny.NewNode(0b00000010) // 2
+
+	queryID := coordt.QueryID("test")
+
+	state := p.Advance(ctx, &EventPoolStartBroadcast[tiny.Key, tiny.Node, tiny.Message]{
+		QueryID: queryID,
+		Target:  target,
+		Message: msg,
+		Seed:    []tiny.Node{a},
+		Config:  DefaultConfigFollowUp(),
+	})
+
+	// the query should attempt to contact the node it was given
+	st, ok := state.(*StatePoolFindCloser[tiny.Key, tiny.Node])
+	require.True(t, ok)
+
+	require.Equal(t, queryID, st.QueryID)         // the query should be the one just added
+	require.Equal(t, a, st.NodeID)                // the query should attempt to contact the node it was given
+	require.True(t, key.Equal(target, st.Target)) // with the correct target
+
+	// polling the state machine returns waiting
+	state = p.Advance(ctx, &EventPoolPoll{})
+	require.IsType(t, &StatePoolWaiting{}, state)
+
+	// notify the pool that the node was contacted successfully
+	// with a single closer node.
+	state = p.Advance(ctx, &EventPoolGetCloserNodesSuccess[tiny.Key, tiny.Node]{
+		QueryID:     queryID,
+		Target:      target,
+		NodeID:      a,
+		CloserNodes: []tiny.Node{a, b},
+	})
+
+	// the query should attempt to contact the single closer node it has found
+	st, ok = state.(*StatePoolFindCloser[tiny.Key, tiny.Node])
+	require.True(t, ok, "state is %T", state)
+
+	require.Equal(t, queryID, st.QueryID)         // the query should be the same
+	require.Equal(t, b, st.NodeID)                // the query should attempt to contact the newly discovered node
+	require.True(t, key.Equal(target, st.Target)) // with the correct target
+
+	// notify the pool that the node was contacted successfully
+	// and responded with yet another closer node.
+	state = p.Advance(ctx, &EventPoolGetCloserNodesSuccess[tiny.Key, tiny.Node]{
+		QueryID:     queryID,
+		Target:      target,
+		NodeID:      b,
+		CloserNodes: []tiny.Node{b, c}, // returns additional node
+	})
+
+	// the query should attempt to contact the new closer node it has found
+	st, ok = state.(*StatePoolFindCloser[tiny.Key, tiny.Node])
+	require.True(t, ok)
+
+	require.Equal(t, queryID, st.QueryID)         // the query should be the same
+	require.Equal(t, c, st.NodeID)                // the query should attempt to contact the newly discovered node
+	require.True(t, key.Equal(target, st.Target)) // with the correct target
+
+	// this last node times out -> start contacting the other two
+	timeoutErr := fmt.Errorf("timeout")
+	state = p.Advance(ctx, &EventPoolGetCloserNodesFailure[tiny.Key, tiny.Node]{
+		QueryID: queryID,
+		NodeID:  c,
+		Target:  target,
+		Error:   timeoutErr,
+	})
+
+	// This means we should start the follow-up phase
+	srState, ok := state.(*StatePoolStoreRecord[tiny.Key, tiny.Node, tiny.Message])
+	require.True(t, ok, "state is %T", state)
+
+	require.Equal(t, queryID, srState.QueryID)
+	firstContactedNode := srState.NodeID
+	require.True(t, a == srState.NodeID || b == srState.NodeID) // we should contact either node - there's no inherent order
+	require.Equal(t, msg.Content, srState.Message.Content)
+
+	// polling the state machine should trigger storing the record with
+	// the second node
+	state = p.Advance(ctx, &EventPoolPoll{})
+	srState, ok = state.(*StatePoolStoreRecord[tiny.Key, tiny.Node, tiny.Message])
+	require.True(t, ok, "state is %T", state)
+
+	require.Equal(t, queryID, srState.QueryID)
+	require.True(t, a == srState.NodeID || b == srState.NodeID) // we should contact either node - there's no inherent order
+	require.NotEqual(t, firstContactedNode, srState.NodeID)     // should be the other one now
+	require.Equal(t, msg.Content, srState.Message.Content)
+
+	// since we have two requests in-flight, polling should return a waiting state machine
+	state = p.Advance(ctx, &EventPoolPoll{})
+	require.IsType(t, &StatePoolWaiting{}, state)
+
+	// first response from storing the record comes back
+	state = p.Advance(ctx, &EventPoolStoreRecordSuccess[tiny.Key, tiny.Node, tiny.Message]{
+		QueryID: queryID,
+		NodeID:  a,
+		Request: msg,
+	})
+	require.IsType(t, &StatePoolWaiting{}, state)
+
+	// second response from storing the record comes back and it failed!
+ state = p.Advance(ctx, &EventPoolStoreRecordFailure[tiny.Key, tiny.Node, tiny.Message]{ + QueryID: queryID, + NodeID: b, + Request: msg, + Error: timeoutErr, + }) + + // since we have contacted all nodes we knew, the broadcast has finished + finishState, ok := state.(*StatePoolBroadcastFinished[tiny.Key, tiny.Node]) + require.True(t, ok, "state is %T", state) + + require.Equal(t, queryID, finishState.QueryID) + require.Len(t, finishState.Contacted, 2) + require.Len(t, finishState.Errors, 1) + require.Equal(t, finishState.Errors[b.String()].Node, b) + require.Equal(t, finishState.Errors[b.String()].Err, timeoutErr) + + state = p.Advance(ctx, &EventPoolPoll{}) + require.IsType(t, &StatePoolIdle{}, state) + + require.Nil(t, p.bcs[queryID]) // should have been removed +} + +func TestPool_FollowUp_stop_during_query(t *testing.T) { + // This test attempts to cover the case where a followup broadcast operation + // is cancelled during the query phase + + ctx := context.Background() + cfg := DefaultConfigPool() + + self := tiny.NewNode(0) + + p, err := NewPool[tiny.Key, tiny.Node, tiny.Message](self, cfg) + require.NoError(t, err) + + msg := tiny.Message{Content: "store this"} + target := tiny.Key(0b00000001) + a := tiny.NewNode(0b00000100) // 4 + + queryID := coordt.QueryID("test") + + state := p.Advance(ctx, &EventPoolStartBroadcast[tiny.Key, tiny.Node, tiny.Message]{ + QueryID: queryID, + Target: target, + Message: msg, + Seed: []tiny.Node{a}, + Config: DefaultConfigFollowUp(), + }) + + // the query should attempt to contact the node it was given + st, ok := state.(*StatePoolFindCloser[tiny.Key, tiny.Node]) + require.True(t, ok, "state is %T", state) + + require.Equal(t, queryID, st.QueryID) // the query should be the one just added + require.Equal(t, a, st.NodeID) // the query should attempt to contact the node it was given + require.True(t, key.Equal(target, st.Target)) // with the correct target + + // polling the state machine returns waiting + state = p.Advance(ctx, &EventPoolPoll{}) + require.IsType(t, &StatePoolWaiting{}, state) + + state = p.Advance(ctx, &EventPoolStopBroadcast{ + QueryID: queryID, + }) + finish, ok := state.(*StatePoolBroadcastFinished[tiny.Key, tiny.Node]) + require.True(t, ok, "state is %T", state) + require.Len(t, finish.Contacted, 0) +} + +func TestPool_FollowUp_stop_during_followup_phase(t *testing.T) { + ctx := context.Background() + cfg := DefaultConfigPool() + + self := tiny.NewNode(0) + + p, err := NewPool[tiny.Key, tiny.Node, tiny.Message](self, cfg) + require.NoError(t, err) + + msg := tiny.Message{Content: "store this"} + target := tiny.Key(0b00000001) + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00000011) // 3 + + queryID := coordt.QueryID("test") + + state := p.Advance(ctx, &EventPoolStartBroadcast[tiny.Key, tiny.Node, tiny.Message]{ + QueryID: queryID, + Target: target, + Message: msg, + Seed: []tiny.Node{a, b}, + Config: DefaultConfigFollowUp(), + }) + + require.IsType(t, &StatePoolFindCloser[tiny.Key, tiny.Node]{}, state) + state = p.Advance(ctx, &EventPoolPoll{}) + require.IsType(t, &StatePoolFindCloser[tiny.Key, tiny.Node]{}, state) + + state = p.Advance(ctx, &EventPoolGetCloserNodesSuccess[tiny.Key, tiny.Node]{ + QueryID: queryID, + Target: target, + NodeID: a, + CloserNodes: []tiny.Node{a, b}, + }) + require.IsType(t, &StatePoolWaiting{}, state) + + state = p.Advance(ctx, &EventPoolGetCloserNodesSuccess[tiny.Key, tiny.Node]{ + QueryID: queryID, + Target: target, + NodeID: b, + CloserNodes: []tiny.Node{a, b}, + }) + require.IsType(t, 
&StatePoolStoreRecord[tiny.Key, tiny.Node, tiny.Message]{}, state)
+
+	state = p.Advance(ctx, &EventPoolStopBroadcast{
+		QueryID: queryID,
+	})
+
+	st, ok := state.(*StatePoolBroadcastFinished[tiny.Key, tiny.Node])
+	require.True(t, ok, "state is %T", state)
+	require.Equal(t, st.QueryID, queryID)
+	require.Len(t, st.Contacted, 2)
+	require.Len(t, st.Errors, 2)
+}
+
+func TestPoolState_interface_conformance(t *testing.T) {
+	states := []PoolState{
+		&StatePoolIdle{},
+		&StatePoolWaiting{},
+		&StatePoolStoreRecord[tiny.Key, tiny.Node, tiny.Message]{},
+		&StatePoolFindCloser[tiny.Key, tiny.Node]{},
+		&StatePoolBroadcastFinished[tiny.Key, tiny.Node]{},
+	}
+	for _, st := range states {
+		st.poolState() // drives test coverage
+	}
+}
+
+func TestPoolEvent_interface_conformance(t *testing.T) {
+	events := []PoolEvent{
+		&EventPoolStopBroadcast{},
+		&EventPoolPoll{},
+		&EventPoolStartBroadcast[tiny.Key, tiny.Node, tiny.Message]{},
+		&EventPoolGetCloserNodesSuccess[tiny.Key, tiny.Node]{},
+		&EventPoolGetCloserNodesFailure[tiny.Key, tiny.Node]{},
+		&EventPoolStoreRecordSuccess[tiny.Key, tiny.Node, tiny.Message]{},
+		&EventPoolStoreRecordFailure[tiny.Key, tiny.Node, tiny.Message]{},
+	}
+	for _, ev := range events {
+		ev.poolEvent() // drives test coverage
+	}
+}
diff --git a/v2/internal/coord/brdcst_events.go b/v2/internal/coord/brdcst_events.go
new file mode 100644
index 00000000..ac016c25
--- /dev/null
+++ b/v2/internal/coord/brdcst_events.go
@@ -0,0 +1,34 @@
+package coord
+
+import (
+	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/brdcst"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/pb"
+)
+
+// EventStartBroadcast starts a new broadcast operation.
+type EventStartBroadcast struct {
+	QueryID coordt.QueryID
+	Target  kadt.Key
+	Message *pb.Message
+	Seed    []kadt.PeerID
+	Config  brdcst.Config
+	Notify  NotifyCloser[BehaviourEvent]
+}
+
+func (*EventStartBroadcast) behaviourEvent() {}
+
+// EventBroadcastFinished is emitted by the coordinator when broadcasting
+// a record to the network has finished, either through running to completion or
+// by being canceled.
+type EventBroadcastFinished struct { + QueryID coordt.QueryID + Contacted []kadt.PeerID + Errors map[string]struct { + Node kadt.PeerID + Err error + } +} + +func (*EventBroadcastFinished) behaviourEvent() {} diff --git a/v2/internal/coord/coordinator.go b/v2/internal/coord/coordinator.go index d3d619b2..18e09467 100644 --- a/v2/internal/coord/coordinator.go +++ b/v2/internal/coord/coordinator.go @@ -20,6 +20,8 @@ import ( "go.uber.org/zap/exp/zapslog" "golang.org/x/exp/slog" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/brdcst" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/routing" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" @@ -46,7 +48,7 @@ type Coordinator struct { rt kad.RoutingTable[kadt.Key, kadt.PeerID] // rtr is the message router used to send messages - rtr Router[kadt.Key, kadt.PeerID, *pb.Message] + rtr coordt.Router[kadt.Key, kadt.PeerID, *pb.Message] // networkBehaviour is the behaviour responsible for communicating with the network networkBehaviour *NetworkBehaviour @@ -57,6 +59,9 @@ type Coordinator struct { // queryBehaviour is the behaviour responsible for running user-submitted queries queryBehaviour Behaviour[BehaviourEvent, BehaviourEvent] + // brdcstBehaviour is the behaviour responsible for running user-submitted queries to store records with nodes + brdcstBehaviour Behaviour[BehaviourEvent, BehaviourEvent] + // tele provides tracing and metric reporting capabilities tele *Telemetry @@ -162,7 +167,7 @@ func DefaultCoordinatorConfig() *CoordinatorConfig { } } -func NewCoordinator(self kadt.PeerID, rtr Router[kadt.Key, kadt.PeerID, *pb.Message], rt routing.RoutingTableCpl[kadt.Key, kadt.PeerID], cfg *CoordinatorConfig) (*Coordinator, error) { +func NewCoordinator(self kadt.PeerID, rtr coordt.Router[kadt.Key, kadt.PeerID, *pb.Message], rt routing.RoutingTableCpl[kadt.Key, kadt.PeerID], cfg *CoordinatorConfig) (*Coordinator, error) { if cfg == nil { cfg = DefaultCoordinatorConfig() } else if err := cfg.Validate(); err != nil { @@ -194,7 +199,7 @@ func NewCoordinator(self kadt.PeerID, rtr Router[kadt.Key, kadt.PeerID, *pb.Mess bootstrapCfg.RequestConcurrency = cfg.RequestConcurrency bootstrapCfg.RequestTimeout = cfg.RequestTimeout - bootstrap, err := routing.NewBootstrap(kadt.PeerID(self), bootstrapCfg) + bootstrap, err := routing.NewBootstrap(self, bootstrapCfg) if err != nil { return nil, fmt.Errorf("bootstrap: %w", err) } @@ -228,6 +233,13 @@ func NewCoordinator(self kadt.PeerID, rtr Router[kadt.Key, kadt.PeerID, *pb.Mess networkBehaviour := NewNetworkBehaviour(rtr, cfg.Logger, tele.Tracer) + b, err := brdcst.NewPool[kadt.Key, kadt.PeerID, *pb.Message](self, nil) + if err != nil { + return nil, fmt.Errorf("broadcast: %w", err) + } + + brdcstBehaviour := NewPooledBroadcastBehaviour(b, cfg.Logger, tele.Tracer) + ctx, cancel := context.WithCancel(context.Background()) d := &Coordinator{ @@ -242,7 +254,9 @@ func NewCoordinator(self kadt.PeerID, rtr Router[kadt.Key, kadt.PeerID, *pb.Mess networkBehaviour: networkBehaviour, routingBehaviour: routingBehaviour, queryBehaviour: queryBehaviour, - routingNotifier: nullRoutingNotifier{}, + brdcstBehaviour: brdcstBehaviour, + + routingNotifier: nullRoutingNotifier{}, } go d.eventLoop(ctx) @@ -266,9 +280,11 @@ func (c *Coordinator) eventLoop(ctx context.Context) { ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.eventLoop") defer span.End() + for { var ev BehaviourEvent var ok bool + 
select { case <-ctx.Done(): // coordinator is closing @@ -279,6 +295,8 @@ func (c *Coordinator) eventLoop(ctx context.Context) { ev, ok = c.routingBehaviour.Perform(ctx) case <-c.queryBehaviour.Ready(): ev, ok = c.queryBehaviour.Perform(ctx) + case <-c.brdcstBehaviour.Ready(): + ev, ok = c.brdcstBehaviour.Perform(ctx) } if ok { @@ -296,6 +314,8 @@ func (c *Coordinator) dispatchEvent(ctx context.Context, ev BehaviourEvent) { c.networkBehaviour.Notify(ctx, ev) case QueryCommand: c.queryBehaviour.Notify(ctx, ev) + case BrdcstCommand: + c.brdcstBehaviour.Notify(ctx, ev) case RoutingCommand: c.routingBehaviour.Notify(ctx, ev) case RoutingNotification: @@ -316,11 +336,11 @@ func (c *Coordinator) SetRoutingNotifier(rn RoutingNotifier) { // GetNode retrieves the node associated with the given node id from the DHT's local routing table. // If the node isn't found in the table, it returns ErrNodeNotFound. -func (c *Coordinator) GetNode(ctx context.Context, id kadt.PeerID) (Node, error) { +func (c *Coordinator) GetNode(ctx context.Context, id kadt.PeerID) (coordt.Node, error) { ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.GetNode") defer span.End() if _, exists := c.rt.GetNode(id.Key()); !exists { - return nil, ErrNodeNotFound + return nil, coordt.ErrNodeNotFound } nh, err := c.networkBehaviour.getNodeHandler(ctx, id) @@ -331,11 +351,11 @@ func (c *Coordinator) GetNode(ctx context.Context, id kadt.PeerID) (Node, error) } // GetClosestNodes requests the n closest nodes to the key from the node's local routing table. -func (c *Coordinator) GetClosestNodes(ctx context.Context, k kadt.Key, n int) ([]Node, error) { +func (c *Coordinator) GetClosestNodes(ctx context.Context, k kadt.Key, n int) ([]coordt.Node, error) { ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.GetClosestNodes") defer span.End() closest := c.rt.NearestNodes(k, n) - nodes := make([]Node, 0, len(closest)) + nodes := make([]coordt.Node, 0, len(closest)) for _, id := range closest { nh, err := c.networkBehaviour.getNodeHandler(ctx, id) if err != nil { @@ -348,13 +368,13 @@ func (c *Coordinator) GetClosestNodes(ctx context.Context, k kadt.Key, n int) ([ // GetValue requests that the node return any value associated with the supplied key. // If the node does not have a value for the key it returns ErrValueNotFound. -func (c *Coordinator) GetValue(ctx context.Context, k kadt.Key) (Value, error) { +func (c *Coordinator) GetValue(ctx context.Context, k kadt.Key) (coordt.Value, error) { panic("not implemented") } // PutValue requests that the node stores a value to be associated with the supplied key. // If the node cannot or chooses not to store the value for the key it returns ErrValueNotAccepted. -func (c *Coordinator) PutValue(ctx context.Context, r Value, q int) error { +func (c *Coordinator) PutValue(ctx context.Context, r coordt.Value, q int) error { panic("not implemented") } @@ -369,7 +389,7 @@ func (c *Coordinator) PutValue(ctx context.Context, r Value, q int) error { // numResults specifies the minimum number of nodes to successfully contact before considering iteration complete. // The query is considered to be exhausted when it has received responses from at least this number of nodes // and there are no closer nodes remaining to be contacted. A default of 20 is used if this value is less than 1. 
-func (c *Coordinator) QueryClosest(ctx context.Context, target kadt.Key, fn QueryFunc, numResults int) ([]kadt.PeerID, QueryStats, error) {
+func (c *Coordinator) QueryClosest(ctx context.Context, target kadt.Key, fn coordt.QueryFunc, numResults int) ([]kadt.PeerID, coordt.QueryStats, error) {
 	ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.Query")
 	defer span.End()
 
@@ -378,16 +398,16 @@ func (c *Coordinator) QueryClosest(ctx context.Context, target kadt.Key, fn Quer
 
 	seeds, err := c.GetClosestNodes(ctx, target, 20)
 	if err != nil {
-		return nil, QueryStats{}, err
+		return nil, coordt.QueryStats{}, err
 	}
 
 	seedIDs := make([]kadt.PeerID, 0, len(seeds))
 	for _, s := range seeds {
-		seedIDs = append(seedIDs, kadt.PeerID(s.ID()))
+		seedIDs = append(seedIDs, s.ID())
 	}
 
 	waiter := NewWaiter[BehaviourEvent]()
-	queryID := c.newQueryID()
+	queryID := c.newOperationID()
 
 	cmd := &EventStartFindCloserQuery{
 		QueryID: queryID,
@@ -414,7 +434,7 @@
 // numResults specifies the minimum number of nodes to successfully contact before considering iteration complete.
 // The query is considered to be exhausted when it has received responses from at least this number of nodes
 // and there are no closer nodes remaining to be contacted. A default of 20 is used if this value is less than 1.
-func (c *Coordinator) QueryMessage(ctx context.Context, msg *pb.Message, fn QueryFunc, numResults int) (QueryStats, error) {
+func (c *Coordinator) QueryMessage(ctx context.Context, msg *pb.Message, fn coordt.QueryFunc, numResults int) (coordt.QueryStats, error) {
 	ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.QueryMessage")
 	defer span.End()
 
@@ -427,16 +447,16 @@
 
 	seeds, err := c.GetClosestNodes(ctx, msg.Target(), numResults)
 	if err != nil {
-		return QueryStats{}, err
+		return coordt.QueryStats{}, err
 	}
 
 	seedIDs := make([]kadt.PeerID, 0, len(seeds))
 	for _, s := range seeds {
-		seedIDs = append(seedIDs, kadt.PeerID(s.ID()))
+		seedIDs = append(seedIDs, s.ID())
 	}
 
 	waiter := NewWaiter[BehaviourEvent]()
-	queryID := c.newQueryID()
+	queryID := c.newOperationID()
 
 	cmd := &EventStartMessageQuery{
 		QueryID: queryID,
@@ -454,8 +474,47 @@ func (c *Coordinator) QueryMessage(ctx context.Context, msg *pb.Message, fn Quer
 	return stats, err
 }
 
-func (c *Coordinator) waitForQuery(ctx context.Context, queryID query.QueryID, waiter *Waiter[BehaviourEvent], fn QueryFunc) ([]kadt.PeerID, QueryStats, error) {
-	var lastStats QueryStats
+func (c *Coordinator) BroadcastRecord(ctx context.Context, msg *pb.Message) error {
+	ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.BroadcastRecord")
+	defer span.End()
+
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	seeds, err := c.GetClosestNodes(ctx, msg.Target(), 20)
+	if err != nil {
+		return err
+	}
+
+	seedIDs := make([]kadt.PeerID, 0, len(seeds))
+	for _, s := range seeds {
+		seedIDs = append(seedIDs, s.ID())
+	}
+
+	waiter := NewWaiter[BehaviourEvent]()
+	queryID := c.newOperationID()
+
+	cmd := &EventStartBroadcast{
+		QueryID: queryID,
+		Target:  msg.Target(),
+		Message: msg,
+		Seed:    seedIDs,
+		Notify:  waiter,
+		Config:  brdcst.DefaultConfigFollowUp(),
+	}
+
+	// queue the start of the broadcast
+	c.brdcstBehaviour.Notify(ctx, cmd)
+
+	contacted, errs, err := c.waitForBroadcast(ctx, waiter)
+	_ = contacted // TODO: expose the contacted peers to the caller
+	_ = errs      // TODO: expose the per-node errors to the caller
+
+	return err
+}
+
+func (c *Coordinator) waitForQuery(ctx context.Context, queryID coordt.QueryID, waiter *Waiter[BehaviourEvent], fn coordt.QueryFunc) ([]kadt.PeerID, coordt.QueryStats, error) {
+	var lastStats coordt.QueryStats
 	for {
 		select {
 		case <-ctx.Done():
@@ -464,20 +523,20 @@ func (c *Coordinator) waitForQuery(ctx context.Context, queryID query.QueryID, w
 			ctx, ev := wev.Ctx, wev.Event
 			switch ev := ev.(type) {
 			case *EventQueryProgressed:
-				lastStats = QueryStats{
+				lastStats = coordt.QueryStats{
 					Start:    ev.Stats.Start,
 					Requests: ev.Stats.Requests,
 					Success:  ev.Stats.Success,
 					Failure:  ev.Stats.Failure,
 				}
-				nh, err := c.networkBehaviour.getNodeHandler(ctx, kadt.PeerID(ev.NodeID))
+				nh, err := c.networkBehaviour.getNodeHandler(ctx, ev.NodeID)
 				if err != nil {
 					// ignore unknown node
 					break
 				}
 
 				err = fn(ctx, nh.ID(), ev.Response, lastStats)
-				if errors.Is(err, ErrSkipRemaining) {
+				if errors.Is(err, coordt.ErrSkipRemaining) {
 					// done
 					c.queryBehaviour.Notify(ctx, &EventStopQuery{QueryID: queryID})
 					return nil, lastStats, nil
@@ -500,6 +559,28 @@ func (c *Coordinator) waitForQuery(ctx context.Context, queryID query.QueryID, w
 	}
 }
 
+func (c *Coordinator) waitForBroadcast(ctx context.Context, waiter *Waiter[BehaviourEvent]) ([]kadt.PeerID, map[string]struct {
+	Node kadt.PeerID
+	Err  error
+}, error,
+) {
+	for {
+		select {
+		case <-ctx.Done():
+			return nil, nil, ctx.Err()
+		case wev := <-waiter.Chan():
+			switch ev := wev.Event.(type) {
+			case *EventQueryProgressed: // ignore progress updates; a broadcast only reports completion
+			case *EventBroadcastFinished:
+				return ev.Contacted, ev.Errors, nil
+
+			default:
+				panic(fmt.Sprintf("unexpected event: %T", ev))
+			}
+		}
+	}
+}
+
 // AddNodes suggests new DHT nodes to be added to the routing table.
 // If the routing table is updated as a result of this operation an EventRoutingUpdated notification
 // is emitted on the routing notification channel.
@@ -559,9 +640,9 @@ func (c *Coordinator) NotifyNonConnectivity(ctx context.Context, id kadt.PeerID)
 	return nil
 }
 
-func (c *Coordinator) newQueryID() query.QueryID {
+func (c *Coordinator) newOperationID() coordt.QueryID {
 	next := c.lastQueryID.Add(1)
-	return query.QueryID(fmt.Sprintf("%016x", next))
+	return coordt.QueryID(fmt.Sprintf("%016x", next))
 }
 
 // A BufferedRoutingNotifier is a [RoutingNotifier] that buffers [RoutingNotification] events and provides methods
diff --git a/v2/internal/coord/coordinator_test.go b/v2/internal/coord/coordinator_test.go
index c267b4a0..bbec6a38 100644
--- a/v2/internal/coord/coordinator_test.go
+++ b/v2/internal/coord/coordinator_test.go
@@ -5,6 +5,8 @@ import (
 	"log"
 	"testing"
 
+	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt"
+
 	"github.com/benbjohnson/clock"
 	"github.com/stretchr/testify/require"
 
@@ -106,7 +108,7 @@ func TestExhaustiveQuery(t *testing.T) {
 	visited := make(map[string]int)
 
 	// Record the nodes as they are visited
-	qfn := func(ctx context.Context, id kadt.PeerID, msg *pb.Message, stats QueryStats) error {
+	qfn := func(ctx context.Context, id kadt.PeerID, msg *pb.Message, stats coordt.QueryStats) error {
 		visited[id.String()]++
 		return nil
 	}
@@ -144,7 +146,7 @@ func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) {
 	rn := NewBufferedRoutingNotifier()
 	c.SetRoutingNotifier(rn)
 
-	qfn := func(ctx context.Context, id kadt.PeerID, msg *pb.Message, stats QueryStats) error {
+	qfn := func(ctx context.Context, id kadt.PeerID, msg *pb.Message, stats coordt.QueryStats) error {
 		return nil
 	}
 
@@ -255,7 +257,7 @@ func TestIncludeNode(t *testing.T) {
 
 	// the routing table should not contain the node yet
 	_, err = d.GetNode(ctx, candidate)
-	require.ErrorIs(t, err, ErrNodeNotFound)
+	require.ErrorIs(t, err,
coordt.ErrNodeNotFound) // inject a new node err = d.AddNodes(ctx, []kadt.PeerID{candidate}) diff --git a/v2/internal/coord/coretypes.go b/v2/internal/coord/coordt/coretypes.go similarity index 92% rename from v2/internal/coord/coretypes.go rename to v2/internal/coord/coordt/coretypes.go index 12c9ba26..2e000c81 100644 --- a/v2/internal/coord/coretypes.go +++ b/v2/internal/coord/coordt/coretypes.go @@ -1,4 +1,4 @@ -package coord +package coordt import ( "context" @@ -11,6 +11,15 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) +// TODO: rename to something like OperationID. This type isn't only used to identify queries but also other operations like broadcasts. +type QueryID string + +const InvalidQueryID QueryID = "" + +type StateMachine[E any, S any] interface { + Advance(context.Context, E) S +} + // Value is a value that may be stored in the DHT. type Value interface { Key() kadt.Key diff --git a/v2/internal/coord/event.go b/v2/internal/coord/event.go index a0037732..fddc40ef 100644 --- a/v2/internal/coord/event.go +++ b/v2/internal/coord/event.go @@ -1,6 +1,7 @@ package coord import ( + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p-kad-dht/v2/pb" @@ -28,6 +29,12 @@ type QueryCommand interface { queryCommand() } +// BrdcstCommand is a type of [BehaviourEvent] that instructs a [BrdcstBehaviour] to perform an action. +type BrdcstCommand interface { + BehaviourEvent + brdcstCommand() +} + type NodeHandlerRequest interface { BehaviourEvent nodeHandlerRequest() @@ -51,7 +58,7 @@ func (*EventStartBootstrap) behaviourEvent() {} func (*EventStartBootstrap) routingCommand() {} type EventOutboundGetCloserNodes struct { - QueryID query.QueryID + QueryID coordt.QueryID To kadt.PeerID Target kadt.Key Notify Notify[BehaviourEvent] @@ -62,7 +69,7 @@ func (*EventOutboundGetCloserNodes) nodeHandlerRequest() {} func (*EventOutboundGetCloserNodes) networkCommand() {} type EventOutboundSendMessage struct { - QueryID query.QueryID + QueryID coordt.QueryID To kadt.PeerID Message *pb.Message Notify Notify[BehaviourEvent] @@ -73,7 +80,7 @@ func (*EventOutboundSendMessage) nodeHandlerRequest() {} func (*EventOutboundSendMessage) networkCommand() {} type EventStartMessageQuery struct { - QueryID query.QueryID + QueryID coordt.QueryID Target kadt.Key Message *pb.Message KnownClosestNodes []kadt.PeerID @@ -85,7 +92,7 @@ func (*EventStartMessageQuery) behaviourEvent() {} func (*EventStartMessageQuery) queryCommand() {} type EventStartFindCloserQuery struct { - QueryID query.QueryID + QueryID coordt.QueryID Target kadt.Key KnownClosestNodes []kadt.PeerID Notify NotifyCloser[BehaviourEvent] @@ -96,7 +103,7 @@ func (*EventStartFindCloserQuery) behaviourEvent() {} func (*EventStartFindCloserQuery) queryCommand() {} type EventStopQuery struct { - QueryID query.QueryID + QueryID coordt.QueryID } func (*EventStopQuery) behaviourEvent() {} @@ -113,7 +120,7 @@ func (*EventAddNode) routingCommand() {} // EventGetCloserNodesSuccess notifies a behaviour that a GetCloserNodes request, initiated by an // [EventOutboundGetCloserNodes] event has produced a successful response. type EventGetCloserNodesSuccess struct { - QueryID query.QueryID + QueryID coordt.QueryID To kadt.PeerID // To is the peer that the GetCloserNodes request was sent to. 
Target kadt.Key CloserNodes []kadt.PeerID @@ -125,7 +132,7 @@ func (*EventGetCloserNodesSuccess) nodeHandlerResponse() {} // EventGetCloserNodesFailure notifies a behaviour that a GetCloserNodes request, initiated by an // [EventOutboundGetCloserNodes] event has failed to produce a valid response. type EventGetCloserNodesFailure struct { - QueryID query.QueryID + QueryID coordt.QueryID To kadt.PeerID // To is the peer that the GetCloserNodes request was sent to. Target kadt.Key Err error @@ -137,7 +144,8 @@ func (*EventGetCloserNodesFailure) nodeHandlerResponse() {} // EventSendMessageSuccess notifies a behaviour that a SendMessage request, initiated by an // [EventOutboundSendMessage] event has produced a successful response. type EventSendMessageSuccess struct { - QueryID query.QueryID + QueryID coordt.QueryID + Request *pb.Message To kadt.PeerID // To is the peer that the SendMessage request was sent to. Response *pb.Message CloserNodes []kadt.PeerID @@ -149,7 +157,8 @@ func (*EventSendMessageSuccess) nodeHandlerResponse() {} // EventSendMessageFailure notifies a behaviour that a SendMessage request, initiated by an // [EventOutboundSendMessage] event has failed to produce a valid response. type EventSendMessageFailure struct { - QueryID query.QueryID + QueryID coordt.QueryID + Request *pb.Message To kadt.PeerID // To is the peer that the SendMessage request was sent to. Target kadt.Key Err error @@ -161,7 +170,7 @@ func (*EventSendMessageFailure) nodeHandlerResponse() {} // EventQueryProgressed is emitted by the coordinator when a query has received a // response from a node. type EventQueryProgressed struct { - QueryID query.QueryID + QueryID coordt.QueryID NodeID kadt.PeerID Response *pb.Message Stats query.QueryStats @@ -172,7 +181,7 @@ func (*EventQueryProgressed) behaviourEvent() {} // EventQueryFinished is emitted by the coordinator when a query has finished, either through // running to completion or by being canceled. 
type EventQueryFinished struct { - QueryID query.QueryID + QueryID coordt.QueryID Stats query.QueryStats ClosestNodes []kadt.PeerID } diff --git a/v2/internal/coord/network.go b/v2/internal/coord/network.go index d4087564..487a2506 100644 --- a/v2/internal/coord/network.go +++ b/v2/internal/coord/network.go @@ -5,18 +5,19 @@ import ( "fmt" "sync" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" + "github.com/plprobelab/go-kademlia/key" "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" - "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) type NetworkBehaviour struct { // rtr is the message router used to send messages - rtr Router[kadt.Key, kadt.PeerID, *pb.Message] + rtr coordt.Router[kadt.Key, kadt.PeerID, *pb.Message] nodeHandlersMu sync.Mutex nodeHandlers map[kadt.PeerID]*NodeHandler // TODO: garbage collect node handlers @@ -29,7 +30,7 @@ type NetworkBehaviour struct { tracer trace.Tracer } -func NewNetworkBehaviour(rtr Router[kadt.Key, kadt.PeerID, *pb.Message], logger *slog.Logger, tracer trace.Tracer) *NetworkBehaviour { +func NewNetworkBehaviour(rtr coordt.Router[kadt.Key, kadt.PeerID, *pb.Message], logger *slog.Logger, tracer trace.Tracer) *NetworkBehaviour { b := &NetworkBehaviour{ rtr: rtr, nodeHandlers: make(map[kadt.PeerID]*NodeHandler), @@ -51,21 +52,19 @@ func (b *NetworkBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { switch ev := ev.(type) { case *EventOutboundGetCloserNodes: b.nodeHandlersMu.Lock() - p := kadt.PeerID(ev.To) - nh, ok := b.nodeHandlers[p] + nh, ok := b.nodeHandlers[ev.To] if !ok { - nh = NewNodeHandler(p, b.rtr, b.logger, b.tracer) - b.nodeHandlers[p] = nh + nh = NewNodeHandler(ev.To, b.rtr, b.logger, b.tracer) + b.nodeHandlers[ev.To] = nh } b.nodeHandlersMu.Unlock() nh.Notify(ctx, ev) case *EventOutboundSendMessage: b.nodeHandlersMu.Lock() - p := kadt.PeerID(ev.To) - nh, ok := b.nodeHandlers[p] + nh, ok := b.nodeHandlers[ev.To] if !ok { - nh = NewNodeHandler(p, b.rtr, b.logger, b.tracer) - b.nodeHandlers[p] = nh + nh = NewNodeHandler(ev.To, b.rtr, b.logger, b.tracer) + b.nodeHandlers[ev.To] = nh } b.nodeHandlersMu.Unlock() nh.Notify(ctx, ev) @@ -122,13 +121,13 @@ func (b *NetworkBehaviour) getNodeHandler(ctx context.Context, id kadt.PeerID) ( type NodeHandler struct { self kadt.PeerID - rtr Router[kadt.Key, kadt.PeerID, *pb.Message] + rtr coordt.Router[kadt.Key, kadt.PeerID, *pb.Message] queue *WorkQueue[NodeHandlerRequest] logger *slog.Logger tracer trace.Tracer } -func NewNodeHandler(self kadt.PeerID, rtr Router[kadt.Key, kadt.PeerID, *pb.Message], logger *slog.Logger, tracer trace.Tracer) *NodeHandler { +func NewNodeHandler(self kadt.PeerID, rtr coordt.Router[kadt.Key, kadt.PeerID, *pb.Message], logger *slog.Logger, tracer trace.Tracer) *NodeHandler { h := &NodeHandler{ self: self, rtr: rtr, @@ -179,6 +178,7 @@ func (h *NodeHandler) send(ctx context.Context, ev NodeHandlerRequest) bool { cmd.Notify.Notify(ctx, &EventSendMessageFailure{ QueryID: cmd.QueryID, To: h.self, + Request: cmd.Message, Err: fmt.Errorf("NodeHandler: %w", err), }) return false @@ -187,6 +187,7 @@ func (h *NodeHandler) send(ctx context.Context, ev NodeHandlerRequest) bool { cmd.Notify.Notify(ctx, &EventSendMessageSuccess{ QueryID: cmd.QueryID, To: h.self, + Request: cmd.Message, Response: resp, CloserNodes: resp.CloserNodes(), }) @@ -203,13 +204,13 @@ func (h *NodeHandler) ID() kadt.PeerID { // GetClosestNodes requests the n closest nodes to the 
key from the node's local routing table. // The node may return fewer nodes than requested. -func (h *NodeHandler) GetClosestNodes(ctx context.Context, k kadt.Key, n int) ([]Node, error) { +func (h *NodeHandler) GetClosestNodes(ctx context.Context, k kadt.Key, n int) ([]coordt.Node, error) { ctx, span := h.tracer.Start(ctx, "NodeHandler.GetClosestNodes") defer span.End() w := NewWaiter[BehaviourEvent]() ev := &EventOutboundGetCloserNodes{ - QueryID: query.QueryID(key.HexString(k)), + QueryID: coordt.QueryID(key.HexString(k)), To: h.self, Target: k, Notify: w, @@ -224,7 +225,7 @@ func (h *NodeHandler) GetClosestNodes(ctx context.Context, k kadt.Key, n int) ([ switch res := we.Event.(type) { case *EventGetCloserNodesSuccess: - nodes := make([]Node, 0, len(res.CloserNodes)) + nodes := make([]coordt.Node, 0, len(res.CloserNodes)) for _, info := range res.CloserNodes { // TODO use a global registry of node handlers nodes = append(nodes, NewNodeHandler(info, h.rtr, h.logger, h.tracer)) @@ -245,12 +246,12 @@ func (h *NodeHandler) GetClosestNodes(ctx context.Context, k kadt.Key, n int) ([ // GetValue requests that the node return any value associated with the supplied key. // If the node does not have a value for the key it returns ErrValueNotFound. -func (h *NodeHandler) GetValue(ctx context.Context, key kadt.Key) (Value, error) { +func (h *NodeHandler) GetValue(ctx context.Context, key kadt.Key) (coordt.Value, error) { panic("not implemented") } // PutValue requests that the node stores a value to be associated with the supplied key. // If the node cannot or chooses not to store the value for the key it returns ErrValueNotAccepted. -func (h *NodeHandler) PutValue(ctx context.Context, r Value, q int) error { +func (h *NodeHandler) PutValue(ctx context.Context, r coordt.Value, q int) error { panic("not implemented") } diff --git a/v2/internal/coord/query.go b/v2/internal/coord/query.go index 91cbab09..5d1df302 100644 --- a/v2/internal/coord/query.go +++ b/v2/internal/coord/query.go @@ -5,17 +5,19 @@ import ( "fmt" "sync" - "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" - "github.com/libp2p/go-libp2p-kad-dht/v2/pb" "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) type PooledQueryBehaviour struct { pool *query.Pool[kadt.Key, kadt.PeerID, *pb.Message] - waiters map[query.QueryID]NotifyCloser[BehaviourEvent] + waiters map[coordt.QueryID]NotifyCloser[BehaviourEvent] pendingMu sync.Mutex pending []BehaviourEvent @@ -28,7 +30,7 @@ type PooledQueryBehaviour struct { func NewPooledQueryBehaviour(pool *query.Pool[kadt.Key, kadt.PeerID, *pb.Message], logger *slog.Logger, tracer trace.Tracer) *PooledQueryBehaviour { h := &PooledQueryBehaviour{ pool: pool, - waiters: make(map[query.QueryID]NotifyCloser[BehaviourEvent]), + waiters: make(map[coordt.QueryID]NotifyCloser[BehaviourEvent]), ready: make(chan struct{}, 1), logger: logger.With("behaviour", "query"), tracer: tracer, @@ -47,29 +49,27 @@ func (p *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { switch ev := ev.(type) { case *EventStartFindCloserQuery: cmd = &query.EventPoolAddFindCloserQuery[kadt.Key, kadt.PeerID]{ - QueryID: ev.QueryID, - Target: ev.Target, - KnownClosestNodes: ev.KnownClosestNodes, + QueryID: ev.QueryID, + Target: ev.Target, + Seed: 
ev.KnownClosestNodes, } if ev.Notify != nil { p.waiters[ev.QueryID] = ev.Notify } case *EventStartMessageQuery: cmd = &query.EventPoolAddQuery[kadt.Key, kadt.PeerID, *pb.Message]{ - QueryID: ev.QueryID, - Target: ev.Target, - Message: ev.Message, - KnownClosestNodes: ev.KnownClosestNodes, + QueryID: ev.QueryID, + Target: ev.Target, + Message: ev.Message, + Seed: ev.KnownClosestNodes, } if ev.Notify != nil { p.waiters[ev.QueryID] = ev.Notify } - case *EventStopQuery: cmd = &query.EventPoolStopQuery{ QueryID: ev.QueryID, } - case *EventGetCloserNodesSuccess: for _, info := range ev.CloserNodes { // TODO: do this after advancing pool @@ -189,9 +189,12 @@ func (p *PooledQueryBehaviour) Perform(ctx context.Context) (BehaviourEvent, boo } } -func (p *PooledQueryBehaviour) advancePool(ctx context.Context, ev query.PoolEvent) (BehaviourEvent, bool) { - ctx, span := p.tracer.Start(ctx, "PooledQueryBehaviour.advancePool") - defer span.End() +func (p *PooledQueryBehaviour) advancePool(ctx context.Context, ev query.PoolEvent) (out BehaviourEvent, term bool) { + ctx, span := p.tracer.Start(ctx, "PooledQueryBehaviour.advancePool", trace.WithAttributes(tele.AttrInEvent(ev))) + defer func() { + span.SetAttributes(tele.AttrOutEvent(out)) + span.End() + }() pstate := p.pool.Advance(ctx, ev) switch st := pstate.(type) { diff --git a/v2/internal/coord/query/pool.go b/v2/internal/coord/query/pool.go index a94566cb..d6edea86 100644 --- a/v2/internal/coord/query/pool.go +++ b/v2/internal/coord/query/pool.go @@ -9,16 +9,15 @@ import ( "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/kaderr" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) -type Message interface{} - -type Pool[K kad.Key[K], N kad.NodeID[K], M Message] struct { +type Pool[K kad.Key[K], N kad.NodeID[K], M coordt.Message] struct { // self is the node id of the system the pool is running on self N queries []*Query[K, N, M] - queryIndex map[QueryID]*Query[K, N, M] + queryIndex map[coordt.QueryID]*Query[K, N, M] // cfg is a copy of the optional configuration supplied to the pool cfg PoolConfig @@ -94,7 +93,7 @@ func DefaultPoolConfig() *PoolConfig { } } -func NewPool[K kad.Key[K], N kad.NodeID[K], M Message](self N, cfg *PoolConfig) (*Pool[K, N, M], error) { +func NewPool[K kad.Key[K], N kad.NodeID[K], M coordt.Message](self N, cfg *PoolConfig) (*Pool[K, N, M], error) { if cfg == nil { cfg = DefaultPoolConfig() } else if err := cfg.Validate(); err != nil { @@ -105,7 +104,7 @@ func NewPool[K kad.Key[K], N kad.NodeID[K], M Message](self N, cfg *PoolConfig) self: self, cfg: *cfg, queries: make([]*Query[K, N, M], 0), - queryIndex: make(map[QueryID]*Query[K, N, M]), + queryIndex: make(map[coordt.QueryID]*Query[K, N, M]), }, nil } @@ -119,13 +118,13 @@ func (p *Pool[K, N, M]) Advance(ctx context.Context, ev PoolEvent) PoolState { // eventQueryID keeps track of a query that was advanced via a specific event, to avoid it // being advanced twice - eventQueryID := InvalidQueryID + eventQueryID := coordt.InvalidQueryID switch tev := ev.(type) { case *EventPoolAddFindCloserQuery[K, N]: - p.addFindCloserQuery(ctx, tev.QueryID, tev.Target, tev.KnownClosestNodes, tev.NumResults) + p.addFindCloserQuery(ctx, tev.QueryID, tev.Target, tev.Seed, tev.NumResults) case *EventPoolAddQuery[K, N, M]: - p.addQuery(ctx, tev.QueryID, tev.Target, tev.Message, tev.KnownClosestNodes, tev.NumResults) + p.addQuery(ctx, tev.QueryID, tev.Target, tev.Message, tev.Seed, tev.NumResults) // TODO: return 
error as state
 	case *EventPoolStopQuery:
 		if qry, ok := p.queryIndex[tev.QueryID]; ok {
@@ -242,7 +241,7 @@ func (p *Pool[K, N, M]) advanceQuery(ctx context.Context, qry *Query[K, N, M], q
 	return nil, false
 }
 
-func (p *Pool[K, N, M]) removeQuery(queryID QueryID) {
+func (p *Pool[K, N, M]) removeQuery(queryID coordt.QueryID) {
 	for i := range p.queries {
 		if p.queries[i].id != queryID {
 			continue
@@ -258,7 +257,7 @@ func (p *Pool[K, N, M]) removeQuery(queryID coordt.QueryID) {
 
 // addQuery adds a query to the pool, returning the new query id
 // TODO: remove target argument and use msg.Target
-func (p *Pool[K, N, M]) addQuery(ctx context.Context, queryID QueryID, target K, msg M, knownClosestNodes []N, numResults int) error {
+func (p *Pool[K, N, M]) addQuery(ctx context.Context, queryID coordt.QueryID, target K, msg M, knownClosestNodes []N, numResults int) error {
 	if _, exists := p.queryIndex[queryID]; exists {
 		return fmt.Errorf("query id already in use")
 	}
@@ -285,7 +284,7 @@ func (p *Pool[K, N, M]) addQuery(ctx context.Context, queryID coordt.QueryID, target K,
 }
 
-// addQuery adds a find closer query to the pool, returning the new query id
-func (p *Pool[K, N, M]) addFindCloserQuery(ctx context.Context, queryID QueryID, target K, knownClosestNodes []N, numResults int) error {
+// addFindCloserQuery adds a find closer query to the pool, returning the new query id
+func (p *Pool[K, N, M]) addFindCloserQuery(ctx context.Context, queryID coordt.QueryID, target K, knownClosestNodes []N, numResults int) error {
 	if _, exists := p.queryIndex[queryID]; exists {
 		return fmt.Errorf("query id already in use")
 	}
@@ -322,15 +321,15 @@ type StatePoolIdle struct{}
 
 // StatePoolFindCloser indicates that a pool query wants to send a find closer nodes message to a node.
 type StatePoolFindCloser[K kad.Key[K], N kad.NodeID[K]] struct {
-	QueryID QueryID
+	QueryID coordt.QueryID
 	Target  K // the key that the query wants to find closer nodes for
 	NodeID  N // the node to send the message to
 	Stats   QueryStats
 }
 
 // StatePoolSendMessage indicates that a pool query wants to send a message to a node.
-type StatePoolSendMessage[K kad.Key[K], N kad.NodeID[K], M Message] struct {
-	QueryID QueryID
+type StatePoolSendMessage[K kad.Key[K], N kad.NodeID[K], M coordt.Message] struct {
+	QueryID coordt.QueryID
 	NodeID  N // the node to send the message to
 	Message M
 	Stats   QueryStats
@@ -346,14 +345,14 @@ type StatePoolWaitingWithCapacity struct{}
 
 // StatePoolQueryFinished indicates that a query has finished.
 type StatePoolQueryFinished[K kad.Key[K], N kad.NodeID[K]] struct {
-	QueryID QueryID
+	QueryID      coordt.QueryID
 	Stats        QueryStats
 	ClosestNodes []N
 }
 
 // StatePoolQueryTimeout indicates that a query has timed out.
 type StatePoolQueryTimeout struct {
-	QueryID QueryID
+	QueryID coordt.QueryID
 	Stats   QueryStats
 }
 
@@ -373,38 +372,38 @@ type PoolEvent interface {
 
-// EventPoolAddQuery is an event that attempts to add a new query that finds closer nodes to a target key.
+// EventPoolAddFindCloserQuery is an event that attempts to add a new query that finds closer nodes to a target key.
type EventPoolAddFindCloserQuery[K kad.Key[K], N kad.NodeID[K]] struct { - QueryID QueryID // the id to use for the new query - Target K // the target key for the query - KnownClosestNodes []N // an initial set of close nodes the query should use - NumResults int // the minimum number of nodes to successfully contact before considering iteration complete + QueryID coordt.QueryID // the id to use for the new query + Target K // the target key for the query + Seed []N // an initial set of close nodes the query should use + NumResults int // the minimum number of nodes to successfully contact before considering iteration complete } // EventPoolAddQuery is an event that attempts to add a new query that sends a message. -type EventPoolAddQuery[K kad.Key[K], N kad.NodeID[K], M Message] struct { - QueryID QueryID // the id to use for the new query - Target K // the target key for the query - Message M // message to be sent to each node - KnownClosestNodes []N // an initial set of close nodes the query should use - NumResults int // the minimum number of nodes to successfully contact before considering iteration complete +type EventPoolAddQuery[K kad.Key[K], N kad.NodeID[K], M coordt.Message] struct { + QueryID coordt.QueryID // the id to use for the new query + Target K // the target key for the query + Message M // message to be sent to each node + Seed []N // an initial set of close nodes the query should use + NumResults int // the minimum number of nodes to successfully contact before considering iteration complete } // EventPoolStopQuery notifies a [Pool] to stop a query. type EventPoolStopQuery struct { - QueryID QueryID // the id of the query that should be stopped + QueryID coordt.QueryID // the id of the query that should be stopped } // EventPoolNodeResponse notifies a [Pool] that an attempt to contact a node has received a successful response. type EventPoolNodeResponse[K kad.Key[K], N kad.NodeID[K]] struct { - QueryID QueryID // the id of the query that sent the message - NodeID N // the node the message was sent to - CloserNodes []N // the closer nodes sent by the node + QueryID coordt.QueryID // the id of the query that sent the message + NodeID N // the node the message was sent to + CloserNodes []N // the closer nodes sent by the node } // EventPoolNodeFailure notifies a [Pool] that an attempt to contact a node has failed. type EventPoolNodeFailure[K kad.Key[K], N kad.NodeID[K]] struct { - QueryID QueryID // the id of the query that sent the message - NodeID N // the node the message was sent to - Error error // the error that caused the failure, if any + QueryID coordt.QueryID // the id of the query that sent the message + NodeID N // the node the message was sent to + Error error // the error that caused the failure, if any } // EventPoolPoll is an event that signals the pool that it can perform housekeeping work such as time out queries. 
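[Editor's note: the KnownClosestNodes-to-Seed rename and the shared coordt.QueryID type recur throughout the tests below. Here is a minimal sketch of driving the pool state machine with the new names; it leans on the internal tiny test helpers, so any name not shown in this patch is an assumption:

	ctx := context.Background()
	self := tiny.NewNode(0)

	// a pool with the default configuration
	p, err := NewPool[tiny.Key, tiny.Node, tiny.Message](self, DefaultPoolConfig())
	if err != nil {
		panic(err)
	}

	// seed a find-closer query; the returned state tells the caller what to do next
	state := p.Advance(ctx, &EventPoolAddFindCloserQuery[tiny.Key, tiny.Node]{
		QueryID: coordt.QueryID("example"),
		Target:  tiny.Key(0b00000001),
		Seed:    []tiny.Node{tiny.NewNode(0b00000100)},
	})

	// with capacity available, the pool asks us to contact the seed node first
	if st, ok := state.(*StatePoolFindCloser[tiny.Key, tiny.Node]); ok {
		_ = st.NodeID // the node to send a FIND_NODE-style request to
	}
]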
diff --git a/v2/internal/coord/query/pool_test.go b/v2/internal/coord/query/pool_test.go index d54c6d23..88f30091 100644 --- a/v2/internal/coord/query/pool_test.go +++ b/v2/internal/coord/query/pool_test.go @@ -8,6 +8,7 @@ import ( "github.com/plprobelab/go-kademlia/key" "github.com/stretchr/testify/require" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/tiny" ) @@ -105,13 +106,13 @@ func TestPoolAddFindCloserQueryStartsIfCapacity(t *testing.T) { target := tiny.Key(0b00000001) a := tiny.NewNode(0b00000100) // 4 - queryID := QueryID("test") + queryID := coordt.QueryID("test") // first thing the new pool should do is start the query state := p.Advance(ctx, &EventPoolAddFindCloserQuery[tiny.Key, tiny.Node]{ - QueryID: queryID, - Target: target, - KnownClosestNodes: []tiny.Node{a}, + QueryID: queryID, + Target: target, + Seed: []tiny.Node{a}, }) require.IsType(t, &StatePoolFindCloser[tiny.Key, tiny.Node]{}, state) @@ -145,14 +146,14 @@ func TestPoolAddQueryStartsIfCapacity(t *testing.T) { target := tiny.Key(0b00000001) a := tiny.NewNode(0b00000100) // 4 - queryID := QueryID("test") + queryID := coordt.QueryID("test") msg := tiny.Message{Content: "msg"} // first thing the new pool should do is start the query state := p.Advance(ctx, &EventPoolAddQuery[tiny.Key, tiny.Node, tiny.Message]{ - QueryID: queryID, - Target: target, - Message: msg, - KnownClosestNodes: []tiny.Node{a}, + QueryID: queryID, + Target: target, + Message: msg, + Seed: []tiny.Node{a}, }) require.IsType(t, &StatePoolSendMessage[tiny.Key, tiny.Node, tiny.Message]{}, state) @@ -186,13 +187,13 @@ func TestPoolNodeResponse(t *testing.T) { target := tiny.Key(0b00000001) a := tiny.NewNode(0b00000100) // 4 - queryID := QueryID("test") + queryID := coordt.QueryID("test") // first thing the new pool should do is start the query state := p.Advance(ctx, &EventPoolAddFindCloserQuery[tiny.Key, tiny.Node]{ - QueryID: queryID, - Target: target, - KnownClosestNodes: []tiny.Node{a}, + QueryID: queryID, + Target: target, + Seed: []tiny.Node{a}, }) require.IsType(t, &StatePoolFindCloser[tiny.Key, tiny.Node]{}, state) @@ -234,11 +235,11 @@ func TestPoolPrefersRunningQueriesOverNewOnes(t *testing.T) { d := tiny.NewNode(0b00100000) // 32 // Add the first query - queryID1 := QueryID("1") + queryID1 := coordt.QueryID("1") state := p.Advance(ctx, &EventPoolAddFindCloserQuery[tiny.Key, tiny.Node]{ - QueryID: queryID1, - Target: target, - KnownClosestNodes: []tiny.Node{a, b, c, d}, + QueryID: queryID1, + Target: target, + Seed: []tiny.Node{a, b, c, d}, }) require.IsType(t, &StatePoolFindCloser[tiny.Key, tiny.Node]{}, state) @@ -248,11 +249,11 @@ func TestPoolPrefersRunningQueriesOverNewOnes(t *testing.T) { require.Equal(t, a, st.NodeID) // Add the second query - queryID2 := QueryID("2") + queryID2 := coordt.QueryID("2") state = p.Advance(ctx, &EventPoolAddFindCloserQuery[tiny.Key, tiny.Node]{ - QueryID: queryID2, - Target: target, - KnownClosestNodes: []tiny.Node{a, b, c, d}, + QueryID: queryID2, + Target: target, + Seed: []tiny.Node{a, b, c, d}, }) // the first query should continue its operation in preference to starting the new query @@ -316,11 +317,11 @@ func TestPoolRespectsConcurrency(t *testing.T) { a := tiny.NewNode(0b00000100) // 4 // Add the first query - queryID1 := QueryID("1") + queryID1 := coordt.QueryID("1") state := p.Advance(ctx, &EventPoolAddFindCloserQuery[tiny.Key, tiny.Node]{ - QueryID: queryID1, - Target: target, - KnownClosestNodes: []tiny.Node{a}, + 
QueryID: queryID1, + Target: target, + Seed: []tiny.Node{a}, }) require.IsType(t, &StatePoolFindCloser[tiny.Key, tiny.Node]{}, state) @@ -330,11 +331,11 @@ func TestPoolRespectsConcurrency(t *testing.T) { require.Equal(t, a, st.NodeID) // Add the second query - queryID2 := QueryID("2") + queryID2 := coordt.QueryID("2") state = p.Advance(ctx, &EventPoolAddFindCloserQuery[tiny.Key, tiny.Node]{ - QueryID: queryID2, - Target: target, - KnownClosestNodes: []tiny.Node{a}, + QueryID: queryID2, + Target: target, + Seed: []tiny.Node{a}, }) // the second query should start since the first query has a request in flight @@ -344,11 +345,11 @@ func TestPoolRespectsConcurrency(t *testing.T) { require.Equal(t, a, st.NodeID) // Add a third query - queryID3 := QueryID("3") + queryID3 := coordt.QueryID("3") state = p.Advance(ctx, &EventPoolAddFindCloserQuery[tiny.Key, tiny.Node]{ - QueryID: queryID3, - Target: target, - KnownClosestNodes: []tiny.Node{a}, + QueryID: queryID3, + Target: target, + Seed: []tiny.Node{a}, }) // the third query should wait since the pool has reached maximum concurrency diff --git a/v2/internal/coord/query/query.go b/v2/internal/coord/query/query.go index b0003a83..00168082 100644 --- a/v2/internal/coord/query/query.go +++ b/v2/internal/coord/query/query.go @@ -11,13 +11,10 @@ import ( "github.com/plprobelab/go-kademlia/key" "go.opentelemetry.io/otel/trace" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) -type QueryID string - -const InvalidQueryID QueryID = "" - type QueryStats struct { Start time.Time End time.Time @@ -74,9 +71,9 @@ func DefaultQueryConfig() *QueryConfig { } } -type Query[K kad.Key[K], N kad.NodeID[K], M Message] struct { +type Query[K kad.Key[K], N kad.NodeID[K], M coordt.Message] struct { self N - id QueryID + id coordt.QueryID // cfg is a copy of the optional configuration supplied to the query cfg QueryConfig @@ -99,7 +96,7 @@ type Query[K kad.Key[K], N kad.NodeID[K], M Message] struct { inFlight int } -func NewFindCloserQuery[K kad.Key[K], N kad.NodeID[K], M Message](self N, id QueryID, target K, iter NodeIter[K, N], knownClosestNodes []N, cfg *QueryConfig) (*Query[K, N, M], error) { +func NewFindCloserQuery[K kad.Key[K], N kad.NodeID[K], M coordt.Message](self N, id coordt.QueryID, target K, iter NodeIter[K, N], knownClosestNodes []N, cfg *QueryConfig) (*Query[K, N, M], error) { var empty M q, err := NewQuery[K, N, M](self, id, target, empty, iter, knownClosestNodes, cfg) if err != nil { @@ -109,7 +106,7 @@ func NewFindCloserQuery[K kad.Key[K], N kad.NodeID[K], M Message](self N, id Que return q, nil } -func NewQuery[K kad.Key[K], N kad.NodeID[K], M Message](self N, id QueryID, target K, msg M, iter NodeIter[K, N], knownClosestNodes []N, cfg *QueryConfig) (*Query[K, N, M], error) { +func NewQuery[K kad.Key[K], N kad.NodeID[K], M coordt.Message](self N, id coordt.QueryID, target K, msg M, iter NodeIter[K, N], knownClosestNodes []N, cfg *QueryConfig) (*Query[K, N, M], error) { if cfg == nil { cfg = DefaultQueryConfig() } else if err := cfg.Validate(); err != nil { @@ -385,22 +382,22 @@ type QueryState interface { // StateQueryFinished indicates that the [Query] has finished. type StateQueryFinished[K kad.Key[K], N kad.NodeID[K]] struct { - QueryID QueryID + QueryID coordt.QueryID Stats QueryStats ClosestNodes []N // contains the closest nodes to the target key that were found } // StateQueryFindCloser indicates that the [Query] wants to send a find closer nodes message to a node. 
type StateQueryFindCloser[K kad.Key[K], N kad.NodeID[K]] struct { - QueryID QueryID + QueryID coordt.QueryID Target K // the key that the query wants to find closer nodes for NodeID N // the node to send the message to Stats QueryStats } // StateQuerySendMessage indicates that the [Query] wants to send a message to a node. -type StateQuerySendMessage[K kad.Key[K], N kad.NodeID[K], M Message] struct { - QueryID QueryID +type StateQuerySendMessage[K kad.Key[K], N kad.NodeID[K], M coordt.Message] struct { + QueryID coordt.QueryID NodeID N // the node to send the message to Message M Stats QueryStats @@ -408,13 +405,13 @@ type StateQuerySendMessage[K kad.Key[K], N kad.NodeID[K], M Message] struct { // StateQueryWaitingAtCapacity indicates that the [Query] is waiting for results and is at capacity. type StateQueryWaitingAtCapacity struct { - QueryID QueryID + QueryID coordt.QueryID Stats QueryStats } // StateQueryWaitingWithCapacity indicates that the [Query] is waiting for results but has no further nodes to contact. type StateQueryWaitingWithCapacity struct { - QueryID QueryID + QueryID coordt.QueryID Stats QueryStats } diff --git a/v2/internal/coord/query/query_test.go b/v2/internal/coord/query/query_test.go index 6cb1d9d1..f1a211a2 100644 --- a/v2/internal/coord/query/query_test.go +++ b/v2/internal/coord/query/query_test.go @@ -9,6 +9,7 @@ import ( "github.com/plprobelab/go-kademlia/key" "github.com/stretchr/testify/require" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/tiny" ) @@ -65,7 +66,7 @@ func TestQueryMessagesNode(t *testing.T) { cfg := DefaultQueryConfig() cfg.Clock = clk - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) @@ -114,7 +115,7 @@ func TestQueryFindCloserNearest(t *testing.T) { cfg := DefaultQueryConfig() cfg.Clock = clk - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) @@ -145,7 +146,7 @@ func TestQueryCancelFinishesQuery(t *testing.T) { cfg := DefaultQueryConfig() cfg.Clock = clk - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) @@ -188,7 +189,7 @@ func TestQueryNoClosest(t *testing.T) { cfg := DefaultQueryConfig() cfg.Clock = clk - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) @@ -232,7 +233,7 @@ func TestQueryWaitsAtCapacity(t *testing.T) { cfg.Clock = clk cfg.Concurrency = 2 - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) @@ -286,7 +287,7 @@ func TestQueryTimedOutNodeMakesCapacity(t *testing.T) { cfg.RequestTimeout = 3 * time.Minute cfg.Concurrency = len(knownNodes) - 1 // one less than the number of initial nodes - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) @@ -391,7 +392,7 @@ func 
TestQueryFindCloserResponseMakesCapacity(t *testing.T) { cfg.Clock = clk cfg.Concurrency = len(knownNodes) - 1 // one less than the number of initial nodes - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) @@ -475,7 +476,7 @@ func TestQueryCloserNodesAreAddedToIteration(t *testing.T) { cfg.Clock = clk cfg.Concurrency = 2 - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) @@ -531,7 +532,7 @@ func TestQueryCloserNodesIgnoresDuplicates(t *testing.T) { cfg.Clock = clk cfg.Concurrency = 2 - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) @@ -585,7 +586,7 @@ func TestQueryCancelFinishesIteration(t *testing.T) { cfg.Clock = clk cfg.Concurrency = 2 - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) @@ -623,7 +624,7 @@ func TestQueryFinishedIgnoresLaterEvents(t *testing.T) { cfg.Clock = clk cfg.Concurrency = 2 - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) @@ -680,7 +681,7 @@ func TestQueryWithCloserIterIgnoresMessagesFromUnknownNodes(t *testing.T) { cfg.Clock = clk cfg.Concurrency = 2 - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) @@ -732,7 +733,7 @@ func TestQueryWithCloserIterFinishesWhenNumResultsReached(t *testing.T) { cfg.Concurrency = 4 cfg.NumResults = 2 - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) @@ -792,7 +793,7 @@ func TestQueryWithCloserIterContinuesUntilNumResultsReached(t *testing.T) { cfg.Concurrency = 4 cfg.NumResults = 2 - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) @@ -864,7 +865,7 @@ func TestQueryNotContactedMakesCapacity(t *testing.T) { cfg.Clock = clk cfg.Concurrency = len(knownNodes) - 1 // one less than the number of initial nodes - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) @@ -922,7 +923,7 @@ func TestFindCloserQueryAllNotContactedFinishes(t *testing.T) { cfg.Clock = clk cfg.Concurrency = len(knownNodes) // allow all to be contacted at once - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) @@ -981,7 +982,7 @@ func TestQueryAllContactedFinishes(t *testing.T) { cfg.Concurrency = len(knownNodes) // allow all to be contacted at once cfg.NumResults = len(knownNodes) + 1 // one 
more than the size of the network - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) @@ -1040,7 +1041,7 @@ func TestQueryNeverMessagesSelf(t *testing.T) { cfg.Clock = clk cfg.Concurrency = 2 - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := a qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) @@ -1090,7 +1091,7 @@ func TestQueryMessagesNearest(t *testing.T) { cfg := DefaultQueryConfig() cfg.Clock = clk - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) msg := tiny.Message{Content: "msg"} @@ -1131,7 +1132,7 @@ func TestQueryMessageResponseMakesCapacity(t *testing.T) { cfg.Clock = clk cfg.Concurrency = len(knownNodes) - 1 // one less than the number of initial nodes - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) msg := tiny.Message{Content: "msg"} @@ -1206,7 +1207,7 @@ func TestQueryAllNotContactedFinishes(t *testing.T) { cfg.Clock = clk cfg.Concurrency = len(knownNodes) // allow all to be contacted at once - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) msg := tiny.Message{Content: "msg"} @@ -1269,7 +1270,7 @@ func TestFindCloserQueryIncludesPartialClosestNodesWhenCancelled(t *testing.T) { cfg.Concurrency = 4 cfg.NumResults = 4 - queryID := QueryID("test") + queryID := coordt.QueryID("test") self := tiny.NewNode(0) qry, err := NewFindCloserQuery[tiny.Key, tiny.Node, tiny.Message](self, queryID, target, iter, knownNodes, cfg) diff --git a/v2/internal/coord/routing.go b/v2/internal/coord/routing.go index 832bfa64..70e64868 100644 --- a/v2/internal/coord/routing.go +++ b/v2/internal/coord/routing.go @@ -5,6 +5,7 @@ import ( "fmt" "sync" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" @@ -19,13 +20,13 @@ type RoutingBehaviour struct { self kadt.PeerID // bootstrap is the bootstrap state machine, responsible for bootstrapping the routing table - bootstrap SM[routing.BootstrapEvent, routing.BootstrapState] + bootstrap coordt.StateMachine[routing.BootstrapEvent, routing.BootstrapState] // include is the inclusion state machine, responsible for vetting nodes before including them in the routing table - include SM[routing.IncludeEvent, routing.IncludeState] + include coordt.StateMachine[routing.IncludeEvent, routing.IncludeState] // probe is the node probing state machine, responsible for periodically checking connectivity of nodes in the routing table - probe SM[routing.ProbeEvent, routing.ProbeState] + probe coordt.StateMachine[routing.ProbeEvent, routing.ProbeState] pendingMu sync.Mutex pending []BehaviourEvent @@ -35,7 +36,14 @@ type RoutingBehaviour struct { tracer trace.Tracer } -func NewRoutingBehaviour(self kadt.PeerID, bootstrap SM[routing.BootstrapEvent, routing.BootstrapState], include SM[routing.IncludeEvent, routing.IncludeState], probe SM[routing.ProbeEvent, routing.ProbeState], logger *slog.Logger, tracer trace.Tracer) *RoutingBehaviour { +func NewRoutingBehaviour( + self kadt.PeerID, + bootstrap coordt.StateMachine[routing.BootstrapEvent, routing.BootstrapState], + include coordt.StateMachine[routing.IncludeEvent, routing.IncludeState], + probe coordt.StateMachine[routing.ProbeEvent, routing.ProbeState], + logger *slog.Logger, 
+ tracer trace.Tracer, +) *RoutingBehaviour { r := &RoutingBehaviour{ self: self, bootstrap: bootstrap, diff --git a/v2/internal/coord/routing/bootstrap.go b/v2/internal/coord/routing/bootstrap.go index 914f9615..8580fbc1 100644 --- a/v2/internal/coord/routing/bootstrap.go +++ b/v2/internal/coord/routing/bootstrap.go @@ -11,6 +11,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query" "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) @@ -106,7 +107,7 @@ func (b *Bootstrap[K, N]) Advance(ctx context.Context, ev BootstrapEvent) Bootst qryCfg.Concurrency = b.cfg.RequestConcurrency qryCfg.RequestTimeout = b.cfg.RequestTimeout - queryID := query.QueryID("bootstrap") + queryID := coordt.QueryID("bootstrap") qry, err := query.NewFindCloserQuery[K, N, any](b.self, queryID, b.self.Key(), iter, tev.KnownClosestNodes, qryCfg) if err != nil { @@ -195,7 +196,7 @@ type BootstrapState interface { // StateBootstrapFindCloser indicates that the bootstrap query wants to send a find closer nodes message to a node. type StateBootstrapFindCloser[K kad.Key[K], N kad.NodeID[K]] struct { - QueryID query.QueryID + QueryID coordt.QueryID Target K // the key that the query wants to find closer nodes for NodeID N // the node to send the message to Stats query.QueryStats diff --git a/v2/internal/coord/routing/bootstrap_test.go b/v2/internal/coord/routing/bootstrap_test.go index 70c8b6f0..29123980 100644 --- a/v2/internal/coord/routing/bootstrap_test.go +++ b/v2/internal/coord/routing/bootstrap_test.go @@ -8,8 +8,8 @@ import ( "github.com/plprobelab/go-kademlia/key" "github.com/stretchr/testify/require" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/tiny" - "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query" ) func TestBootstrapConfigValidate(t *testing.T) { @@ -85,7 +85,7 @@ func TestBootstrapStart(t *testing.T) { st := state.(*StateBootstrapFindCloser[tiny.Key, tiny.Node]) // the query should be the one just added - require.Equal(t, query.QueryID("bootstrap"), st.QueryID) + require.Equal(t, coordt.QueryID("bootstrap"), st.QueryID) // the query should attempt to contact the node it was given require.Equal(t, a, st.NodeID) @@ -118,7 +118,7 @@ func TestBootstrapMessageResponse(t *testing.T) { // the bootstrap should attempt to contact the node it was given st := state.(*StateBootstrapFindCloser[tiny.Key, tiny.Node]) - require.Equal(t, query.QueryID("bootstrap"), st.QueryID) + require.Equal(t, coordt.QueryID("bootstrap"), st.QueryID) require.Equal(t, a, st.NodeID) // notify bootstrap that node was contacted successfully, but no closer nodes @@ -163,7 +163,7 @@ func TestBootstrapProgress(t *testing.T) { // the bootstrap should attempt to contact the closest node it was given require.IsType(t, &StateBootstrapFindCloser[tiny.Key, tiny.Node]{}, state) st := state.(*StateBootstrapFindCloser[tiny.Key, tiny.Node]) - require.Equal(t, query.QueryID("bootstrap"), st.QueryID) + require.Equal(t, coordt.QueryID("bootstrap"), st.QueryID) require.Equal(t, a, st.NodeID) // next the bootstrap attempts to contact second nearest node diff --git a/v2/internal/coord/routing/include_test.go b/v2/internal/coord/routing/include_test.go index a565521a..a02def62 100644 --- a/v2/internal/coord/routing/include_test.go +++ b/v2/internal/coord/routing/include_test.go @@ -5,10 +5,11 @@ import ( "testing" 
"github.com/benbjohnson/clock" - "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/tiny" "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/routing/simplert" "github.com/stretchr/testify/require" + + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/tiny" ) func TestIncludeConfigValidate(t *testing.T) { diff --git a/v2/internal/coord/routing_test.go b/v2/internal/coord/routing_test.go index c789c9dc..545680da 100644 --- a/v2/internal/coord/routing_test.go +++ b/v2/internal/coord/routing_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" + "go.opentelemetry.io/otel" "github.com/benbjohnson/clock" @@ -13,7 +15,6 @@ import ( "golang.org/x/exp/slog" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/nettest" - "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/routing" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" @@ -65,7 +66,7 @@ func TestRoutingBootstrapGetClosestNodesSuccess(t *testing.T) { routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) ev := &EventGetCloserNodesSuccess{ - QueryID: query.QueryID("bootstrap"), + QueryID: coordt.QueryID("bootstrap"), To: nodes[1].NodeID, Target: nodes[0].NodeID.Key(), CloserNodes: []kadt.PeerID{nodes[2].NodeID}, @@ -99,7 +100,7 @@ func TestRoutingBootstrapGetClosestNodesFailure(t *testing.T) { failure := errors.New("failed") ev := &EventGetCloserNodesFailure{ - QueryID: query.QueryID("bootstrap"), + QueryID: coordt.QueryID("bootstrap"), To: nodes[1].NodeID, Target: nodes[0].NodeID.Key(), Err: failure, @@ -163,7 +164,7 @@ func TestRoutingIncludeGetClosestNodesSuccess(t *testing.T) { routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) ev := &EventGetCloserNodesSuccess{ - QueryID: query.QueryID("include"), + QueryID: coordt.QueryID("include"), To: nodes[1].NodeID, Target: nodes[0].NodeID.Key(), CloserNodes: []kadt.PeerID{nodes[2].NodeID}, @@ -197,7 +198,7 @@ func TestRoutingIncludeGetClosestNodesFailure(t *testing.T) { failure := errors.New("failed") ev := &EventGetCloserNodesFailure{ - QueryID: query.QueryID("include"), + QueryID: coordt.QueryID("include"), To: nodes[1].NodeID, Target: nodes[0].NodeID.Key(), Err: failure, @@ -290,6 +291,6 @@ func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { // confirm that the message is for the correct node oev = dev.(*EventOutboundGetCloserNodes) - require.Equal(t, query.QueryID("probe"), oev.QueryID) + require.Equal(t, coordt.QueryID("probe"), oev.QueryID) require.Equal(t, candidate, oev.To) } diff --git a/v2/kadt/kadt.go b/v2/kadt/kadt.go index 2ad4bbef..6e6aa8c2 100644 --- a/v2/kadt/kadt.go +++ b/v2/kadt/kadt.go @@ -11,8 +11,58 @@ import ( ) // Key is a type alias for the type of key that's used with this DHT -// implementation. -type Key = key.Key256 +// implementation. In the Amino DHT, we are sending around the preimage +// of the actual key that's used for calculating Kademlia distance. That's +// why this Key struct also holds the preimage bytes. +type Key struct { + key key.Key256 + preimage []byte +} + +var _ kad.Key[Key] = (*Key)(nil) + +// NewKey initializes a new key struct based on the given preimage bytes. 
These
+// bytes are SHA256 hashed and stored as the actual Kademlia key that's used
+// to calculate distances in the XOR keyspace.
+func NewKey(preimage []byte) Key {
+	h := sha256.Sum256(preimage)
+	return Key{
+		key:      key.NewKey256(h[:]),
+		preimage: preimage,
+	}
+}
+
+// MsgKey returns the bytes that should be used inside Kademlia RPC messages.
+// The returned value is the preimage to the actual Kademlia key. To arrive
+// at the Kademlia key, these MsgKey bytes must be SHA256 hashed.
+func (k Key) MsgKey() []byte {
+	return k.preimage
+}
+
+func (k Key) BitLen() int {
+	return k.key.BitLen()
+}
+
+func (k Key) Bit(i int) uint {
+	return k.key.Bit(i)
+}
+
+func (k Key) Xor(other Key) Key {
+	return Key{key: k.key.Xor(other.key)}
+}
+
+func (k Key) CommonPrefixLength(other Key) int {
+	return k.key.CommonPrefixLength(other.key)
+}
+
+func (k Key) Compare(other Key) int {
+	return k.key.Compare(other.key)
+}
+
+// HexString returns a string containing the hexadecimal representation of the key.
+func (k Key) HexString() string {
+	return k.key.HexString()
+}
 
 // PeerID is a type alias for [peer.ID] that implements the [kad.NodeID]
 // interface. This means we can use PeerID for any operation that interfaces
@@ -26,8 +76,7 @@ var _ kad.NodeID[Key] = PeerID("")
 // SHA256 hashes of, in this case, peer.IDs. This means this Key method takes
 // the [peer.ID], hashes it and constructs a 256-bit key.
 func (p PeerID) Key() Key {
-	h := sha256.Sum256([]byte(p))
-	return key.NewKey256(h[:])
+	return NewKey([]byte(p))
 }
 
 // String calls String on the underlying [peer.ID] and returns a string like
diff --git a/v2/pb/msg.aux.go b/v2/pb/msg.aux.go
index 68ac067a..d8066dfe 100644
--- a/v2/pb/msg.aux.go
+++ b/v2/pb/msg.aux.go
@@ -2,12 +2,11 @@ package pb
 
 import (
 	"bytes"
-	"crypto/sha256"
+	"fmt"
 
 	"github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
 	"github.com/libp2p/go-libp2p/core/peer"
 	ma "github.com/multiformats/go-multiaddr"
-	"github.com/plprobelab/go-kademlia/key"
 	"golang.org/x/exp/slog"
 )
 
@@ -15,8 +14,30 @@ import (
 // It is used to let these types conform to interfaces or add convenience methods.
 
 func (m *Message) Target() kadt.Key {
-	b := sha256.Sum256(m.Key)
-	return key.NewKey256(b[:])
+	return kadt.NewKey(m.Key)
+}
+
+// ExpectResponse returns true if we expect a response from the remote peer
+// when we send a message of the given type to them. For example, when a peer
+// sends a PUT_VALUE message to another peer, that other peer won't respond
+// with anything.
+func (m *Message) ExpectResponse() bool {
+	switch m.Type {
+	case Message_PUT_VALUE:
+		return false
+	case Message_GET_VALUE:
+		return true
+	case Message_ADD_PROVIDER:
+		return false
+	case Message_GET_PROVIDERS:
+		return true
+	case Message_FIND_NODE:
+		return true
+	case Message_PING:
+		return true
+	default:
+		panic(fmt.Sprintf("unexpected message type %d", m.Type))
+	}
+}
 
 // FromAddrInfo constructs a [Message_Peer] from the given [peer.AddrInfo].
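[Editor's note: a small sketch of the preimage round-trip that the kadt.Key change above enables; the preimage bytes are purely illustrative:

	// the wire message carries the preimage, not the hashed Kademlia key
	k := kadt.NewKey([]byte("example-preimage"))
	msg := &pb.Message{Type: pb.Message_GET_VALUE, Key: k.MsgKey()}

	// Target SHA256-hashes msg.Key again and lands on the same point in the
	// XOR keyspace, so comparing both keys reports equality
	equal := msg.Target().Compare(k) == 0 // true

	// GET_VALUE is a request type, so a caller should wait for a reply
	expectReply := msg.ExpectResponse() // true
	_, _ = equal, expectReply
]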
diff --git a/v2/pb/msg.aux_test.go b/v2/pb/msg.aux_test.go index dc3bd016..757b5c00 100644 --- a/v2/pb/msg.aux_test.go +++ b/v2/pb/msg.aux_test.go @@ -2,8 +2,31 @@ package pb import ( "testing" + + "github.com/stretchr/testify/assert" ) +func TestMessage_ExpectResponse(t *testing.T) { + t.Run("all covered", func(t *testing.T) { + defer func() { + assert.Nil(t, recover()) + }() + + for msgTypeInt := range Message_MessageType_name { + msg := &Message{Type: Message_MessageType(msgTypeInt)} + msg.ExpectResponse() + } + }) + + t.Run("unexpected type", func(t *testing.T) { + defer func() { + assert.NotNil(t, recover()) + }() + msg := &Message{Type: Message_MessageType(-1)} + msg.ExpectResponse() + }) +} + func TestMessage_Peer_invalid_maddr(t *testing.T) { msg := Message_Peer{ Addrs: [][]byte{[]byte("invalid-maddr")}, diff --git a/v2/query_test.go b/v2/query_test.go index 86ea55c2..bb628a0c 100644 --- a/v2/query_test.go +++ b/v2/query_test.go @@ -5,7 +5,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) @@ -22,11 +22,11 @@ func TestRTAdditionOnSuccessfulQuery(t *testing.T) { // d3 does not know about d1 _, err := d3.kad.GetNode(ctx, kadt.PeerID(d1.host.ID())) - require.ErrorIs(t, err, coord.ErrNodeNotFound) + require.ErrorIs(t, err, coordt.ErrNodeNotFound) // d1 does not know about d3 _, err = d1.kad.GetNode(ctx, kadt.PeerID(d3.host.ID())) - require.ErrorIs(t, err, coord.ErrNodeNotFound) + require.ErrorIs(t, err, coordt.ErrNodeNotFound) // // but when d3 queries d2, d1 and d3 discover each other _, _ = d3.FindPeer(ctx, "something") diff --git a/v2/router.go b/v2/router.go index 70bd69ca..bc586a39 100644 --- a/v2/router.go +++ b/v2/router.go @@ -13,7 +13,7 @@ import ( "github.com/libp2p/go-msgio/pbio" "google.golang.org/protobuf/proto" - "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) @@ -26,15 +26,7 @@ type router struct { ProtocolID protocol.ID } -var _ coord.Router[kadt.Key, kadt.PeerID, *pb.Message] = (*router)(nil) - -func FindKeyRequest(k kadt.Key) *pb.Message { - marshalledKey, _ := k.MarshalBinary() - return &pb.Message{ - Type: pb.Message_FIND_NODE, - Key: marshalledKey, - } -} +var _ coordt.Router[kadt.Key, kadt.PeerID, *pb.Message] = (*router)(nil) func (r *router) SendMessage(ctx context.Context, to kadt.PeerID, req *pb.Message) (*pb.Message, error) { // TODO: what to do with addresses in peer.AddrInfo? 
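[Editor's note: the hunk below makes SendMessage return early for fire-and-forget message types. A sketch of the resulting calling contract, assuming coordt.Router exposes SendMessage with the signature used by the router type here:

	func sendOneWay(ctx context.Context, rtr coordt.Router[kadt.Key, kadt.PeerID, *pb.Message], to kadt.PeerID, req *pb.Message) error {
		resp, err := rtr.SendMessage(ctx, to, req)
		if err != nil {
			return err
		}
		if resp == nil {
			// req.ExpectResponse() was false (e.g. PUT_VALUE, ADD_PROVIDER):
			// the write went through, but the remote peer will not reply.
		}
		return nil
	}
]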
@@ -63,6 +55,10 @@ func (r *router) SendMessage(ctx context.Context, to kadt.PeerID, req *pb.Messag return nil, fmt.Errorf("write message: %w", err) } + if !req.ExpectResponse() { + return nil, nil + } + data, err := reader.ReadMsg() if err != nil { return nil, fmt.Errorf("read message: %w", err) @@ -80,7 +76,12 @@ func (r *router) SendMessage(ctx context.Context, to kadt.PeerID, req *pb.Messag } func (r *router) GetClosestNodes(ctx context.Context, to kadt.PeerID, target kadt.Key) ([]kadt.PeerID, error) { - resp, err := r.SendMessage(ctx, to, FindKeyRequest(target)) + req := &pb.Message{ + Type: pb.Message_FIND_NODE, + Key: target.MsgKey(), + } + + resp, err := r.SendMessage(ctx, to, req) if err != nil { return nil, err } diff --git a/v2/routing.go b/v2/routing.go index 569e66b9..eec85c30 100644 --- a/v2/routing.go +++ b/v2/routing.go @@ -8,16 +8,18 @@ import ( "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" - "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord" - "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" - "github.com/libp2p/go-libp2p-kad-dht/v2/pb" record "github.com/libp2p/go-libp2p-record" recpb "github.com/libp2p/go-libp2p-record/pb" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/routing" "go.opentelemetry.io/otel/attribute" otel "go.opentelemetry.io/otel/trace" + + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) var _ routing.Routing = (*DHT)(nil) @@ -42,10 +44,10 @@ func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { target := kadt.PeerID(id) var foundPeer peer.ID - fn := func(ctx context.Context, visited kadt.PeerID, msg *pb.Message, stats coord.QueryStats) error { + fn := func(ctx context.Context, visited kadt.PeerID, msg *pb.Message, stats coordt.QueryStats) error { if peer.ID(visited) == id { foundPeer = peer.ID(visited) - return coord.ErrSkipRemaining + return coordt.ErrSkipRemaining } return nil } @@ -89,8 +91,22 @@ func (d *DHT) Provide(ctx context.Context, c cid.Cid, brdcst bool) error { return nil } - // TODO reach out to Zikade - panic("implement me") + // construct message + addrInfo := peer.AddrInfo{ + ID: d.host.ID(), + Addrs: d.host.Addrs(), + } + + msg := &pb.Message{ + Type: pb.Message_ADD_PROVIDER, + Key: c.Hash(), + ProviderPeers: []*pb.Message_Peer{ + pb.FromAddrInfo(addrInfo), + }, + } + + // finally, find the closest peers to the target key. + return d.kad.BroadcastRecord(ctx, msg) } func (d *DHT) FindProvidersAsync(ctx context.Context, c cid.Cid, count int) <-chan peer.AddrInfo { @@ -110,15 +126,46 @@ func (d *DHT) FindProvidersAsync(ctx context.Context, c cid.Cid, count int) <-ch panic("implement me") } -func (d *DHT) PutValue(ctx context.Context, key string, value []byte, option ...routing.Option) error { +// PutValue satisfies the [routing.Routing] interface and will add the given +// value to the k-closest nodes to keyStr. The parameter keyStr should have the +// format `/$namespace/$binary_id`. Namespace examples are `pk` or `ipns`. To +// identify the closest peers to keyStr, that complete string will be SHA256 +// hashed. 
+func (d *DHT) PutValue(ctx context.Context, keyStr string, value []byte, opts ...routing.Option) error {
 	ctx, span := d.tele.Tracer.Start(ctx, "DHT.PutValue")
 	defer span.End()
 
-	if err := d.putValueLocal(ctx, key, value); err != nil {
+	// first parse the routing options
+	rOpt := routing.Options{} // routing config
+	if err := rOpt.Apply(opts...); err != nil {
+		return fmt.Errorf("apply routing options: %w", err)
+	}
+
+	// then always store the given value locally
+	if err := d.putValueLocal(ctx, keyStr, value); err != nil {
 		return fmt.Errorf("put value locally: %w", err)
 	}
 
-	panic("implement me")
+	// if the routing system should operate in offline mode, stop here
+	if rOpt.Offline {
+		return nil
+	}
+
+	// construct the message. Note that the Kademlia target key is derived
+	// by SHA256-hashing the complete key string, namespace prefix included.
+	msg := &pb.Message{
+		Type:   pb.Message_PUT_VALUE,
+		Key:    []byte(keyStr),
+		Record: record.MakePutRecord(keyStr, value),
+	}
+
+	// finally, find the closest peers to the target key.
+	err := d.kad.BroadcastRecord(ctx, msg)
+	if err != nil {
+		return fmt.Errorf("query error: %w", err)
+	}
+
+	return nil
 }
 
 // putValueLocal stores a value in the local datastore without querying the network.
@@ -166,7 +213,7 @@ func (d *DHT) GetValue(ctx context.Context, key string, option ...routing.Option
 
 	// TODO: quorum
 	var value []byte
-	fn := func(ctx context.Context, id kadt.PeerID, resp *pb.Message, stats coord.QueryStats) error {
+	fn := func(ctx context.Context, id kadt.PeerID, resp *pb.Message, stats coordt.QueryStats) error {
 		if resp == nil {
 			return nil
 		}
@@ -181,7 +228,7 @@ func (d *DHT) GetValue(ctx context.Context, key string, option ...routing.Option
 
 		value = resp.GetRecord().GetValue()
 
-		return coord.ErrSkipRemaining
+		return coordt.ErrSkipRemaining
 	}
 
 	_, err = d.kad.QueryMessage(ctx, req, fn, d.cfg.BucketSize)
@@ -216,6 +263,7 @@ func (d *DHT) getValueLocal(ctx context.Context, key string) ([]byte, error) {
 	if !ok {
 		return nil, fmt.Errorf("expected *recpb.Record from backend, got: %T", val)
 	}
+
 	return rec.GetValue(), nil
 }
 
@@ -233,7 +281,10 @@ func (d *DHT) Bootstrap(ctx context.Context) error {
 	seed := make([]kadt.PeerID, len(d.cfg.BootstrapPeers))
 	for i, addrInfo := range d.cfg.BootstrapPeers {
 		seed[i] = kadt.PeerID(addrInfo.ID)
-		d.host.Peerstore().AddAddrs(addrInfo.ID, addrInfo.Addrs, time.Hour) // TODO: TTL
+		// TODO: how to handle the TTL if the BootstrapPeers become dynamic
+		// and don't point to stable peers, or consist of ephemeral peers
+		// that we observed during a previous run.
+ d.host.Peerstore().AddAddrs(addrInfo.ID, addrInfo.Addrs, peerstore.PermanentAddrTTL) } return d.kad.Bootstrap(ctx, seed) diff --git a/v2/routing_test.go b/v2/routing_test.go index ec80da31..8647b56e 100644 --- a/v2/routing_test.go +++ b/v2/routing_test.go @@ -1,11 +1,12 @@ package dht import ( - "fmt" "testing" "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/routing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" @@ -21,9 +22,38 @@ func makePkKeyValue(t *testing.T) (string, []byte) { id, err := peer.IDFromPublicKey(pub) require.NoError(t, err) - key := fmt.Sprintf("/pk/%s", string(id)) + return routing.KeyForPublicKey(id), v +} + +func TestDHT_PutValue_local_only(t *testing.T) { + ctx := kadtest.CtxShort(t) + + top := NewTopology(t) + d := top.AddServer(nil) + + key, v := makePkKeyValue(t) + + err := d.PutValue(ctx, key, v, routing.Offline) + require.NoError(t, err) +} + +func TestDHT_PutValue_invalid_key(t *testing.T) { + ctx := kadtest.CtxShort(t) + + top := NewTopology(t) + d := top.AddClient(nil) + + _, v := makePkKeyValue(t) + + t.Run("unknown namespace", func(t *testing.T) { + err := d.PutValue(ctx, "/unknown/some_key", v) + assert.ErrorIs(t, err, routing.ErrNotSupported) + }) - return key, v + t.Run("no namespace", func(t *testing.T) { + err := d.PutValue(ctx, "no namespace", v) + assert.ErrorContains(t, err, "splitting key") + }) } func TestGetSetValueLocal(t *testing.T) { From ae5a094672ccfff131361a00eab61ed5925b9a88 Mon Sep 17 00:00:00 2001 From: Ian Davis <18375+iand@users.noreply.github.com> Date: Mon, 25 Sep 2023 11:28:15 +0100 Subject: [PATCH 53/64] Add explore state machine to expand population of routing table (#934) * Improve query capabilities * go mod tidy * Review feedback * go mod tidy * Move coord packages to internal (#933) * Move coord and kadt packages to internal * go mod tidy * go fmt * Move kadt out of internal and add RoutingTable interface * Add explore state machine to expand population of routing table * Refactor schedule into separate type * Add generation of random peer id for a given cpl * go mod tidy * Add prefixmap generator * Use constants for various query ids * go mod tidy * Wire explore state machine into routing behaviour * Remove some unnecessary conversions * PR review updates --- v2/go.mod | 2 +- v2/internal/coord/behaviour_test.go | 7 - v2/internal/coord/coordinator.go | 18 +- v2/internal/coord/coordinator_test.go | 8 +- v2/internal/coord/cplutil/cpl.go | 58 + v2/internal/coord/cplutil/cpl_prefixmap.go | 4101 +++++++++++++++++ v2/internal/coord/cplutil/cpl_test.go | 60 + v2/internal/coord/cplutil/gen.go | 76 + v2/internal/coord/event.go | 6 + v2/internal/coord/internal/nettest/layouts.go | 4 +- v2/internal/coord/internal/nettest/routing.go | 2 +- v2/internal/coord/internal/tiny/node.go | 13 + v2/internal/coord/internal/tiny/node_test.go | 15 + v2/internal/coord/routing.go | 136 +- v2/internal/coord/routing/bootstrap.go | 7 +- v2/internal/coord/routing/explore.go | 485 ++ v2/internal/coord/routing/explore_test.go | 387 ++ v2/internal/coord/routing/probe.go | 6 +- v2/internal/coord/routing_test.go | 156 +- 19 files changed, 5471 insertions(+), 76 deletions(-) create mode 100644 v2/internal/coord/cplutil/cpl.go create mode 100644 v2/internal/coord/cplutil/cpl_prefixmap.go create mode 100644 v2/internal/coord/cplutil/cpl_test.go create mode 100644 v2/internal/coord/cplutil/gen.go create mode 
100644 v2/internal/coord/routing/explore.go create mode 100644 v2/internal/coord/routing/explore_test.go diff --git a/v2/go.mod b/v2/go.mod index 5eb07d61..5a0b9e6b 100644 --- a/v2/go.mod +++ b/v2/go.mod @@ -15,7 +15,7 @@ require ( github.com/libp2p/go-msgio v0.3.0 github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-multiaddr v0.11.0 - github.com/multiformats/go-multihash v0.2.3 // indirect + github.com/multiformats/go-multihash v0.2.3 github.com/pkg/errors v0.9.1 // indirect github.com/plprobelab/go-kademlia v0.0.0-20230913171354-443ec1f56080 github.com/prometheus/client_golang v1.16.0 // indirect diff --git a/v2/internal/coord/behaviour_test.go b/v2/internal/coord/behaviour_test.go index 20464c30..ed0761aa 100644 --- a/v2/internal/coord/behaviour_test.go +++ b/v2/internal/coord/behaviour_test.go @@ -4,13 +4,6 @@ import ( "context" ) -type NullSM[E any, S any] struct{} - -func (NullSM[E, S]) Advance(context.Context, E) S { - var v S - return v -} - type RecordingSM[E any, S any] struct { State S Received E diff --git a/v2/internal/coord/coordinator.go b/v2/internal/coord/coordinator.go index 18e09467..d5838929 100644 --- a/v2/internal/coord/coordinator.go +++ b/v2/internal/coord/coordinator.go @@ -22,6 +22,7 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/brdcst" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/cplutil" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/routing" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" @@ -229,7 +230,22 @@ func NewCoordinator(self kadt.PeerID, rtr coordt.Router[kadt.Key, kadt.PeerID, * return nil, fmt.Errorf("probe: %w", err) } - routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, cfg.Logger, tele.Tracer) + exploreCfg := routing.DefaultExploreConfig() + exploreCfg.Clock = cfg.Clock + exploreCfg.Timeout = cfg.QueryTimeout + + schedule, err := routing.NewDynamicExploreSchedule(14, cfg.Clock.Now(), time.Hour, 1, 0) + if err != nil { + return nil, fmt.Errorf("explore schedule: %w", err) + } + + // TODO: expose more config + explore, err := routing.NewExplore[kadt.Key](self, rt, cplutil.GenRandPeerID, schedule, exploreCfg) + if err != nil { + return nil, fmt.Errorf("explore: %w", err) + } + + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, explore, cfg.Logger, tele.Tracer) networkBehaviour := NewNetworkBehaviour(rtr, cfg.Logger, tele.Tracer) diff --git a/v2/internal/coord/coordinator_test.go b/v2/internal/coord/coordinator_test.go index bbec6a38..716fe1e6 100644 --- a/v2/internal/coord/coordinator_test.go +++ b/v2/internal/coord/coordinator_test.go @@ -99,11 +99,11 @@ func TestExhaustiveQuery(t *testing.T) { // A (ids[0]) is looking for D (ids[3]) // A will first ask B, B will reply with C's address (and A's address) // A will then ask C, C will reply with D's address (and B's address) - self := kadt.PeerID(nodes[0].NodeID) + self := nodes[0].NodeID c, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg) require.NoError(t, err) - target := kadt.PeerID(nodes[3].NodeID).Key() + target := nodes[3].NodeID.Key() visited := make(map[string]int) @@ -137,7 +137,7 @@ func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { // A (ids[0]) is looking for D (ids[3]) // A will first ask B, B will reply with C's address (and A's address) // A will then ask C, C will reply with D's address (and B's address) - self := 
kadt.PeerID(nodes[0].NodeID) + self := nodes[0].NodeID c, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg) if err != nil { log.Fatalf("unexpected error creating coordinator: %v", err) @@ -194,7 +194,7 @@ func TestBootstrap(t *testing.T) { ccfg.Clock = clk - self := kadt.PeerID(nodes[0].NodeID) + self := nodes[0].NodeID d, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg) require.NoError(t, err) diff --git a/v2/internal/coord/cplutil/cpl.go b/v2/internal/coord/cplutil/cpl.go new file mode 100644 index 00000000..25ac9744 --- /dev/null +++ b/v2/internal/coord/cplutil/cpl.go @@ -0,0 +1,58 @@ +package cplutil + +import ( + "crypto/rand" + "encoding/binary" + "fmt" + + mh "github.com/multiformats/go-multihash" + + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" +) + +//go:generate go run ./gen.go + +// GenRandPeerID generates a random [kadt.PeerID] whose key has a common prefix length of exactly cpl with the supplied key. +// Ported from go-libp2p-kbucket +func GenRandPeerID(k kadt.Key, cpl int) (kadt.PeerID, error) { + if cpl > 15 { + return "", fmt.Errorf("cannot generate peer ID for Cpl greater than 15") + } + + targetPrefix := prefix(k, cpl) + + // Convert to a known peer ID. + key := keyPrefixMap[targetPrefix] + id := [32 + 2]byte{mh.SHA2_256, 32} + binary.BigEndian.PutUint32(id[2:], key) + return kadt.PeerID(string(id[:])), nil +} + +type keybit interface { + Bit(i int) uint +} + +// prefix generates random bits that have a common prefix length of exactly cpl with the supplied key. +func prefix(k keybit, cpl int) uint16 { + var p uint16 + // copy the first cpl+1 bits so we can flip the last one + for i := 0; i < cpl+1; i++ { + bit := uint16(k.Bit(i)) << (15 - i) + p |= bit + } + + // flip the bit at cpl (cpl 5 means bits 0-4 must be the same) + mask := uint16(1) << (15 - cpl) + p ^= mask + + if cpl < 15 { + // pad with random data + var buf [2]byte + _, _ = rand.Read(buf[:]) + r := binary.BigEndian.Uint16(buf[:]) + + mask = (^uint16(0)) << (15 - cpl) + p = (p & mask) | (r & ^mask) + } + return p +} diff --git a/v2/internal/coord/cplutil/cpl_prefixmap.go b/v2/internal/coord/cplutil/cpl_prefixmap.go new file mode 100644 index 00000000..e14db9a7 --- /dev/null +++ b/v2/internal/coord/cplutil/cpl_prefixmap.go @@ -0,0 +1,4101 @@ +package cplutil + +// Code generated by ./gen.go DO NOT EDIT +var keyPrefixMap = [...]uint32{ + 77591, 94053, 60620, 45849, 22417, 13238, 102507, 179931, 43971, 15812, 24466, 64694, 28421, 80794, 13447, 118511, + 740, 6439, 164565, 160996, 29829, 65024, 115728, 46297, 71467, 26874, 47057, 19864, 228973, 57886, 62422, 50382, + 196661, 98858, 8131, 154708, 78537, 104511, 53134, 136579, 27689, 126238, 28199, 3679, 36431, 48892, 2655, 57939, + 44415, 38209, 7970, 34780, 14362, 51843, 23108, 52670, 19456, 36805, 408716, 129012, 106025, 12683, 780, 36702, + 96308, 73261, 165714, 94326, 2882, 15786, 65607, 80947, 49509, 13763, 104712, 13107, 21149, 137011, 223495, 30903, + 87173, 75141, 2533, 121964, 131409, 110026, 108394, 16009, 75844, 196819, 1440, 7629, 23676, 111231, 127712, 61087, + 121838, 51872, 29103, 7233, 30291, 24088, 110490, 92353, 17492, 113372, 16487, 97612, 2953, 9394, 210912, 8964, + 7564, 3852, 97455, 42207, 110620, 22643, 65016, 7253, 129477, 46969, 7830, 43238, 127283, 37807, 65596, 47230, + 53113, 68778, 42174, 3025, 72417, 113389, 61485, 3233, 165166, 23272, 207684, 1480, 109690, 77717, 146330, 35614, + 21200, 125839, 9167, 183529, 102125, 27762, 21718, 34784, 24049, 54922, 44135, 54112, 71504, 58952, 18652, 
36112, + 90342, 97581, 105898, 116695, 25307, 71711, 19850, 443067, 72039, 164371, 99358, 141908, 26812, 37120, 222981, 92235, + 26715, 2272, 38699, 277092, 32264, 2507, 11509, 41396, 133800, 81066, 75726, 51643, 71161, 32364, 125073, 195906, + 88956, 8820, 58708, 60150, 171987, 43866, 50300, 27077, 51779, 41724, 18910, 42608, 24425, 59574, 40645, 30367, + 16671, 106324, 56018, 73410, 30251, 125091, 17154, 23172, 186294, 741, 111661, 148919, 247761, 71695, 148683, 76545, + 14202, 32826, 57291, 56464, 2121, 52187, 36887, 19845, 8465, 15701, 42227, 10603, 35024, 129005, 20364, 271992, + 4876, 54659, 43090, 48318, 85917, 40506, 60228, 35848, 169730, 2400, 19908, 21535, 3638, 2880, 105194, 37121, + 256836, 27972, 59367, 47659, 96184, 20378, 6352, 132486, 943, 210847, 347244, 42708, 18678, 161556, 4520, 63681, + 6583, 138160, 207565, 4182, 52907, 72891, 36505, 33320, 35807, 152018, 13288, 904, 112254, 139219, 23049, 24474, + 214097, 14830, 47960, 50966, 18796, 25821, 749, 61464, 11595, 123216, 5285, 37544, 9243, 80395, 22070, 63873, + 23554, 106570, 90364, 35779, 887, 61552, 55147, 3791, 268203, 76040, 13872, 53070, 382004, 149091, 9411, 70938, + 24590, 26314, 23297, 60821, 111335, 56198, 123964, 28317, 11625, 39656, 33077, 122186, 16619, 2762, 8556, 43622, + 29039, 54719, 141778, 30583, 102425, 30319, 55618, 4660, 69006, 75066, 46293, 24767, 97976, 8387, 5680, 68535, + 92362, 327684, 180600, 43548, 32552, 905, 167743, 10812, 63717, 48600, 4157, 19832, 41433, 44366, 169717, 362623, + 128974, 242972, 74944, 25914, 137630, 138732, 9905, 65119, 59943, 13001, 10439, 346877, 10019, 72338, 47424, 90540, + 13986, 32605, 74311, 36273, 35430, 43274, 490600, 15654, 33665, 40911, 16891, 132492, 108037, 118859, 30430, 45629, + 43799, 65831, 25824, 63966, 43280, 70552, 34778, 102075, 38195, 5993, 20515, 11742, 29078, 67047, 980, 30234, + 58629, 68076, 5792, 59696, 18265, 2627, 47407, 29302, 14425, 46647, 15604, 15925, 46832, 5440, 684, 42003, + 235538, 28764, 54452, 25101, 40830, 8023, 6501, 50689, 77881, 5650, 16800, 16147, 110717, 28112, 219637, 1634, + 58937, 32412, 88801, 6927, 3463, 157022, 94779, 442571, 325358, 276, 141280, 75559, 51300, 58421, 109559, 35845, + 47623, 321870, 24845, 42379, 117252, 19971, 14000, 130543, 19007, 191657, 1705, 32933, 10170, 64831, 2632, 89911, + 20540, 14737, 53476, 30106, 91237, 23474, 41156, 76048, 294813, 109786, 153316, 31289, 4951, 134188, 5698, 58898, + 79841, 8216, 13373, 150001, 56232, 83956, 179514, 40785, 36270, 150581, 38142, 36729, 128547, 27488, 48397, 32074, + 69209, 83991, 69639, 44375, 66275, 50325, 46119, 4588, 100156, 57453, 106674, 3707, 32063, 12250, 176480, 94462, + 73531, 42286, 44132, 42292, 34439, 205098, 23362, 170867, 80937, 18578, 35224, 8003, 28892, 73415, 50905, 36012, + 44466, 3377, 68122, 77350, 88595, 16048, 139321, 45304, 216307, 26958, 49160, 2333, 32583, 197092, 51650, 27957, + 49620, 28596, 32484, 40154, 16605, 3672, 19287, 14394, 82127, 113881, 101822, 55495, 45807, 22719, 49287, 17105, + 21630, 9213, 225560, 184754, 78726, 55879, 1187, 55736, 20235, 48276, 60072, 8055, 40163, 71435, 10613, 66014, + 111007, 30011, 11754, 32797, 96926, 8244, 35114, 58420, 5567, 8879, 4349, 36989, 72083, 27721, 80502, 31714, + 21665, 68483, 67000, 32243, 58844, 22490, 151524, 85501, 39419, 31544, 46585, 60252, 179767, 135313, 38991, 99008, + 48328, 21411, 230904, 25457, 42662, 73162, 35923, 104338, 51550, 37715, 30664, 24386, 5251, 34179, 21686, 23914, + 37811, 77986, 123822, 22186, 49608, 218194, 113768, 119158, 81056, 136532, 36573, 4335, 50854, 
77454, 36591, 786, + 55513, 89905, 64981, 78223, 20922, 90512, 58000, 187805, 18891, 142810, 7204, 125174, 197409, 232663, 64781, 31572, + 164656, 137833, 103498, 55315, 32593, 91963, 91694, 30505, 71449, 150025, 16975, 134836, 220474, 56258, 1789, 23900, + 58919, 39771, 52833, 15954, 85682, 182360, 82050, 60999, 67854, 36289, 50792, 14607, 13758, 73909, 111848, 63880, + 35066, 107613, 145156, 26237, 3565, 8173, 214338, 1836, 61905, 82544, 35483, 19741, 214793, 18510, 3395, 10924, + 119572, 75264, 17466, 43207, 141419, 82668, 39303, 19609, 21504, 19695, 19065, 6944, 10302, 38666, 102996, 88789, + 27354, 75138, 70106, 135106, 67003, 20045, 60619, 54525, 46131, 115306, 12445, 86777, 32668, 68413, 32737, 64388, + 15165, 34095, 171569, 11093, 64871, 119058, 92294, 117952, 34450, 66009, 203796, 6258, 17821, 52488, 314552, 125812, + 2757, 95795, 15139, 46369, 11452, 76801, 3035, 9101, 34189, 14945, 7202, 149174, 5160, 74854, 169046, 30085, + 12257, 76562, 92934, 170882, 85523, 121128, 60225, 45744, 560, 62173, 205019, 128933, 53385, 94, 81804, 5962, + 65887, 9406, 75139, 46078, 119549, 87470, 126330, 115083, 135620, 90768, 93971, 66716, 312353, 69610, 203240, 65196, + 115979, 13452, 77397, 23, 122356, 131305, 48028, 43698, 10867, 95182, 47337, 60657, 193231, 4430, 32675, 100177, + 124537, 49701, 68459, 417255, 54783, 44031, 66481, 29365, 90675, 20969, 21022, 49332, 120791, 87739, 113524, 8715, + 4715, 33049, 64432, 86239, 142253, 763, 145381, 11942, 50943, 44118, 117335, 69368, 17271, 82615, 97767, 8516, + 43358, 61812, 117693, 77645, 25331, 71884, 62816, 56740, 4917, 126017, 38232, 39911, 120566, 45088, 86073, 19308, + 34580, 62715, 98835, 12238, 12878, 32818, 80514, 190672, 33786, 124897, 32390, 13707, 160528, 8239, 24113, 94911, + 32523, 8473, 305619, 143741, 4869, 226676, 116030, 72714, 301307, 245805, 49902, 13070, 104817, 63744, 25320, 14079, + 81491, 66562, 24649, 6335, 23276, 12633, 45891, 31344, 8832, 19031, 49267, 95191, 97911, 27244, 61726, 53839, + 31265, 81626, 4566, 137532, 52065, 115327, 11846, 252068, 7998, 22402, 10126, 209408, 49622, 16068, 12953, 24383, + 9715, 82577, 95468, 95106, 43998, 60754, 21093, 14837, 34091, 72540, 179063, 7433, 84587, 192802, 47914, 4438, + 20664, 45500, 8855, 16934, 69041, 12731, 29041, 217180, 29419, 22657, 137482, 2887, 53205, 550, 70043, 123839, + 10838, 164726, 42397, 184876, 58288, 26641, 22447, 12131, 116145, 22995, 97093, 108266, 6185, 2832, 52427, 64656, + 5154, 49928, 144137, 12044, 141795, 129976, 31641, 84599, 35924, 2502, 28404, 26000, 21307, 63600, 20886, 165871, + 144738, 353334, 45550, 4235, 43730, 54853, 149395, 14340, 12085, 6025, 82291, 127186, 8279, 7961, 81927, 74078, + 10002, 50016, 8795, 38560, 119, 45637, 190798, 21574, 133779, 97318, 19903, 27528, 199668, 1330, 66035, 21635, + 72938, 31184, 60710, 108060, 31768, 145285, 89744, 113430, 39176, 71121, 10578, 19002, 67875, 39253, 95870, 17637, + 38453, 35956, 214432, 92498, 9700, 51981, 75487, 140364, 44144, 248414, 34793, 35244, 4121, 13131, 29680, 132109, + 116048, 51552, 20482, 69742, 41733, 134398, 163626, 2676, 12868, 9786, 36799, 26675, 82669, 19252, 28098, 76936, + 92308, 127797, 49202, 5337, 128, 27975, 178978, 22753, 34262, 94544, 214584, 43276, 11332, 665, 58732, 8484, + 7712, 180682, 90181, 28567, 90764, 20944, 68372, 62049, 36141, 29920, 115786, 1365, 13553, 110638, 163556, 207080, + 71312, 250718, 214174, 18727, 77470, 23807, 32279, 108909, 117314, 4887, 61022, 41180, 96549, 116044, 1081, 78818, + 49135, 8305, 20213, 10021, 23602, 148923, 39033, 76575, 54468, 41625, 
121743, 61361, 28605, 110339, 97381, 108784, + 6327, 58565, 37906, 2722, 62308, 42415, 120829, 226683, 17171, 16955, 32278, 42441, 67531, 82112, 7044, 8333, + 21319, 4625, 67693, 83024, 14105, 107392, 18658, 14247, 894, 35117, 78964, 71644, 107722, 11889, 4981, 16504, + 46157, 86476, 243104, 110164, 8503, 65279, 38377, 50730, 51069, 170106, 155778, 36441, 100472, 8367, 14072, 2456, + 45138, 1449, 85419, 56978, 15246, 51849, 58602, 75312, 14577, 34388, 14985, 214746, 35609, 94173, 205371, 29378, + 191464, 60659, 83825, 4266, 1757, 79901, 4005, 96090, 13364, 26836, 20634, 9902, 161349, 52221, 57608, 45087, + 32067, 12041, 24449, 122590, 91705, 4841, 5595, 1962, 81144, 94514, 7189, 65466, 52339, 115937, 30039, 184359, + 5408, 37938, 13094, 131687, 91066, 50656, 3538, 308588, 21983, 117880, 124083, 8740, 14157, 207581, 132848, 24615, + 100545, 35998, 13259, 94379, 4372, 221513, 9160, 14015, 26630, 42025, 87194, 4685, 129112, 37014, 5514, 1659, + 1423, 35031, 86869, 42243, 29676, 77384, 91770, 8949, 213626, 219087, 14943, 2758, 4397, 146113, 19935, 39810, + 88436, 21548, 15622, 47174, 99190, 170858, 31675, 22540, 6877, 25282, 66955, 39440, 49958, 3702, 59942, 3443, + 26122, 118447, 24469, 28429, 114348, 66350, 72579, 194, 60661, 14964, 70751, 30122, 29818, 134851, 14530, 25859, + 293118, 32210, 11158, 134437, 50042, 50868, 124554, 56791, 179738, 112687, 67437, 80580, 16400, 32499, 35433, 38147, + 163423, 62209, 109887, 21489, 89627, 8619, 37255, 42560, 31040, 3283, 221255, 26057, 43973, 176482, 84209, 74565, + 36638, 128029, 50150, 53376, 45952, 23372, 136030, 19408, 5153, 189398, 9461, 12142, 1894, 150004, 6947, 43095, + 109322, 74270, 235743, 8877, 1898, 12589, 62161, 150831, 134021, 76036, 32418, 114411, 12402, 9784, 152424, 2030, + 112077, 39948, 15299, 91532, 68309, 58254, 74157, 68071, 190269, 1807, 48227, 14614, 69866, 175786, 53526, 77245, + 31938, 86410, 49785, 5548, 107383, 26754, 6925, 99713, 11522, 112823, 36879, 191627, 105232, 112178, 9544, 115058, + 11248, 121092, 115523, 216088, 14868, 164602, 6984, 12211, 39852, 3557, 11388, 124397, 71707, 42768, 81029, 87167, + 186525, 134029, 24303, 29049, 16530, 60454, 1801, 70482, 38162, 186140, 17626, 75869, 106212, 3301, 149347, 83560, + 11700, 132692, 2213, 6118, 5130, 19621, 133100, 5413, 16608, 6316, 6903, 20826, 26998, 46988, 14742, 36801, + 59586, 438, 115651, 12542, 108399, 50888, 73600, 74851, 230033, 11883, 313836, 13563, 43683, 27664, 16986, 54266, + 48135, 20496, 78612, 90668, 82179, 65157, 159306, 244506, 2073, 113828, 34210, 8905, 5015, 124130, 30133, 30478, + 196684, 40526, 10545, 25933, 189293, 20827, 73483, 91579, 16378, 24561, 168921, 100351, 23452, 105211, 31749, 3947, + 8301, 235867, 175604, 4648, 35640, 22045, 10909, 12114, 11632, 81578, 50578, 17722, 214551, 40781, 131060, 242797, + 29240, 41868, 116245, 182350, 57644, 27787, 59645, 42511, 33137, 64292, 86072, 2870, 91949, 108278, 14903, 186497, + 55157, 48398, 10332, 2801, 52384, 20759, 10283, 88468, 117313, 23727, 138084, 65635, 5090, 14195, 126767, 300, + 17717, 38157, 16186, 114320, 89668, 96676, 9742, 203368, 49363, 5035, 28964, 65388, 82238, 67525, 39995, 13922, + 241035, 69735, 11154, 193950, 66216, 72997, 12434, 16882, 29066, 91839, 31743, 96167, 184088, 75620, 1030, 139617, + 97206, 15695, 244555, 101352, 62820, 44153, 114812, 120196, 26595, 72217, 5935, 28488, 4241, 7832, 101557, 27041, + 135635, 10308, 337586, 23855, 173672, 15924, 5051, 10103, 8202, 360, 45227, 30801, 459, 13982, 27256, 9104, + 71355, 53611, 81898, 79904, 146294, 57705, 99956, 35919, 29587, 
21273, 89804, 41886, 3008, 100905, 29691, 22814, + 135385, 101754, 7790, 16486, 141203, 186158, 135150, 17125, 14803, 43200, 23042, 70352, 6634, 27432, 14596, 27017, + 45094, 251700, 107172, 92556, 69362, 224587, 20275, 239867, 50925, 67860, 22054, 35132, 546, 107574, 11246, 15583, + 51884, 52526, 41469, 90704, 62011, 30436, 4192, 20677, 83296, 40746, 43027, 18829, 234584, 59250, 10989, 12045, + 44515, 87149, 5814, 22428, 56050, 1304, 54193, 102712, 89476, 74967, 28363, 182054, 87751, 63858, 4667, 36435, + 19373, 13180, 80439, 20298, 12691, 59200, 175067, 68478, 149923, 65774, 50785, 75599, 19794, 24659, 40763, 18905, + 13833, 221290, 11814, 27472, 35846, 256569, 9769, 37905, 87557, 16393, 61774, 29056, 58339, 67859, 122835, 31673, + 2884, 29565, 225212, 50663, 19145, 154284, 7940, 13382, 25647, 46917, 107024, 18714, 12224, 8197, 11896, 129114, + 11024, 5323, 163976, 216168, 77338, 91508, 61901, 29134, 64608, 87645, 71475, 46110, 122297, 22635, 34837, 26310, + 53025, 53017, 10622, 90942, 7205, 22145, 163437, 101344, 36189, 355381, 3469, 59647, 36294, 29028, 61676, 33071, + 170779, 1619, 42455, 55588, 21750, 12494, 53664, 106939, 7739, 60501, 600, 42951, 173883, 121950, 75147, 44445, + 75192, 26282, 17177, 6729, 35664, 13478, 22319, 74388, 224240, 51121, 128054, 19973, 113121, 26367, 20959, 71130, + 30181, 27274, 83822, 65840, 26267, 141848, 7294, 161141, 27036, 20489, 14220, 74392, 117827, 12263, 18511, 12425, + 92015, 38371, 93826, 46517, 106516, 24959, 428957, 108509, 55628, 41208, 28538, 6694, 203549, 200020, 130157, 14026, + 67949, 261382, 34954, 75428, 60462, 34936, 69163, 8775, 60844, 95271, 14668, 58597, 35911, 163570, 17395, 41268, + 20457, 77077, 15920, 195151, 1820, 1127, 108523, 1201, 920, 64420, 142690, 3800, 19773, 18589, 25204, 114010, + 8738, 45928, 72305, 27317, 73173, 58181, 4109, 38698, 181993, 2002, 91269, 6577, 38521, 64761, 34725, 2779, + 98254, 99182, 109347, 42999, 76257, 42992, 2481, 76329, 46008, 9716, 174991, 37659, 92796, 26911, 126742, 21977, + 5384, 89414, 18739, 22923, 26868, 2989, 52591, 14973, 151566, 3554, 169141, 41484, 22124, 26749, 78963, 86727, + 2411, 21918, 43055, 36709, 15919, 32188, 39853, 31407, 186872, 106163, 35231, 3970, 180021, 86213, 133789, 47183, + 28099, 10825, 8315, 193036, 152961, 12221, 96811, 33623, 78811, 61925, 91812, 72246, 80237, 171243, 144270, 12504, + 62352, 69843, 208025, 139707, 102653, 182703, 42668, 65058, 74259, 143770, 10084, 32242, 184890, 53802, 20214, 60407, + 16792, 41310, 4184, 1636, 123702, 13335, 68718, 46717, 224945, 64844, 113887, 41497, 29940, 10587, 27431, 128017, + 19512, 17506, 17671, 26070, 75283, 42125, 47504, 37731, 14059, 88044, 36619, 847, 112691, 14770, 55376, 575, + 92811, 347152, 96947, 9385, 233329, 3093, 22326, 45207, 20411, 273167, 31247, 6125, 138569, 8663, 357575, 28073, + 53341, 234780, 21561, 48933, 109802, 48919, 46462, 50800, 50600, 21098, 18940, 1091, 134528, 14935, 2398, 127145, + 66747, 34702, 127805, 27345, 5529, 139548, 51994, 127312, 166531, 11082, 36587, 50668, 31578, 37535, 46230, 2150, + 64732, 41722, 91822, 21109, 67189, 47573, 20129, 8421, 1596, 16448, 126415, 81846, 126357, 140669, 1937, 32338, + 967, 39499, 14778, 48543, 167999, 24888, 12192, 41633, 206598, 60067, 160162, 11609, 109752, 3487, 45910, 15601, + 119431, 19179, 93578, 31236, 207825, 71291, 47437, 21034, 78791, 32425, 31613, 91908, 91938, 6225, 26499, 49240, + 10301, 34970, 12824, 99989, 27311, 35324, 133950, 14043, 24233, 61362, 22243, 35045, 252343, 28863, 12365, 8224, + 28831, 215245, 73325, 83362, 32812, 116785, 
100940, 77100, 66002, 61855, 60149, 24654, 112267, 65835, 54563, 141839, + 90895, 174574, 34653, 8453, 8786, 174076, 167014, 20249, 8095, 14050, 68580, 299481, 16824, 48793, 24856, 15716, + 22866, 165280, 33060, 49389, 21813, 47387, 179304, 131281, 60507, 145727, 21710, 16780, 174833, 11187, 19174, 11577, + 19549, 89709, 114442, 11917, 130985, 53665, 52636, 32837, 117051, 78060, 79585, 45117, 52110, 74026, 86227, 52956, + 6938, 48219, 29286, 23852, 81923, 55204, 370875, 58300, 123864, 14993, 25906, 17004, 38061, 191997, 56608, 197099, + 919, 5046, 126484, 79803, 18680, 145935, 124511, 60333, 53534, 6979, 35404, 23791, 46739, 36466, 2445, 19890, + 112893, 35958, 11939, 45333, 161529, 38751, 76585, 129315, 85429, 125900, 37046, 110236, 26761, 13725, 20554, 21155, + 11900, 10186, 81185, 44323, 81121, 127313, 181376, 68138, 91968, 77284, 14617, 15815, 15390, 1425, 15586, 9037, + 217947, 19393, 2643, 291035, 56524, 1195, 154070, 7980, 1713, 2618, 18959, 70645, 6654, 8986, 122964, 149447, + 37089, 79358, 120676, 39867, 85630, 173326, 14161, 103857, 138866, 98205, 107118, 105847, 61850, 48312, 3318, 110656, + 16491, 22884, 29985, 202016, 75577, 7108, 49432, 450007, 16884, 60351, 28287, 31574, 98296, 153369, 5508, 59238, + 73523, 2766, 134247, 6922, 6140, 15761, 20766, 33247, 44645, 98662, 62705, 5296, 6062, 16713, 27012, 204193, + 36366, 4251, 6513, 1097, 29844, 148369, 4030, 44421, 57946, 57215, 45204, 63057, 37932, 100525, 276977, 104126, + 42472, 13150, 108317, 106038, 5266, 1004, 31351, 41691, 20834, 27119, 14871, 42058, 19309, 18264, 15714, 128645, + 33753, 97813, 14991, 36632, 127182, 38788, 23800, 23029, 134259, 141169, 22689, 9008, 35810, 85196, 80190, 175150, + 41805, 96633, 36654, 189935, 45878, 63838, 3242, 5356, 312001, 228710, 66129, 4509, 14881, 203932, 11812, 70030, + 47757, 276830, 122405, 33146, 49251, 2261, 162697, 5363, 120050, 24738, 211941, 21746, 44252, 31697, 2242, 4877, + 3708, 85573, 85060, 82434, 25856, 115291, 56583, 56567, 107864, 962, 58671, 54581, 120347, 39508, 201071, 94108, + 1228, 71194, 12513, 225594, 36550, 6911, 160283, 35838, 41682, 115576, 28022, 16436, 34496, 5034, 74108, 10228, + 47025, 11047, 141530, 3837, 8393, 65028, 55696, 31079, 173365, 61729, 57479, 106029, 246526, 10526, 54647, 134609, + 12894, 3537, 244, 16862, 161607, 118386, 60183, 141700, 35670, 22051, 179401, 24135, 90785, 29822, 122577, 87924, + 126572, 2459, 80584, 28905, 2095, 87804, 54240, 102268, 124731, 60006, 15202, 109796, 157033, 21466, 164665, 37695, + 58694, 81513, 83134, 208222, 554, 5651, 7656, 87297, 12786, 33576, 15075, 146538, 9642, 40949, 163656, 9760, + 4817, 21064, 83245, 14829, 16136, 95061, 68060, 24365, 47864, 1179, 105850, 5322, 174698, 19385, 5399, 111971, + 66992, 363067, 36771, 86468, 4639, 166195, 77004, 80406, 69284, 96401, 199722, 27643, 10625, 105066, 89724, 58878, + 40710, 29791, 24556, 99909, 27763, 9231, 35125, 110086, 51738, 12458, 116193, 41661, 30404, 41774, 96495, 7041, + 264105, 37287, 172797, 19867, 137904, 45042, 61041, 151622, 109882, 58327, 51284, 132939, 52487, 238, 24806, 356262, + 42824, 71570, 114506, 221874, 57514, 290906, 425324, 6771, 2740, 77666, 51262, 18017, 10479, 14457, 11137, 19547, + 146799, 74299, 1986, 193822, 107390, 66292, 13142, 8549, 16586, 41783, 4738, 83585, 88038, 9102, 61338, 33010, + 174951, 5451, 103430, 20873, 9410, 71603, 254445, 29027, 16185, 19139, 109385, 57580, 44158, 18457, 29275, 116743, + 5568, 32928, 91629, 19307, 40658, 229962, 46426, 15411, 46108, 30487, 67181, 20224, 12763, 92267, 69682, 41491, + 97385, 46327, 
89571, 20801, 26175, 104473, 82178, 53280, 108859, 90329, 60749, 15258, 664, 104876, 189856, 72942, + 230732, 51261, 34901, 19996, 67470, 20008, 38335, 18089, 46663, 52358, 14286, 59726, 14395, 26243, 124071, 9514, + 50750, 71549, 45061, 44126, 141320, 2803, 58061, 44739, 93140, 659, 64128, 26178, 15361, 168531, 40344, 8977, + 47997, 172770, 68707, 16055, 55784, 23990, 20994, 19090, 6791, 21011, 244531, 47352, 307840, 16546, 63361, 164913, + 118569, 30189, 34941, 229229, 107326, 24202, 160409, 1575, 18056, 16156, 162544, 1298, 58281, 140814, 38998, 4285, + 260415, 19797, 24212, 11490, 54691, 125241, 149765, 53575, 8790, 98579, 39432, 49317, 73332, 17200, 47059, 6532, + 45633, 17042, 110013, 4418, 7511, 26786, 2639, 40536, 45674, 39902, 37280, 138726, 143373, 16114, 16063, 95339, + 14031, 18222, 148011, 134245, 11799, 36311, 103728, 15146, 94491, 24149, 24405, 126162, 35646, 2622, 13619, 190698, + 96544, 59993, 46574, 579, 14560, 43052, 125756, 11698, 26049, 139612, 76126, 94179, 32983, 27506, 5021, 32417, + 25791, 73423, 53795, 119140, 83814, 24222, 419, 60678, 42094, 36193, 71555, 167797, 231370, 39846, 78400, 68056, + 63955, 1124, 59895, 8546, 139212, 47144, 37860, 26891, 2359, 163343, 60583, 105848, 169908, 4972, 13013, 132896, + 3108, 44849, 132211, 4330, 183486, 14009, 10090, 75230, 105867, 102476, 3031, 44769, 28197, 21633, 23419, 68902, + 32941, 109556, 36098, 52255, 124968, 209278, 40772, 6698, 26402, 57023, 171822, 87578, 88267, 23469, 27050, 64577, + 149768, 71917, 89979, 16941, 23053, 7594, 106397, 125192, 3078, 35227, 9172, 18615, 19091, 182038, 12549, 48594, + 52924, 6894, 86017, 20427, 25383, 22580, 75986, 18233, 19209, 61027, 86544, 26111, 111548, 24619, 166688, 24272, + 97361, 51184, 78541, 14792, 3959, 2430, 71174, 280134, 24880, 85091, 19069, 48720, 235061, 148747, 27783, 40579, + 9099, 95152, 259500, 59221, 24921, 73721, 170222, 102157, 161254, 66033, 357515, 82190, 151405, 105610, 28252, 213067, + 20508, 97281, 6878, 87399, 7159, 45662, 182676, 27626, 34381, 71179, 112126, 12802, 20133, 56316, 50576, 70823, + 11434, 14879, 96554, 27582, 74036, 24193, 21984, 147179, 19974, 41451, 8452, 161213, 34769, 115, 18749, 115303, + 36585, 8710, 130627, 54462, 1076, 15711, 78215, 45693, 22454, 41595, 35658, 31785, 17354, 64339, 5699, 30987, + 38727, 113863, 1046, 127166, 235160, 27501, 82135, 137484, 111547, 143478, 71619, 20477, 96454, 65400, 93505, 9234, + 117448, 71966, 130201, 9407, 156940, 10894, 113917, 102178, 91330, 3786, 25046, 137247, 37299, 14204, 156671, 48589, + 7310, 119658, 1019, 3147, 26915, 389655, 28024, 29905, 117060, 110822, 603, 9922, 51369, 186019, 151553, 53930, + 22620, 65936, 33869, 50466, 61861, 18339, 116756, 22544, 322264, 178320, 134504, 32779, 106850, 51259, 7921, 18753, + 111694, 76143, 15475, 39056, 15091, 96327, 38933, 146365, 2624, 6183, 303617, 83865, 40345, 8720, 102137, 208016, + 300446, 153481, 62817, 17230, 177064, 59995, 17444, 96781, 1707, 62069, 105642, 215627, 27389, 113620, 8641, 39778, + 54792, 22640, 92614, 72033, 327783, 56938, 97175, 28337, 132669, 24810, 100695, 42694, 183543, 96612, 26568, 321, + 59003, 67147, 64475, 124682, 17744, 254962, 92433, 55393, 20603, 153319, 316603, 192699, 151134, 16030, 30713, 5369, + 106923, 79389, 15318, 196516, 53084, 229057, 32215, 2061, 71803, 15710, 68210, 36730, 279424, 61974, 109245, 21881, + 319816, 40889, 10178, 55054, 11579, 30821, 76533, 48007, 21946, 12530, 41523, 56504, 16728, 146955, 90643, 77497, + 38274, 58777, 12829, 83673, 72711, 24324, 131406, 209463, 5085, 14864, 2408, 146954, 83391, 
104916, 53219, 39654, + 88646, 106083, 13930, 24286, 40159, 28744, 20399, 11792, 25027, 26454, 82556, 24039, 34680, 36361, 145006, 21872, + 10752, 107608, 27995, 36258, 12988, 66287, 75099, 84038, 54126, 38128, 56142, 14292, 30365, 99229, 9312, 5952, + 18338, 50601, 15454, 40761, 100445, 4866, 42787, 168097, 230674, 27, 4416, 59458, 44874, 21538, 13837, 21543, + 84974, 32659, 181908, 81485, 143877, 1443, 22510, 44084, 123253, 114222, 131683, 77045, 139372, 123203, 151023, 23972, + 28082, 30654, 30914, 61473, 91477, 143646, 51334, 8042, 144002, 18818, 47219, 30784, 13096, 53692, 57020, 125132, + 219729, 72133, 94451, 32149, 46016, 5231, 19109, 89053, 50029, 67191, 30812, 104508, 42377, 43699, 106368, 9836, + 14601, 54570, 18766, 12632, 6660, 155889, 71980, 75016, 58244, 83344, 7256, 100628, 58978, 56720, 58199, 118422, + 23918, 11726, 37394, 463, 88206, 139614, 253619, 539, 113611, 38238, 154196, 29350, 64452, 9692, 12873, 4429, + 17541, 32212, 6089, 18497, 41032, 117229, 60868, 14143, 10942, 926, 24793, 66470, 12021, 18956, 23792, 155539, + 49189, 11284, 84405, 157831, 10978, 12543, 64410, 50098, 40175, 82131, 32892, 21615, 37156, 5526, 99592, 36215, + 10947, 19241, 20602, 2093, 71709, 93588, 80808, 10971, 106894, 25921, 413, 34040, 112538, 180819, 118821, 72357, + 57007, 79329, 16870, 137412, 137486, 10245, 90727, 18898, 150608, 14622, 19833, 22840, 152719, 29427, 209294, 4232, + 40615, 60643, 170375, 22011, 7746, 28136, 332881, 60551, 279716, 193813, 38074, 19946, 13101, 16840, 117701, 27751, + 19524, 59518, 5857, 368, 28708, 105821, 12973, 27739, 40578, 900, 41397, 104380, 72320, 33862, 8409, 34652, + 1096, 35868, 72140, 8303, 182051, 82682, 33389, 5630, 94527, 27756, 204584, 39519, 51275, 31654, 10240, 28759, + 22833, 178542, 47192, 48182, 45164, 83416, 42256, 42796, 81917, 217466, 53292, 37786, 77519, 106347, 83381, 18672, + 48508, 13787, 77506, 13385, 5421, 76619, 372545, 27228, 140302, 83313, 3227, 42955, 37845, 66043, 76055, 149143, + 149830, 12497, 9759, 138621, 5587, 153959, 83576, 136204, 27579, 39401, 30659, 75311, 5357, 6559, 74434, 7707, + 428725, 26547, 2082, 18025, 248187, 41435, 176983, 82585, 6326, 238794, 27806, 33103, 206760, 30220, 62067, 73068, + 39814, 3267, 31130, 1487, 32585, 16095, 47315, 334742, 89923, 102036, 75915, 77001, 44341, 23722, 4933, 28107, + 288753, 33496, 67090, 13693, 284443, 67130, 6821, 12171, 96368, 120123, 128906, 6889, 31201, 197218, 124216, 25556, + 94189, 226026, 49191, 116420, 119504, 22368, 28238, 62479, 20359, 140859, 29908, 42319, 52073, 25021, 11717, 171363, + 103216, 48554, 148106, 44322, 179, 62550, 142748, 5200, 27934, 626834, 53683, 40353, 32801, 386580, 59130, 42350, + 96035, 956, 88884, 71218, 34111, 41335, 31551, 1556, 34309, 7435, 32506, 89091, 101326, 35050, 97836, 7566, + 18198, 14509, 235440, 30012, 20704, 338945, 90305, 62331, 210266, 5359, 86970, 67633, 37643, 51918, 7476, 35122, + 27880, 2530, 23516, 55992, 141873, 9269, 20887, 235173, 106000, 53315, 71177, 78367, 19414, 8455, 3948, 72358, + 56614, 93522, 50567, 6412, 167714, 32465, 101863, 1914, 66483, 142566, 61810, 14328, 107885, 75527, 21510, 22073, + 86602, 3162, 170297, 80142, 4379, 139776, 150756, 52344, 20796, 126580, 47459, 31811, 75467, 203428, 2360, 109945, + 4987, 40280, 38609, 247457, 5017, 131195, 52873, 51358, 118857, 25612, 54684, 86642, 26003, 82237, 10347, 74817, + 34308, 134385, 105661, 2079, 114428, 3924, 56947, 20197, 29198, 93080, 30441, 23003, 6686, 189968, 44029, 59712, + 29697, 69462, 47863, 6319, 73632, 71419, 54022, 228432, 3739, 11617, 144267, 
6304, 69795, 159284, 38182, 88987, + 16798, 60652, 18367, 39753, 41504, 26776, 44767, 4986, 7207, 326091, 10211, 275129, 30722, 15983, 114324, 26287, + 21436, 250022, 386, 16493, 36735, 47994, 4425, 57498, 28067, 7086, 86124, 96341, 28545, 29897, 71934, 19803, + 3239, 94102, 112964, 21957, 11221, 53105, 41589, 82164, 36031, 6367, 42771, 2307, 41889, 128904, 54967, 59098, + 100010, 163061, 65256, 39405, 19247, 129504, 97081, 10279, 317673, 79950, 84866, 47576, 29495, 35727, 17138, 23769, + 174554, 168948, 28307, 137478, 6424, 65666, 84059, 28007, 129725, 112584, 87500, 22631, 53845, 9237, 125865, 12109, + 94986, 62791, 47377, 95747, 7955, 119822, 43499, 77478, 59676, 37816, 112528, 83870, 2604, 10721, 277540, 129593, + 191497, 1803, 103962, 39100, 19735, 137806, 184562, 831, 102214, 21611, 10860, 96243, 62954, 12392, 277571, 104806, + 23844, 21269, 30123, 51663, 11872, 3731, 70610, 110093, 179525, 50391, 26607, 87825, 261436, 17108, 19172, 65210, + 34492, 179038, 18937, 8799, 428, 29645, 11956, 61342, 78404, 376484, 132083, 73837, 142035, 103650, 20615, 4466, + 16747, 74934, 38480, 234599, 17246, 46547, 32844, 24552, 27578, 22737, 103773, 39027, 37021, 1234, 22307, 95862, + 33672, 4191, 11010, 27369, 57944, 36384, 94490, 7931, 26056, 163500, 146122, 22564, 135760, 93787, 61065, 30077, + 2369, 6137, 12659, 3122, 61674, 56540, 24935, 25675, 122066, 26194, 26305, 22069, 31327, 2064, 15705, 149614, + 19374, 89531, 613, 93086, 157065, 5730, 15360, 6683, 40553, 8430, 74835, 94791, 130982, 74032, 11372, 90140, + 69619, 36036, 16092, 112362, 71290, 44790, 23930, 155440, 38855, 195955, 61949, 49611, 72100, 9710, 26268, 41136, + 92903, 169781, 27353, 78082, 95940, 112981, 249266, 45995, 51422, 17889, 6210, 74226, 165999, 87787, 28659, 84558, + 65713, 42221, 17212, 99031, 57873, 122295, 227056, 76534, 50726, 57460, 287606, 77186, 7288, 29042, 88166, 172092, + 20272, 22733, 128506, 113493, 2081, 55443, 102934, 214, 42326, 28948, 53196, 24237, 22624, 21099, 13480, 39377, + 81120, 35325, 45300, 24047, 57914, 47609, 64670, 25672, 79352, 7747, 71834, 161803, 19447, 8688, 10183, 9684, + 1684, 6277, 61421, 45761, 72302, 118558, 18353, 10661, 11774, 128325, 16327, 2665, 302559, 70280, 76546, 45579, + 161481, 169457, 36438, 37410, 96396, 127007, 10776, 56760, 13692, 115406, 41747, 83908, 414988, 69549, 169745, 58040, + 3721, 62350, 104731, 13605, 79066, 14490, 121161, 108219, 56627, 83538, 32335, 35780, 46883, 23245, 40346, 24451, + 21150, 129629, 31758, 47729, 11747, 2392, 5660, 43534, 12184, 23309, 97227, 201922, 5856, 75935, 22492, 245478, + 113458, 122567, 38892, 52163, 176117, 98436, 387939, 127565, 84416, 26809, 1689, 44206, 52079, 78841, 20795, 5683, + 27933, 162169, 34126, 12822, 3354, 45811, 72520, 20811, 59765, 13615, 3254, 29527, 141359, 123305, 19887, 90838, + 2212, 8885, 33750, 29379, 216309, 13657, 7475, 88895, 2555, 55375, 35969, 66537, 23458, 112987, 1751, 75280, + 196722, 96722, 67717, 118130, 142463, 83824, 80129, 105478, 45701, 183568, 315287, 14884, 44548, 167199, 36212, 100715, + 28798, 95743, 42919, 6271, 19418, 59193, 16434, 72701, 215, 108179, 34472, 75818, 29916, 15862, 29177, 1351, + 9396, 129616, 4305, 86650, 10574, 51218, 914, 206197, 114226, 53103, 156910, 12946, 84475, 16322, 71666, 47108, + 13520, 81329, 27088, 120745, 18694, 174187, 3645, 72390, 34056, 18867, 220604, 95316, 4524, 97988, 41515, 586619, + 90302, 23520, 19632, 127752, 62930, 258836, 36988, 204585, 13539, 57180, 13517, 6044, 19407, 65336, 268952, 132299, + 77209, 53483, 3327, 22672, 7728, 50216, 2729, 12196, 38088, 
36872, 5799, 111465, 9535, 11303, 51899, 76725, + 2263, 23913, 3675, 253827, 23875, 65387, 63019, 12817, 183945, 28678, 43266, 62072, 17750, 269599, 29961, 5765, + 26274, 6555, 2446, 55197, 67172, 1910, 71875, 19799, 10585, 1419, 27911, 88939, 28042, 167002, 124915, 104112, + 22199, 47768, 14066, 16710, 7478, 99068, 196517, 131507, 51331, 27291, 42046, 63842, 66030, 117306, 144818, 41353, + 26774, 14822, 38660, 171065, 192929, 121185, 116712, 28895, 31434, 3911, 52612, 111118, 25850, 18697, 65634, 4147, + 50197, 74729, 15097, 117548, 52926, 274499, 54590, 79384, 178158, 113803, 36365, 137334, 4679, 5949, 253573, 27681, + 181256, 356354, 65776, 146248, 70184, 2871, 18045, 156661, 229600, 6542, 22726, 9001, 9959, 34743, 33915, 7460, + 105594, 269690, 12482, 86077, 72158, 12017, 58753, 24594, 73974, 3029, 1912, 30079, 2726, 109412, 146145, 35326, + 35085, 862, 90862, 85609, 78087, 43053, 160170, 33043, 23284, 4515, 162825, 69896, 35568, 601, 13016, 1407, + 51713, 90134, 750, 45520, 155676, 21397, 168585, 187237, 5401, 125230, 5635, 89220, 27254, 54715, 98930, 113085, + 11966, 3030, 1855, 149700, 17569, 56634, 16775, 51586, 223253, 10938, 121033, 70787, 71993, 76450, 39521, 26162, + 103357, 94057, 56597, 26906, 111477, 293134, 42368, 24553, 55722, 30882, 11930, 19889, 30504, 35653, 6466, 203139, + 26034, 287857, 19452, 2522, 46774, 8228, 76457, 83553, 35392, 6216, 12166, 56704, 36285, 6768, 54803, 1726, + 214814, 6895, 182419, 26778, 41143, 53690, 13669, 45646, 163465, 22665, 198804, 39125, 1051, 54093, 61411, 31560, + 16094, 26798, 90341, 277777, 81044, 169520, 129829, 46588, 6636, 71429, 29098, 27473, 76489, 47101, 118137, 125121, + 179102, 29265, 57351, 60270, 20712, 59437, 33382, 18626, 39178, 70695, 80048, 54642, 35683, 106381, 97513, 43264, + 125177, 120906, 35533, 22522, 54219, 7788, 92290, 6116, 30617, 6801, 86129, 39209, 52994, 53661, 59735, 17738, + 25324, 24278, 105977, 13689, 50123, 36059, 130088, 54180, 2543, 36656, 87050, 59769, 87529, 20220, 367, 68705, + 58995, 26101, 26380, 43246, 10688, 79793, 82063, 59968, 125199, 31463, 19802, 62223, 12388, 70063, 151361, 3296, + 60158, 33268, 27121, 110554, 125481, 31240, 69489, 60334, 131646, 25391, 20034, 24248, 7642, 55281, 33709, 57581, + 133350, 77700, 27095, 3522, 65874, 30518, 61307, 126098, 3438, 49052, 9849, 78050, 97277, 50748, 175256, 49826, + 101450, 107315, 118984, 13409, 10075, 128877, 62205, 13193, 56344, 25228, 87810, 2143, 116821, 7648, 113840, 19459, + 50778, 131885, 88512, 13697, 60547, 58403, 210177, 34494, 98016, 51781, 47807, 12099, 106135, 16443, 16925, 19635, + 13859, 8422, 14030, 4756, 14255, 48634, 3275, 4837, 16300, 230472, 6616, 53129, 77373, 22360, 111581, 9662, + 173521, 71655, 15044, 5531, 8285, 190633, 62896, 54909, 45932, 34330, 16255, 17909, 37426, 152464, 256859, 18903, + 4054, 67227, 5705, 135855, 114295, 14380, 28822, 86386, 55947, 44796, 22159, 43163, 7703, 65450, 5829, 97182, + 39114, 652, 2216, 44468, 52, 74475, 73693, 208207, 51119, 111015, 105280, 42780, 128135, 3956, 13974, 30409, + 19714, 40616, 22185, 44115, 60715, 199079, 86742, 81192, 9554, 53876, 58171, 29597, 50492, 316379, 10539, 3453, + 88180, 23111, 24529, 93240, 2823, 46332, 22213, 8752, 118271, 197846, 6618, 8946, 52993, 21325, 30302, 17074, + 122625, 9575, 29441, 295253, 97919, 3130, 132791, 140156, 23859, 8941, 106857, 22772, 37895, 107740, 9471, 34989, + 25040, 85180, 21330, 47109, 33614, 110324, 23189, 24151, 32102, 171390, 19981, 29005, 20431, 121, 38106, 170174, + 3577, 46060, 182390, 13411, 9275, 119138, 47329, 30160, 15686, 
30347, 7585, 10003, 43031, 29151, 20512, 144355, + 157741, 153623, 16851, 99315, 110358, 156059, 69556, 9859, 1884, 75126, 4225, 180276, 40291, 131485, 17863, 1299, + 125391, 75039, 111409, 31614, 13736, 31156, 97629, 65733, 5008, 14589, 129738, 29549, 64881, 29351, 75196, 52675, + 87336, 57594, 21161, 14655, 77381, 35333, 37937, 262082, 70711, 100777, 11065, 52574, 43032, 79308, 11911, 5569, + 49155, 8990, 20956, 71672, 118587, 90936, 6794, 2889, 70494, 14885, 17291, 20073, 4318, 33042, 38735, 27931, + 10168, 11340, 174780, 29799, 30126, 32276, 416159, 9138, 12580, 186182, 69114, 30093, 10524, 55369, 90592, 23723, + 280104, 31769, 43457, 134915, 104001, 3107, 52049, 3483, 145413, 4347, 87847, 8340, 2862, 22905, 12749, 10655, + 84140, 32339, 14853, 21123, 6603, 75082, 30462, 29877, 106005, 84964, 69112, 129634, 13566, 31377, 1731, 2591, + 12780, 75605, 9265, 203857, 11251, 95054, 43621, 106786, 42830, 115761, 76779, 15968, 571, 316548, 48436, 23152, + 179910, 24939, 4039, 62740, 82443, 162336, 105433, 153188, 13146, 12020, 11190, 145468, 469, 151738, 6924, 16613, + 42714, 25880, 5783, 38804, 32591, 110905, 81649, 189448, 265217, 122177, 28046, 8852, 424024, 1774, 13702, 37891, + 92553, 66876, 68996, 31394, 54721, 100409, 93602, 51349, 134100, 42960, 121568, 58272, 6007, 12605, 20028, 3624, + 15242, 25008, 65373, 95897, 114681, 115646, 2589, 33333, 59030, 148878, 4427, 719, 16718, 23118, 3261, 37212, + 85465, 55213, 20762, 7510, 200214, 136975, 141829, 8623, 85982, 9053, 8985, 13680, 55174, 20625, 8519, 15392, + 165013, 16648, 8679, 27707, 23493, 74409, 23572, 32138, 56964, 21537, 197403, 32462, 82529, 23420, 28463, 4528, + 109150, 117327, 76538, 9244, 32706, 84770, 24954, 49185, 27568, 3481, 35176, 25954, 82442, 152974, 131562, 69937, + 5350, 25825, 141497, 121347, 14976, 75327, 17713, 2839, 13165, 257262, 30030, 30105, 44890, 162261, 56625, 19734, + 60021, 19579, 1465, 101402, 21343, 50719, 82005, 23880, 33978, 2744, 4244, 16973, 17264, 25584, 4273, 85481, + 4655, 19471, 172622, 36425, 22328, 212066, 128477, 64373, 27819, 33935, 83439, 54538, 75730, 73945, 182416, 338, + 16567, 164442, 82351, 56235, 55483, 38729, 47137, 36504, 14510, 39166, 16573, 4712, 17926, 119742, 48289, 74781, + 45827, 314393, 143249, 63030, 150609, 33960, 254056, 83767, 3704, 81354, 45727, 6473, 7385, 36244, 6886, 18673, + 272531, 4187, 62156, 112398, 161543, 82887, 4358, 87142, 76904, 76583, 39823, 167961, 122163, 68178, 11770, 14478, + 52405, 50115, 29516, 109139, 2039, 4206, 65909, 23385, 19165, 89405, 28262, 22275, 41623, 3099, 70734, 12924, + 14423, 41773, 25426, 95066, 228354, 10150, 40311, 18456, 3369, 167019, 217588, 126793, 176360, 66455, 4269, 8444, + 85491, 121695, 17697, 323, 7122, 20991, 35726, 50184, 35789, 94066, 146437, 243045, 303724, 21794, 8433, 198209, + 4465, 23672, 80873, 33604, 13628, 46964, 2602, 33500, 2233, 8434, 6196, 25551, 55311, 64859, 90756, 733, + 118771, 16152, 16282, 13527, 20713, 42651, 69883, 78249, 10006, 70583, 164285, 102376, 221519, 42660, 9468, 65430, + 45115, 136780, 41566, 157119, 71021, 40395, 88297, 10249, 35650, 41778, 28731, 28138, 29775, 49179, 39391, 51182, + 7337, 14843, 4441, 103029, 10864, 81753, 72912, 49213, 20665, 88374, 112909, 1667, 21142, 63823, 38287, 19613, + 1746, 41069, 30542, 41967, 15080, 138315, 9822, 40857, 1624, 120146, 62254, 46115, 32449, 11046, 21374, 514828, + 10905, 260390, 38829, 21553, 105743, 7303, 96235, 38405, 229797, 32678, 23538, 112753, 7701, 37587, 64813, 15914, + 3940, 40782, 259364, 20373, 22997, 77967, 19173, 76602, 178467, 82126, 
+	[… several thousand comma-separated integer values omitted — large embedded numeric data block added by this patch …]
84612, 41781, + 31009, 41498, 2644, 8135, 106725, 11873, 65756, 50677, 84265, 100278, 19103, 25944, 45214, 71109, 29333, 199959, + 59289, 28006, 82735, 73888, 74178, 9357, 31611, 55567, 113071, 91025, 22225, 16280, 263206, 143472, 82038, 1016, + 111009, 3454, 111282, 6624, 4021, 148481, 82606, 3337, 44449, 87085, 117440, 117450, 188119, 96922, 66328, 69088, + 192629, 7164, 177681, 13842, 123592, 13678, 3709, 1404, 392506, 88363, 34011, 3111, 292847, 35632, 135945, 165656, + 114487, 28103, 36070, 105813, 12831, 13767, 283706, 20001, 205858, 222376, 26231, 74767, 9852, 14707, 55251, 21778, + 20780, 81817, 61071, 27574, 79648, 73651, 59592, 26903, 75767, 1269, 47843, 22121, 357014, 1880, 118720, 33888, + 97721, 25074, 162232, 165250, 18166, 101436, 126490, 14600, 21005, 87059, 98216, 138975, 67950, 11357, 183453, 60893, + 33226, 31203, 30167, 34258, 204009, 158681, 64517, 2772, 16536, 8063, 37505, 72518, 2987, 13469, 1060, 70404, + 11335, 58467, 606, 80478, 66717, 28334, 72244, 111835, 144910, 7759, 9382, 16929, 47950, 24768, 18442, 3812, + 17262, 78772, 57860, 139250, 55060, 192875, 157943, 1432, 15063, 5317, 91497, 24602, 2934, 71481, 112610, 7454, + 51038, 67723, 15783, 27576, 26775, 96465, 16801, 41801, 178497, 1952, 4258, 35229, 66008, 12359, 80274, 39191, + 3427, 79381, 23113, 37373, 49433, 369, 197495, 44949, 128592, 41764, 85267, 26089, 20036, 84751, 45080, 183648, + 157553, 6777, 354790, 8756, 63861, 32306, 73479, 133947, 3089, 129130, 28938, 68599, 23015, 83388, 4921, 46304, + 51210, 118961, 162098, 13808, 28696, 56249, 178796, 65420, 35933, 130109, 17053, 154383, 49942, 34368, 45185, 215004, + 71135, 72056, 58502, 92030, 231518, 3061, 31419, 84551, 99620, 59670, 13439, 24874, 17248, 49981, 120, 101486, + 21835, 11954, 21387, 80202, 176536, 69099, 25576, 83929, 20676, 54972, 21235, 121229, 16944, 207747, 17151, 62192, + 38700, 14914, 67294, 97250, 165831, 65091, 88309, 65140, 233253, 78072, 119737, 2388, 295625, 75403, 48047, 5295, + 36723, 97958, 31105, 11399, 13023, 94081, 105941, 87375, 52745, 78093, 142695, 58118, 10907, 36936, 47744, 98453, + 19423, 63195, 135090, 155076, 67972, 116671, 181141, 76674, 125868, 7497, 7282, 18173, 95473, 37926, 40931, 83084, + 82875, 9577, 10786, 82123, 1183, 13043, 25845, 7930, 108455, 108021, 144839, 298080, 52685, 25893, 74303, 83892, + 33417, 167899, 3363, 124169, 64095, 3491, 11574, 30740, 21433, 19985, 31353, 13355, 52438, 74347, 30435, 24346, + 33191, 43579, 92911, 14343, 127809, 55919, 202307, 87794, 44505, 85592, 45, 37946, 211823, 75477, 10315, 11020, + 7810, 8418, 14824, 32246, 2752, 43445, 78058, 51648, 95548, 28128, 139532, 78431, 162031, 54035, 118501, 105304, + 7185, 33563, 841, 39346, 91196, 67423, 12694, 225158, 47563, 63637, 48178, 53247, 61721, 57205, 32651, 77002, + 33359, 256934, 6389, 100897, 17897, 60470, 2058, 38569, 23682, 58623, 3141, 225129, 42806, 36558, 74186, 65297, + 178101, 65934, 727, 11385, 22874, 28253, 252846, 104582, 49707, 66565, 52743, 129552, 199897, 4526, 6020, 48400, + 75419, 48628, 13529, 146713, 82456, 3396, 19131, 77933, 8618, 26055, 136494, 270517, 11171, 161242, 27374, 10081, + 79712, 4483, 132351, 34334, 116847, 98238, 115123, 62251, 18783, 13247, 236033, 17204, 44190, 86807, 31546, 3315, + 46564, 61125, 188041, 5405, 5346, 199954, 2749, 50374, 59046, 179633, 124580, 35456, 95032, 68263, 16037, 104183, + 7893, 68877, 23745, 4536, 14916, 44942, 88134, 154142, 3214, 4074, 126680, 444, 26800, 13417, 122265, 141794, + 24172, 9594, 4737, 198047, 121453, 10533, 56408, 70714, 34362, 74346, 24830, 15909, 
10250, 226352, 86319, 65102, + 17408, 113698, 65723, 192701, 18888, 29804, 92420, 119089, 4840, 52158, 6809, 47211, 68696, 127333, 9079, 122944, + 22831, 10761, 65707, 48241, 13162, 83275, 41056, 127568, 36005, 52744, 10097, 2969, 32512, 44770, 40452, 180033, + 14800, 160155, 1451, 16499, 62357, 50678, 47022, 118784, 41723, 26067, 60563, 169681, 45046, 58619, 46085, 32423, + 27247, 64149, 46183, 1743, 37486, 3802, 17194, 24647, 5372, 19756, 118649, 4609, 2564, 83610, 112562, 46541, + 34261, 17420, 81688, 66264, 298500, 194042, 239563, 21230, 66509, 23479, 154376, 33179, 133920, 158984, 47967, 146524, + 89138, 72960, 127604, 25358, 31305, 13512, 95336, 27420, 117697, 9777, 159295, 2091, 19097, 4938, 15518, 81702, + 108304, 448058, 126466, 7371, 81386, 239535, 1607, 211558, 84106, 53806, 374, 125065, 23802, 113479, 52151, 46803, + 46411, 21764, 51063, 22432, 63304, 31295, 6642, 26992, 946, 58917, 74562, 10672, 51417, 1090, 157283, 6175, + 41777, 30873, 49434, 31205, 41041, 70979, 28851, 11292, 19501, 20377, 16444, 125033, 115864, 49848, 35298, 117499, + 60743, 35553, 7657, 117188, 294354, 1111, 19913, 107692, 37955, 25543, 107874, 180298, 94165, 181537, 43123, 3058, + 18116, 7886, 11895, 8962, 1156, 81655, 22350, 1372, 17937, 241608, 114745, 32122, 20645, 31944, 32077, 3682, + 57114, 6050, 45126, 4535, 90804, 42423, 18205, 88298, 58042, 4218, 130816, 58944, 48643, 192539, 3516, 129850, + 92288, 67482, 5645, 6008, 9861, 19038, 25523, 48393, 2557, 163977, 7711, 69978, 88546, 98350, 106098, 5102, + 61333, 26201, 45643, 21353, 101008, 50380, 9018, 90864, 12853, 151908, 45670, 48604, 5148, 32715, 39819, 1925, + 87856, 45697, 2007, 66226, 4152, 9542, 6588, 36053, 144503, 58516, 41489, 32328, 73841, 5629, 33075, 228550, + 18718, 1278, 118540, 138339, 9789, 1171, 99245, 73118, 147565, 168255, 51267, 5701, 10846, 18000, 6368, 25481, + 42085, 30996, 30750, 21874, 12789, 33285, 67958, 162991, 30223, 14020, 26867, 21295, 8993, 66175, 79515, 148442, + 56352, 397258, 104422, 69338, 67203, 81, 103746, 192218, 2448, 103541, 4268, 26498, 28215, 26795, 119536, 166999, + 6052, 116957, 18923, 77982, 23540, 54054, 31372, 18430, 126319, 39890, 1756, 5893, 75933, 40146, 271195, 76657, + 36689, 21496, 89710, 61547, 80235, 15197, 81995, 39003, 23231, 6321, 26632, 19783, 23561, 14948, 63802, 56505, + 21383, 38408, 16252, 72824, 38800, 196129, 74267, 8127, 77548, 48102, 87833, 36171, 102798, 47900, 15495, 173684, + 21234, 30001, 52403, 46069, 31468, 16788, 57124, 45489, 158608, 82654, 59957, 10410, 46188, 171714, 31155, 59538, + 63960, 149773, 98372, 22924, 191679, 82803, 49057, 16752, 8051, 157358, 24903, 23142, 67014, 23023, 30666, 6199, + 11185, 61761, 108153, 49193, 170078, 64795, 286604, 157605, 42186, 22327, 189946, 54197, 28827, 199449, 44686, 59637, + 34777, 152898, 16645, 83206, 41930, 102775, 24648, 36524, 212079, 95709, 1548, 100504, 12421, 68448, 31268, 474576, + 34750, 218355, 92781, 134981, 24111, 67882, 54747, 174240, 110344, 94961, 114973, 72370, 73918, 15923, 15159, 7647, + 45171, 65568, 43530, 34464, 70826, 34474, 60365, 197139, 141949, 210472, 46891, 121996, 40063, 94408, 12981, 134679, + 23979, 51874, 24714, 19995, 24254, 36223, 157770, 7326, 37309, 94133, 22106, 78205, 26724, 2537, 25382, 106656, + 27179, 66311, 110415, 22901, 24718, 57112, 55550, 53766, 83648, 13063, 9413, 49639, 54938, 59353, 51453, 13425, + 14591, 124743, 17157, 12381, 17425, 61233, 6845, 73147, 29525, 129664, 27744, 5277, 102675, 54090, 98234, 13352, + 48975, 17754, 16594, 102750, 48654, 39678, 70262, 34873, 12316, 8508, 
46395, 134713, 8929, 37841, 2872, 44089, + 60640, 45964, 41710, 35853, 41709, 32561, 10847, 30533, 50168, 131914, 35502, 7179, 63264, 101004, 13098, 13461, + 89812, 195792, 68289, 50907, 50716, 618, 42226, 56670, 48632, 3817, 23883, 6968, 38755, 21629, 7154, 3872, + 138583, 91142, 62213, 50389, 160123, 68183, 5493, 38, 55579, 58839, 15930, 157836, 71829, 40468, 28030, 16196, + 24230, 33693, 43412, 1608, 233277, 6049, 16708, 17549, 46322, 16760, 153973, 62078, 39650, 117125, 26112, 36797, + 166388, 166224, 101342, 69645, 34718, 6450, 41096, 88450, 24108, 54215, 7626, 56703, 98252, 7498, 5996, 104382, + 7031, 18513, 1464, 20355, 106695, 12121, 15785, 39445, 62498, 113, 143048, 42438, 18258, 69031, 20970, 28430, + 35062, 185809, 306428, 189608, 217827, 3480, 29084, 79447, 78731, 32985, 65014, 127203, 34824, 4550, 257781, 321144, + 33354, 3217, 218324, 88938, 19520, 11596, 43514, 141333, 60852, 65248, 6297, 11095, 2432, 14119, 13748, 7469, + 60224, 12819, 57422, 64540, 8587, 52981, 73978, 23601, 2836, 30776, 142181, 19639, 62955, 10337, 595, 211745, + 702, 140990, 56342, 19255, 20227, 135292, 29499, 348456, 42285, 59021, 23153, 398, 40560, 12943, 20085, 118311, + 95592, 31441, 30421, 18709, 62486, 96516, 37157, 85251, 11094, 115980, 17285, 128523, 53035, 4890, 10141, 87821, + 143291, 78302, 59857, 33639, 18842, 69975, 11769, 10009, 46177, 505118, 54757, 192067, 77994, 12960, 50591, 24130, + 1770, 3662, 133316, 209915, 9657, 48891, 39791, 15630, 107422, 16844, 81827, 8655, 172915, 60826, 36740, 246774, + 32655, 23684, 174097, 93563, 128716, 9168, 16463, 138739, 25886, 3373, 1869, 35842, 25164, 41241, 1126, 17486, + 156740, 84370, 68201, 98020, 119928, 18998, 6457, 120651, 165875, 3291, 58021, 5480, 85817, 5424, 7918, 43877, + 11007, 125299, 24181, 67060, 89110, 233405, 35096, 471, 33956, 19798, 152016, 168120, 12652, 156405, 133111, 116254, + 65156, 42113, 9958, 120153, 180266, 111540, 269252, 123200, 8494, 34196, 8578, 82607, 36889, 12572, 69053, 33057, + 19958, 67796, 22249, 3640, 20955, 15759, 9894, 33572, 96, 8969, 80871, 33260, 1264, 67165, 9108, 87451, + 118288, 65977, 59131, 17708, 135769, 66193, 98679, 61993, 44754, 23134, 122439, 76614, 86671, 113511, 7544, 55848, + 5632, 21119, 183800, 40580, 19026, 82001, 99485, 55491, 168220, 108940, 236173, 16220, 289120, 163513, 75470, 32902, + 33569, 88755, 3113, 96883, 93821, 299927, 114933, 5574, 66144, 80618, 43070, 125428, 70635, 119887, 3673, 55958, + 7687, 101495, 5831, 97794, 5642, 23968, 7105, 10753, 2714, 39381, 104590, 82035, 55445, 30394, 30985, 36078, + 56636, 42720, 49190, 134117, 71545, 2558, 567, 53812, 184182, 144, 41897, 149050, 93133, 9426, 48576, 19715, + 7332, 68965, 74846, 31701, 37389, 27344, 9014, 157686, 12643, 17067, 29166, 15457, 52315, 133218, 97956, 26560, + 22729, 19455, 100687, 66306, 11014, 31241, 157229, 1036, 158742, 1911, 18856, 8404, 17050, 49637, 105257, 26263, + 152889, 33300, 24194, 15449, 50178, 57835, 89431, 93344, 34601, 52068, 6811, 39076, 41945, 41909, 38286, 221235, + 52136, 190373, 62204, 7398, 9948, 20577, 17514, 97470, 26914, 13538, 27727, 115631, 63548, 60637, 26694, 36204, + 95721, 237466, 17635, 8311, 115951, 155407, 372775, 119613, 40759, 7149, 153893, 29766, 8960, 16679, 2573, 75649, + 158258, 17939, 56612, 138333, 38938, 4371, 39651, 46863, 49232, 4029, 76050, 62734, 48325, 4870, 8916, 70189, + 42234, 482, 51761, 85517, 81523, 299332, 91771, 91710, 253019, 32687, 95521, 66713, 66128, 29849, 16707, 26650, + 40978, 43719, 4139, 42193, 20048, 230595, 55007, 13024, 238048, 4557, 73590, 107873, 
38760, 29575, 21439, 27927, + 62928, 129083, 22364, 3858, 122560, 50006, 32925, 40998, 118532, 6970, 5412, 10487, 43687, 53543, 72899, 67285, + 137472, 16857, 46509, 32903, 163689, 209351, 336887, 206325, 26680, 147850, 10276, 166671, 9878, 111570, 4130, 56293, + 17448, 129038, 49791, 3323, 51035, 77462, 50111, 20907, 16211, 5982, 24162, 56278, 60834, 28521, 209493, 95578, + 36749, 39356, 52739, 208999, 29178, 58645, 19127, 12057, 14241, 39982, 78123, 80813, 59868, 6719, 15571, 19762, + 150086, 20349, 278029, 2666, 2305, 133772, 121203, 3570, 26477, 55785, 32824, 6151, 42422, 310941, 94923, 18190, + 34342, 18288, 70657, 25029, 165341, 34909, 320097, 33308, 83279, 75404, 45112, 17301, 33894, 4577, 18669, 9219, + 14257, 107253, 512, 36710, 29928, 16653, 7279, 43478, 12743, 3862, 3899, 37090, 13957, 212990, 2895, 53197, + 125571, 40119, 7450, 95395, 89134, 18938, 54751, 39220, 66712, 44978, 98747, 35334, 10952, 9840, 217378, 35750, + 16507, 209521, 52915, 36820, 147839, 56525, 13285, 1981, 30146, 51236, 7364, 104143, 7249, 222095, 6399, 165008, + 16565, 13029, 27969, 105952, 45399, 41770, 169129, 53714, 39874, 7853, 68938, 61382, 114565, 59737, 77275, 29135, + 215780, 6342, 153711, 36717, 31990, 50742, 2834, 52961, 230881, 31804, 87897, 73954, 171477, 9339, 91416, 34110, + 102, 54930, 1613, 11876, 196546, 591, 20460, 50406, 44538, 17056, 34267, 74596, 10880, 9193, 96244, 29689, + 84948, 20424, 26253, 45632, 281705, 119042, 185961, 44497, 86651, 192207, 11795, 114742, 10617, 265712, 62568, 92499, + 31395, 217897, 187475, 50383, 2342, 23312, 42405, 26096, 453658, 416, 133446, 93547, 43569, 10623, 121978, 21757, + 60561, 74826, 42853, 104403, 132901, 72040, 67234, 59013, 21845, 102518, 153642, 11146, 17727, 102632, 86313, 72775, + 58556, 407048, 21027, 6844, 258242, 89512, 3527, 107940, 22262, 23956, 4850, 30270, 58728, 16998, 117875, 20689, + 4008, 14646, 93000, 1170, 77997, 167859, 54531, 246835, 11806, 26637, 167713, 64014, 37431, 88103, 62362, 304585, + 30599, 44120, 31653, 42563, 81375, 43712, 1534, 49941, 109137, 145866, 103885, 146125, 185787, 22202, 33673, 3681, + 114085, 22277, 2072, 10477, 217292, 47874, 25630, 61064, 97453, 94351, 180344, 34224, 169085, 68184, 1881, 53451, + 30593, 100128, 39311, 3509, 60212, 90102, 7297, 23473, 11544, 261844, 8370, 27945, 102056, 100221, 39918, 115112, + 65580, 27264, 10582, 66982, 2384, 29292, 51255, 8837, 91655, 60384, 127046, 34788, 4855, 82574, 50338, 58959, + 95725, 135206, 28047, 308666, 7295, 41193, 95306, 61581, 157994, 185542, 36976, 28033, 16228, 38830, 10448, 15213, + 20669, 49883, 2493, 9951, 53276, 25223, 13971, 17396, 141590, 48264, 32377, 28668, 105246, 22993, 20557, 139621, + 17334, 37996, 19658, 20744, 25440, 34122, 116482, 22910, 76067, 36796, 67819, 11102, 17967, 9536, 55606, 117501, + 39321, 17563, 65478, 3571, 38911, 73504, 54583, 10444, 11362, 66927, 89439, 14702, 28559, 7682, 1513, 70102, + 63807, 20057, 46270, 21001, 21627, 256959, 184942, 165920, 26468, 36183, 112227, 4448, 85816, 65617, 77464, 8502, + 40120, 56527, 77638, 3102, 1025, 32749, 21811, 153766, 15234, 14725, 328941, 53852, 58319, 90203, 39231, 68729, + 69516, 3450, 70578, 52687, 66512, 8474, 90859, 4090, 124548, 66064, 39981, 28642, 75845, 40634, 22064, 107475, + 78873, 112238, 18036, 60258, 22137, 89278, 35142, 92257, 46681, 60200, 168934, 5658, 51242, 3324, 71459, 64772, + 85683, 32, 475, 18068, 32909, 134334, 1380, 31213, 76747, 9749, 4201, 23438, 35555, 83518, 23878, 30619, + 43396, 224662, 33147, 28171, 101465, 60063, 34256, 71664, 1765, 160809, 93427, 22239, 
73094, 35152, 17172, 38091, + 1077, 83764, 93545, 57605, 2962, 9185, 13119, 81080, 39028, 85967, 49835, 9330, 66777, 27605, 184827, 77443, + 57831, 49230, 57654, 174766, 42048, 200323, 166050, 27249, 15828, 76305, 4194, 44524, 13962, 51363, 67697, 14701, + 36041, 69963, 72263, 4470, 63657, 47301, 159567, 22495, 52412, 69094, 411, 3653, 5242, 4607, 116518, 99099, + 58846, 58552, 89470, 53229, 2141, 273007, 27018, 137017, 5506, 4365, 159178, 120422, 219012, 127162, 69641, 26407, + 134451, 42927, 9321, 12622, 3936, 145843, 24141, 64837, 182230, 170529, 104842, 135255, 17558, 17348, 53327, 12872, + 17153, 10391, 30, 74060, 152237, 14185, 205380, 31566, 22621, 30796, 108736, 26638, 49377, 673, 90154, 51194, + 170216, 63571, 45050, 32131, 35257, 17360, 55199, 20393, 68233, 118377, 63209, 4669, 65374, 65781, 84411, 92579, + 6510, 108160, 56952, 192831, 11126, 55557, 110245, 73583, 212151, 16429, 369879, 20590, 7184, 24497, 41359, 86934, + 2480, 131718, 116501, 37833, 22517, 9731, 10757, 78661, 3437, 19565, 6471, 13896, 33073, 74906, 286, 53375, + 30156, 29608, 44034, 13989, 16557, 36336, 11786, 9016, 3768, 30883, 52210, 33318, 55067, 37827, 143891, 319150, + 86829, 93201, 25144, 48395, 91000, 56891, 94800, 60129, 12350, 105311, 185, 78672, 148650, 2793, 13582, 54814, + 66017, 22898, 190292, 69218, 79424, 1785, 10144, 90001, 70885, 6226, 65791, 123504, 49066, 19757, 151381, 633, + 28250, 21135, 4879, 91112, 21369, 79333, 54846, 138302, 51213, 7684, 96590, 49597, 34533, 30172, 13591, 193897, + 11510, 1702, 73010, 42908, 3258, 13889, 151689, 47726, 18176, 159526, 179101, 31058, 18465, 26752, 188829, 15744, + 84413, 58916, 38458, 15452, 6315, 50155, 89783, 5933, 36411, 8754, 11471, 15409, 163765, 12966, 2808, 36697, + 4346, 161209, 70415, 56116, 356, 211098, 14356, 36846, 107618, 53672, 48457, 30017, 598, 176513, 81011, 18982, + 13727, 86974, 48130, 58055, 285026, 119331, 69388, 22734, 162695, 127129, 6676, 10209, 8749, 19120, 28354, 35996, + 14583, 2294, 21774, 7098, 7132, 30506, 68020, 26105, 63521, 287047, 31652, 29144, 184253, 45019, 59739, 54276, + 32378, 4681, 28618, 155387, 25991, 60268, 28085, 115514, 5604, 89273, 73475, 54698, 30961, 26093, 14648, 29343, + 53675, 45157, 6691, 46637, 4874, 13617, 45597, 95292, 84693, 71181, 195063, 4205, 5086, 45424, 49483, 57401, + 34811, 81433, 10920, 60312, 26978, 122016, 164823, 61302, 56564, 35695, 36714, 67995, 7904, 42503, 11148, 636, + 33519, 45175, 78078, 5252, 51221, 95264, 51178, 64304, 113942, 105499, 51841, 54146, 69253, 1646, 75338, 105535, + 6664, 10892, 158642, 62272, 125563, 215912, 33549, 19391, 22055, 105382, 20979, 20764, 220680, 38079, 4062, 13641, + 102008, 4203, 20143, 28206, 742, 47639, 13458, 8201, 51930, 16197, 22187, 25787, 19494, 75134, 114304, 94871, + 176108, 45555, 116, 160484, 44424, 12397, 146985, 10330, 35123, 239546, 28027, 7835, 13025, 82191, 412132, 7092, + 75685, 170210, 25010, 106234, 11759, 43065, 8341, 715, 74335, 50832, 12326, 62728, 22250, 132367, 15166, 1485, + 181453, 100489, 159053, 49558, 131147, 218225, 72843, 179732, 16984, 57645, 5036, 111986, 132115, 50930, 114625, 1202, + 154311, 67481, 16722, 47767, 11991, 40423, 208598, 57602, 76452, 34425, 3714, 48128, 52609, 81295, 9952, 36845, + 85351, 16065, 20654, 19823, 196, 24873, 26870, 1297, 30969, 9296, 54228, 102469, 9198, 72519, 70039, 108396, + 74919, 85172, 66125, 101377, 2529, 117805, 96810, 24778, 56838, 145070, 22575, 3889, 71779, 55500, 69200, 56256, + 29187, 1545, 88869, 2923, 116304, 4719, 101420, 2691, 3504, 4334, 13196, 24283, 62330, 11167, 46433, 
98976, + 41190, 4304, 38379, 134621, 86153, 55398, 18685, 16560, 28393, 104952, 236521, 190161, 254926, 97238, 29381, 35076, + 104228, 132048, 21165, 43831, 105189, 81338, 60610, 75143, 13264, 30942, 42614, 10992, 84359, 171469, 36755, 2955, + 3574, 50858, 7720, 113363, 12415, 1082, 8309, 16640, 8534, 89729, 1994, 39280, 57147, 259158, 159887, 14424, + 10175, 59116, 66944, 434056, 188174, 112776, 24021, 39757, 59504, 17275, 18210, 6474, 60932, 53833, 48968, 4909, + 66318, 8851, 66549, 14450, 16407, 3384, 60036, 132244, 107921, 15561, 173708, 120543, 17638, 31452, 27253, 351, + 99103, 3968, 104995, 54896, 49278, 134414, 51028, 59123, 28403, 11528, 46089, 883, 39786, 58165, 83022, 52349, + 145865, 50737, 25618, 47214, 8462, 6871, 83132, 9635, 3558, 132475, 8680, 97166, 43406, 3898, 139071, 89536, + 142271, 8320, 29089, 83232, 29139, 37374, 11887, 13573, 21989, 22330, 18987, 8277, 36552, 94008, 13402, 19357, + 93955, 192242, 23973, 19276, 72365, 83440, 43343, 20913, 7176, 15598, 107084, 9241, 13556, 46936, 46456, 48864, + 106185, 54904, 62326, 15469, 37957, 3365, 221144, 71002, 321774, 79203, 35192, 114690, 17782, 21700, 10335, 97643, + 129017, 125386, 115842, 123788, 51154, 15859, 74495, 30765, 27938, 35855, 55933, 25082, 24952, 27639, 71742, 23512, + 1935, 32522, 98612, 72854, 39366, 29090, 7051, 75719, 2791, 156786, 23966, 41744, 33489, 127417, 36727, 13017, + 41582, 128016, 125842, 141281, 56078, 108220, 31920, 77429, 24558, 59290, 9558, 18760, 9311, 51229, 19776, 17252, + 5449, 45702, 18396, 21552, 218786, 72761, 87355, 23965, 27808, 90969, 38872, 26050, 190429, 178071, 13825, 15009, + 68013, 27591, 38972, 14110, 36020, 50172, 200538, 281255, 86003, 53465, 47708, 215042, 29735, 28534, 1368, 69656, + 3404, 24318, 47650, 37653, 87348, 40656, 8697, 25748, 119357, 39951, 51347, 22849, 115714, 1560, 13392, 73201, + 2324, 35889, 27677, 41987, 86796, 378138, 63587, 34445, 81973, 5437, 94808, 11707, 40992, 27780, 19572, 62130, + 43376, 35564, 204011, 21987, 93621, 4561, 146551, 233073, 28784, 56146, 1533, 99739, 16808, 78879, 204037, 27916, + 36367, 31905, 55776, 19527, 2517, 49088, 49061, 67853, 2909, 67751, 72529, 12945, 191926, 57369, 47731, 345715, + 24978, 37675, 6961, 26371, 55303, 17783, 45945, 57303, 53308, 102816, 50376, 152609, 205724, 29088, 100615, 14981, + 60068, 20755, 67912, 22984, 3098, 119389, 76407, 31834, 21375, 86387, 71132, 54368, 64784, 154591, 7007, 38915, + 23949, 15688, 86044, 32410, 26579, 72957, 140702, 2534, 63121, 109577, 36571, 7087, 12319, 38027, 139635, 69422, + 80145, 58104, 77565, 134884, 39967, 128121, 122239, 98149, 97861, 77841, 122108, 53941, 6757, 105412, 5433, 45309, + 70143, 41083, 108846, 68195, 67642, 48548, 22786, 15898, 37082, 16924, 108012, 18926, 34698, 130920, 7208, 84736, + 69140, 115981, 3166, 30758, 122883, 16988, 18278, 9920, 46151, 31634, 162250, 22353, 62187, 128316, 15612, 158856, + 80934, 678, 6963, 76542, 429, 111036, 69613, 18534, 19437, 41943, 186274, 23104, 135071, 64054, 152408, 69055, + 137885, 55427, 70719, 41478, 222647, 50820, 94953, 48990, 13331, 245898, 18180, 15149, 154065, 124925, 217145, 82559, + 327, 139288, 191206, 14470, 61778, 6536, 73668, 129358, 74257, 25611, 26141, 105172, 40116, 65034, 30807, 96573, + 37493, 33559, 28049, 4551, 14855, 7513, 48252, 20443, 85079, 69697, 8617, 95869, 237641, 48366, 36858, 128756, + 42342, 3347, 3587, 16403, 102164, 201481, 17664, 68064, 199965, 62177, 224396, 15650, 71204, 12089, 125579, 76206, + 4662, 192158, 115192, 18064, 29368, 28388, 88784, 93189, 5042, 144274, 26733, 112088, 113914, 
8714, 3328, 85297, + 122214, 129118, 14482, 6034, 8955, 1500, 17059, 143211, 13149, 46390, 180700, 11460, 102503, 31711, 124947, 75478, + 43173, 42126, 146701, 30953, 5659, 29827, 78342, 38880, 163787, 38389, 57177, 12232, 69003, 35027, 31713, 32846, + 307084, 9190, 1177, 53022, 63392, 36783, 49720, 204857, 171080, 2218, 24668, 46908, 21390, 44902, 5316, 16460, + 81918, 10429, 22493, 40140, 86666, 28478, 19197, 39367, 36622, 197757, 35536, 74, 24126, 27886, 49478, 60977, + 28887, 159663, 25478, 34942, 5736, 100422, 11653, 280716, 28054, 76213, 269086, 56854, 207170, 12900, 26987, 34564, + 163428, 35718, 204153, 14178, 79891, 154374, 93383, 82977, 346467, 2260, 194527, 234413, 95363, 223815, 84524, 24639, + 38980, 45400, 11669, 39361, 111806, 107478, 23180, 284178, 80828, 13760, 60535, 160002, 9200, 111334, 9377, 23027, + 19288, 10467, 219338, 9403, 294896, 38891, 13604, 6964, 114468, 33089, 246026, 89623, 87405, 73198, 53282, 51034, + 111715, 3542, 21564, 14671, 141705, 108308, 240720, 1039, 7015, 84375, 20894, 62644, 72754, 61580, 52142, 32302, + 68463, 27046, 8380, 114459, 48738, 174003, 101876, 70865, 243147, 15573, 1008, 73339, 33397, 75662, 295288, 97483, + 101210, 83260, 287998, 315229, 37051, 33599, 14151, 14676, 98801, 6611, 61888, 45855, 82847, 52987, 104357, 56918, + 20397, 29312, 1971, 1398, 4940, 37732, 8028, 358536, 185559, 183958, 188192, 3892, 18716, 73119, 6906, 4369, + 54718, 83934, 109096, 203506, 83491, 133450, 4849, 4027, 11725, 13003, 142499, 87752, 40803, 56197, 37560, 36968, + 1128, 46164, 119031, 23432, 12128, 100279, 84518, 28176, 23060, 41295, 20799, 1558, 5174, 23677, 97773, 39548, + 7745, 10446, 105114, 10769, 67007, 87718, 236680, 161719, 46701, 32017, 4931, 19125, 1571, 28423, 43674, 64085, + 27807, 77944, 8529, 13223, 180186, 17974, 13548, 140128, 256996, 51253, 43890, 57544, 18975, 7744, 44658, 28313, + 16837, 105, 127130, 126370, 7877, 20389, 133921, 174075, 212758, 64619, 11797, 47857, 250379, 30209, 1917, 86594, + 15440, 21744, 135164, 34325, 87954, 11946, 12540, 311, 57755, 56815, 231078, 40529, 24719, 141657, 31438, 81877, + 124057, 251942, 13765, 33986, 83461, 63216, 7231, 198398, 258, 90044, 88181, 173923, 50864, 61645, 10174, 54553, + 8874, 28481, 1537, 50513, 29038, 39872, 84988, 57246, 71289, 12015, 94709, 4871, 31627, 279590, 81311, 43545, + 15429, 59109, 71443, 10416, 9005, 6896, 26184, 104749, 4061, 38580, 23344, 2875, 113851, 13804, 63848, 29618, + 107716, 136811, 54885, 33689, 82819, 50624, 149592, 114467, 13651, 11306, 73580, 85777, 79656, 166366, 222515, 39565, + 117851, 47704, 43581, 137265, 17539, 25867, 43261, 1034, 111446, 117360, 32008, 3664, 12938, 250542, 16992, 14203, + 39724, 83746, 187150, 20582, 190787, 110150, 10914, 42740, 4352, 122213, 31831, 49606, 15402, 8583, 34572, 18519, + 21070, 72128, 605, 14069, 62708, 68579, 26360, 45158, 8539, 33700, 23921, 174967, 23777, 21086, 47035, 21403, + 73853, 29070, 14402, 55968, 13552, 25123, 132516, 99114, 38810, 83727, 93287, 108584, 86117, 4037, 69026, 148748, + 16285, 151286, 2526, 67336, 56400, 114949, 38156, 64061, 1718, 14168, 68963, 19987, 75342, 72606, 92460, 25819, + 142863, 865, 10759, 102417, 29033, 128297, 176851, 239106, 378, 3476, 126764, 880, 110113, 35294, 142380, 60787, + 180321, 12139, 33148, 3243, 32586, 75807, 8895, 69743, 23606, 76161, 0, 32234, 26393, 15253, 101159, 37444, + 160984, 100458, 106798, 118153, 207987, 94453, 31068, 199369, 23783, 55406, 72063, 196753, 8406, 65985, 6649, 169369, + 16904, 2912, 14378, 60219, 24596, 44552, 22129, 79241, 47274, 46556, 155072, 
25, 11693, 93339, 175196, 82189, + 46539, 25983, 55777, 39522, 60524, 133678, 18470, 77553, 78595, 83411, 33676, 121561, 48423, 93395, 74963, 17181, + 31718, 50927, 45519, 1283, 20170, 4973, 55435, 106949, 9009, 50903, 7663, 1685, 146268, 116269, 1759, 101910, + 15183, 77271, 25151, 101108, 191060, 44211, 30237, 101323, 172765, 81369, 7540, 8670, 1349, 2983, 72423, 57110, + 138436, 10840, 116808, 148570, 37365, 13235, 144011, 326438, 10970, 137859, 245169, 166948, 40509, 8825, 6884, 140489, + 225817, 339638, 308668, 14440, 20021, 172262, 20842, 35348, 70394, 77862, 44480, 114795, 152138, 205967, 80249, 66813, + 21541, 21337, 74233, 246096, 66559, 255232, 7873, 74405, 66544, 78592, 41670, 16122, 89352, 272783, 66583, 16290, + 2725, 97449, 74073, 152635, 17258, 14662, 164479, 7400, 91345, 77718, 116341, 23495, 7313, 145703, 66781, 95536, + 3815, 59962, 62075, 48667, 115868, 88403, 11136, 31573, 8660, 48458, 10303, 30913, 40362, 76502, 54728, 210919, + 4071, 10395, 14132, 19281, 103524, 46479, 2475, 9346, 39388, 11097, 7103, 61594, 118275, 49094, 17865, 16935, + 21950, 49474, 38996, 44270, 6549, 83808, 6663, 54784, 38226, 40402, 13514, 51690, 32754, 33017, 146868, 3834, + 209574, 31460, 2290, 5991, 29201, 189025, 95100, 116204, 43495, 37630, 621, 42790, 18028, 147636, 74713, 72101, + 20296, 39620, 52377, 149079, 40597, 71087, 48840, 41861, 18370, 78232, 9624, 43707, 47520, 67447, 30945, 84196, + 202450, 110891, 61689, 177365, 24134, 22168, 57538, 47780, 2219, 96079, 30892, 4049, 8195, 17695, 2313, 196966, + 69545, 7785, 159038, 116984, 38041, 85191, 95576, 35677, 136934, 87868, 8522, 47264, 46374, 90011, 2454, 121060, + 19041, 15646, 218596, 20647, 159811, 5273, 78222, 5790, 84865, 690, 36264, 169096, 58620, 49769, 19870, 56574, + 846, 44944, 30096, 21418, 98749, 62549, 49755, 90559, 13569, 65527, 23045, 35183, 30714, 1120, 129237, 133919, + 97246, 73893, 1242, 5886, 32186, 73097, 9292, 172076, 4479, 48341, 159618, 10702, 27355, 9327, 3896, 145089, + 92973, 127562, 80631, 140609, 35214, 40087, 6621, 60953, 151491, 93233, 133991, 5681, 75963, 13834, 189749, 48238, + 37631, 42943, 70301, 22025, 1561, 101790, 52288, 23519, 27200, 15594, 330, 128333, 238083, 77116, 27436, 14656, + 23182, 176130, 43018, 93766, 60756, 68727, 36117, 110670, 12291, 90265, 17003, 86775, 25766, 50378, 97783, 72630, + 39355, 51597, 76915, 9532, 102333, 333464, 26234, 91954, 87362, 18461, 14063, 123415, 65741, 87119, 13821, 69651, + 59906, 29250, 5334, 111676, 19538, 14466, 71314, 49631, 201575, 100994, 81169, 99402, 48772, 1958, 86381, 84313, + 102938, 3927, 14847, 115137, 24438, 1719, 106468, 14344, 292580, 113808, 25709, 4422, 39964, 14448, 9781, 167464, + 66366, 38612, 21306, 135701, 9004, 3967, 14882, 60987, 61379, 70755, 79621, 230123, 50548, 251137, 45013, 68059, + 37622, 8601, 52714, 131016, 38732, 161564, 5054, 17043, 28379, 100315, 11542, 22026, 68180, 40581, 195387, 5840, + 76622, 286510, 15220, 21299, 17488, 10435, 34592, 77414, 69849, 292262, 92100, 61536, 5963, 40657, 72834, 85029, + 7219, 137580, 32702, 24398, 48143, 18315, 7679, 10221, 43413, 55964, 56264, 22174, 55358, 421, 32944, 33586, + 540, 28638, 297324, 19055, 58691, 13123, 1457, 13545, 29506, 12292, 90917, 28767, 19245, 95156, 15265, 9677, + 52193, 329199, 120474, 31550, 48621, 32454, 173844, 6940, 5518, 104662, 27465, 11002, 13048, 23406, 127455, 219189, + 118625, 966, 55994, 71978, 44755, 44425, 58484, 81947, 191081, 71933, 4128, 149448, 42061, 45036, 34993, 81007, + 89197, 17973, 12644, 20286, 2259, 12086, 128406, 38420, 60665, 7439, 63696, 
63513, 66994, 154461, 3204, 269842, + 71210, 72373, 47920, 147744, 51232, 11668, 11964, 5455, 3585, 62466, 87325, 135079, 142096, 6038, 3351, 59851, + 55024, 90797, 96831, 151965, 7892, 59192, 18848, 84506, 8345, 62542, 50046, 26538, 58653, 123823, 70584, 33087, + 463307, 52217, 94498, 162262, 65658, 37813, 1798, 133237, 64319, 20468, 342279, 2483, 137941, 174532, 134882, 13053, + 136323, 101319, 14858, 315661, 53499, 38349, 1883, 36549, 12746, 4783, 17315, 26629, 43492, 1433, 30848, 103477, + 6978, 53491, 254027, 59138, 95163, 23047, 130383, 71358, 29925, 104048, 147110, 20605, 60175, 141493, 1502, 26372, + 5128, 8043, 160154, 205752, 7352, 24589, 41005, 8787, 41463, 180084, 35056, 128767, 184756, 116446, 216131, 52109, + 121146, 31824, 120507, 78611, 20473, 80909, 52814, 53045, 18426, 47765, 19434, 251918, 4598, 37752, 2736, 30186, + 5309, 104505, 61873, 39702, 54580, 60995, 6795, 78461, 14277, 102209, 56595, 76365, 121151, 84271, 40571, 22053, + 10691, 60167, 8833, 12920, 56711, 2593, 16106, 101890, 43880, 47882, 105443, 26296, 63409, 2286, 236404, 3535, + 76682, 31275, 91877, 94825, 11830, 60322, 77261, 14017, 172218, 120226, 23764, 296, 264898, 128743, 243138, 11474, + 32632, 32455, 30389, 37844, 66536, 221177, 32878, 17990, 81062, 25157, 197499, 6108, 31649, 68174, 4883, 39604, + 25788, 24670, 22233, 11063, 92774, 167596, 40860, 103202, 60222, 67201, 150525, 69109, 11100, 21724, 207627, 23699, + 63159, 50235, 17317, 132722, 9432, 137156, 14594, 85114, 224657, 60669, 32727, 134466, 25240, 42617, 6453, 86119, + 53613, 52551, 39633, 95001, 152, 22181, 33930, 6888, 138620, 139630, 14275, 67967, 163829, 44096, 23693, 35554, + 2397, 2230, 20289, 26508, 85345, 44002, 373, 23558, 12501, 6393, 128738, 2065, 37507, 108535, 43648, 37304, + 64932, 28919, 39223, 9316, 38575, 60236, 72946, 52874, 43522, 3500, 42635, 18532, 65789, 350700, 113725, 232391, + 80198, 62151, 18623, 5216, 78796, 80102, 187580, 46871, 35226, 102412, 1673, 53825, 3851, 294484, 119721, 213745, + 108891, 1551, 16270, 77, 73311, 86689, 90501, 11580, 3060, 174403, 54046, 2060, 28391, 42872, 40600, 5734, + 93671, 48215, 33532, 19766, 39663, 848, 33334, 33841, 46142, 2841, 116488, 9276, 30982, 41267, 161026, 52345, + 66041, 22012, 75546, 14211, 37281, 137475, 48692, 71432, 68157, 27935, 25905, 26558, 26553, 91667, 162874, 41931, + 71872, 49342, 134603, 3964, 81142, 58684, 69664, 61624, 211527, 194930, 43281, 38136, 39747, 141202, 109912, 103720, + 118119, 105830, 244717, 53752, 22695, 3660, 15950, 115237, 2859, 29995, 32157, 26681, 11066, 63677, 2677, 27475, + 20232, 20055, 83960, 187268, 168911, 71409, 21339, 67656, 7933, 3860, 3943, 4936, 197005, 39134, 94952, 21684, + 17066, 56970, 61053, 6076, 111071, 9161, 30747, 75947, 44434, 9778, 40744, 3960, 133994, 16681, 158292, 120209, + 120798, 124190, 20560, 28017, 12766, 9520, 51106, 98867, 227798, 634, 8951, 16361, 45756, 34211, 71984, 20818, + 132852, 2771, 8039, 31695, 29917, 46819, 43140, 4911, 36076, 31350, 146547, 12832, 55352, 87682, 102259, 20181, + 65281, 125529, 76789, 74087, 129800, 20270, 210263, 10610, 41958, 199960, 136842, 106466, 18944, 91106, 109596, 45385, + 84678, 1826, 26239, 13942, 18580, 74780, 96474, 109106, 168093, 58817, 77576, 139329, 132621, 264328, 131918, 3004, + 39997, 277046, 10657, 188062, 54092, 9389, 208482, 138342, 27740, 112130, 141428, 159919, 32354, 78912, 10848, 248488, + 3770, 58675, 143326, 309222, 114118, 121341, 21213, 104326, 103242, 247904, 10251, 17363, 43918, 50107, 99756, 9914, + 15899, 1529, 57591, 10958, 18574, 191645, 27047, 
33819, 145944, 50973, 10931, 515, 3190, 56895, 26750, 118357, + 123469, 6582, 232402, 185264, 219903, 14816, 104060, 2257, 24169, 2672, 34078, 85037, 100571, 54688, 18794, 11296, + 62403, 48268, 85546, 58594, 16776, 5744, 7336, 60494, 92779, 55136, 31725, 41292, 14535, 43085, 170495, 62016, + 17168, 20160, 13301, 31775, 16475, 23866, 68028, 7708, 14304, 27024, 412982, 50360, 37231, 31172, 8313, 18133, + 1712, 149874, 165108, 53176, 28218, 60812, 11433, 110651, 242754, 239758, 103026, 54413, 61688, 60159, 42554, 63279, + 28980, 26817, 24147, 239906, 1318, 94868, 66949, 9143, 51359, 25429, 40100, 150526, 222657, 6742, 15209, 35877, + 99200, 23325, 5454, 116860, 67989, 20127, 66101, 152710, 31772, 35795, 93237, 22499, 23932, 217156, 63347, 85225, + 35351, 44967, 43097, 63722, 201251, 232588, 42922, 76661, 49041, 51431, 91264, 26250, 27306, 16146, 171707, 8346, + 19128, 15883, 46818, 14147, 40135, 98017, 121209, 5750, 3986, 4803, 194061, 109091, 77333, 135407, 15726, 139517, + 19649, 240604, 296254, 18468, 120683, 261045, 15054, 49607, 151927, 90876, 33988, 94815, 21081, 4338, 3114, 25278, + 7076, 168237, 197598, 5263, 78375, 28858, 96610, 1656, 77501, 139877, 164638, 29453, 101599, 77549, 15067, 106520, + 8011, 191624, 67048, 47927, 89585, 15567, 16358, 40740, 96715, 19262, 3368, 257227, 58179, 82342, 164701, 77237, + 5378, 183488, 891, 51832, 102138, 1421, 126828, 72223, 106793, 152999, 9464, 38181, 26051, 369537, 47723, 127195, + 217276, 19701, 87052, 2899, 4197, 91519, 1222, 71232, 16297, 58674, 236388, 7640, 27014, 38262, 53246, 56437, + 46721, 45960, 154554, 93517, 13322, 57858, 23476, 62092, 22806, 29494, 60980, 59522, 5278, 152497, 10089, 42687, + 29629, 152505, 1415, 65715, 70632, 27834, 5144, 69061, 9647, 106305, 17509, 83636, 71519, 6066, 32864, 49020, + 58818, 62070, 83328, 51524, 40603, 27209, 14130, 13006, 128530, 34289, 191758, 175447, 8903, 6644, 258111, 1630, + 36770, 19694, 13099, 93412, 56900, 8695, 92739, 9361, 31483, 22727, 24402, 12783, 26935, 64112, 50162, 73354, + 43845, 97824, 38035, 43333, 34265, 4521, 118246, 36777, 34920, 27802, 62580, 75613, 87658, 36480, 37311, 41140, + 6114, 54859, 37921, 25174, 84767, 123137, 100067, 76401, 64250, 31608, 5782, 50390, 47318, 8602, 146787, 57571, + 50720, 184949, 26374, 49411, 19264, 59669, 72340, 61, 162514, 37868, 20961, 23322, 33357, 104140, 12370, 75813, + 13117, 173944, 10984, 8143, 6705, 185922, 4223, 25479, 46696, 78515, 10296, 173037, 75032, 3611, 146130, 87547, + 71054, 20284, 76208, 179291, 87004, 88099, 25915, 10612, 42035, 58634, 8935, 45162, 69138, 29960, 4954, 9827, + 11903, 75332, 189060, 145940, 99854, 68852, 2428, 35935, 102328, 55387, 95597, 31446, 19611, 66526, 25614, 75226, + 34525, 91178, 34821, 57507, 69312, 5965, 54613, 73567, 6431, 38968, 305485, 72661, 49842, 46229, 13066, 35961, + 101600, 6117, 6625, 94731, 133178, 5938, 14272, 71096, 108751, 55431, 8310, 74568, 41829, 15997, 69572, 115173, + 89939, 33817, 27169, 164256, 225664, 113979, 208705, 24886, 48916, 4745, 105265, 2285, 99556, 76020, 16691, 17685, + 9195, 67243, 29412, 15704, 130387, 12347, 87154, 6073, 5960, 22667, 45766, 126328, 36857, 50552, 563, 58566, + 116724, 31042, 63335, 55856, 53518, 24055, 44320, 132215, 94002, 7583, 31931, 161536, 39077, 14213, 37853, 130359, + 53996, 47829, 71470, 59662, 6945, 70919, 15101, 29373, 22261, 65721, 32624, 111143, 64291, 96324, 23614, 39737, + 8314, 9557, 132103, 65689, 152785, 54960, 122712, 73272, 57588, 3782, 8427, 101263, 16522, 3063, 1332, 9763, + 9091, 103598, 131811, 79941, 5048, 53350, 22985, 
72314, 87671, 137942, 20832, 32726, 35441, 97544, 14838, 32661, + 39509, 25235, 12815, 229677, 1945, 18478, 104804, 53001, 12423, 99415, 90412, 47427, 158923, 19643, 66679, 33927, + 178413, 13851, 2378, 163553, 37549, 4735, 113147, 13151, 14095, 34868, 84260, 49719, 1475, 55719, 70677, 22297, + 73188, 7304, 101811, 35472, 62878, 110472, 31193, 7011, 4819, 44319, 37242, 22235, 24012, 34311, 34630, 8117, + 68534, 47855, 55542, 88606, 42606, 10807, 64311, 44304, 4010, 54889, 48956, 37274, 120809, 12117, 36576, 15154, + 57497, 55581, 76707, 88824, 59564, 29146, 75878, 346295, 101758, 137754, 84121, 153651, 103718, 85489, 61427, 59883, + 32701, 4692, 17006, 92343, 80116, 63122, 3829, 201429, 12345, 10158, 29504, 164968, 95834, 119755, 125824, 142638, + 46918, 23943, 90123, 20494, 21468, 29167, 157195, 24079, 53213, 49163, 19311, 138007, 15665, 149198, 44724, 55347, + 31200, 1573, 21255, 26337, 3867, 100570, 205427, 109262, 5140, 8979, 83224, 17644, 96013, 1279, 32509, 16380, + 250744, 103649, 111338, 4321, 21016, 68917, 10756, 39197, 10069, 10563, 184865, 35905, 13968, 1109, 7847, 19871, + 35449, 21656, 7996, 38626, 180829, 25293, 37599, 10356, 27683, 46005, 32258, 8111, 39704, 15702, 161889, 13627, + 59956, 21006, 29672, 64295, 22893, 319443, 755, 33239, 3115, 11630, 35242, 316161, 26293, 180051, 34293, 25262, + 32785, 45248, 4291, 1345, 75934, 380808, 185068, 5400, 62445, 95085, 113696, 37657, 141162, 2763, 1716, 31145, + 62720, 101394, 197152, 222158, 2018, 103950, 53054, 8291, 83638, 37618, 74005, 127265, 19949, 171632, 21168, 31182, + 114012, 109942, 16057, 103239, 95006, 48470, 141582, 50740, 3330, 57743, 91063, 68640, 99829, 25131, 192726, 1408, + 130935, 113922, 160076, 66999, 309272, 153746, 62089, 54683, 9565, 23036, 233538, 39614, 55874, 51238, 28998, 51475, + 121727, 56411, 33932, 53786, 37017, 49406, 91778, 26837, 23586, 252174, 2540, 47569, 319858, 177485, 2308, 5581, + 40970, 118880, 34878, 1602, 27602, 64100, 36001, 13488, 8625, 28038, 116561, 45356, 112329, 100, 159062, 48033, + 61060, 53312, 14278, 30173, 100088, 27580, 20456, 230013, 118525, 51822, 34883, 100756, 25922, 52426, 18317, 13881, + 16232, 8187, 317935, 69863, 1907, 53514, 75569, 36902, 60671, 5105, 60024, 76920, 51583, 106419, 20458, 110614, + 44553, 36111, 187025, 173919, 80993, 52249, 116521, 11851, 5262, 26289, 48960, 29999, 94679, 33367, 125032, 72126, + 8676, 211498, 44721, 235091, 940, 97176, 26565, 5948, 20736, 10278, 50485, 12407, 11823, 41971, 135546, 103878, + 3020, 21249, 253851, 97728, 16476, 7536, 49750, 6746, 12340, 94756, 71789, 16549, 152600, 40488, 30681, 120494, + 97416, 90981, 75736, 36235, 3703, 36522, 4051, 90148, 25744, 143233, 74114, 50674, 66826, 186399, 55544, 63905, + 16245, 17811, 43885, 57562, 16876, 12660, 11009, 92234, 46446, 722, 137637, 44043, 84798, 27042, 5314, 21312, + 74227, 10917, 92213, 211986, 176020, 52818, 44610, 174541, 45192, 85977, 66236, 78509, 61955, 14788, 256627, 12032, + 75496, 83996, 101534, 82235, 23946, 104883, 3183, 12850, 23626, 57697, 3225, 16042, 40372, 9541, 108356, 126495, + 26036, 101893, 24582, 39880, 6149, 95087, 46546, 59092, 11822, 168583, 82335, 18729, 30582, 146256, 6074, 39807, + 16541, 11986, 80148, 61456, 41914, 80721, 142480, 21004, 82385, 83655, 47670, 4769, 232823, 41862, 437909, 4116, + 40921, 59664, 133104, 38104, 80773, 101843, 38426, 90874, 14930, 55522, 12793, 23708, 3631, 8582, 3112, 415975, + 7517, 106586, 112390, 31555, 39619, 56075, 6299, 30930, 4348, 38188, 23437, 11888, 36180, 29057, 78844, 52556, + 126106, 24776, 65214, 92664, 
138939, 39642, 153427, 17494, 62611, 31501, 49371, 27056, 1477, 16503, 156270, 23995, + 113512, 205238, 84709, 77316, 47321, 67623, 42436, 36548, 25052, 10369, 19122, 181758, 14546, 6743, 124696, 124095, + 118881, 58058, 29158, 24211, 29060, 38102, 144694, 42736, 23589, 64142, 131963, 43763, 128322, 42128, 13330, 35824, + 36795, 108915, 43897, 15657, 18401, 58900, 14806, 118110, 137921, 30097, 47026, 4142, 104699, 4185, 87711, 85997, + 267929, 153261, 228359, 20660, 36194, 54339, 43073, 5692, 172791, 8213, 26146, 5686, 18113, 28694, 17786, 77352, + 4766, 1852, 168140, 76409, 188215, 2179, 198971, 53244, 30083, 37124, 32195, 48123, 332586, 62934, 88005, 15880, + 94089, 68377, 87929, 133891, 5805, 11217, 130365, 104237, 77909, 44881, 6260, 14391, 22194, 42271, 49170, 261884, + 68234, 79361, 14141, 107542, 154976, 30425, 14602, 73251, 43220, 47978, 4990, 19457, 40660, 21608, 104477, 3582, + 70001, 38324, 113052, 201756, 184893, 168071, 1921, 43965, 138095, 106316, 100248, 3825, 11128, 115487, 18833, 101956, + 103010, 11802, 30806, 12615, 22663, 5074, 17663, 19426, 5108, 141373, 42930, 183720, 212615, 34077, 17051, 59686, + 8485, 22524, 8691, 78244, 5565, 124298, 80099, 217111, 49222, 20498, 66793, 529503, 54614, 4186, 75927, 40419, + 26530, 57883, 5327, 44876, 42639, 19706, 60003, 74433, 16319, 1599, 26748, 132682, 55062, 6955, 63969, 38461, + 152662, 43166, 12979, 233, 105595, 18768, 75949, 56729, 21114, 135004, 23948, 42101, 22216, 320051, 21698, 23811, + 10294, 826, 29297, 123324, 68158, 26264, 158791, 95573, 10436, 191222, 9993, 12298, 86950, 123108, 20858, 320830, + 7206, 182464, 11757, 81755, 62115, 101508, 64002, 77412, 3977, 47597, 13370, 37935, 3657, 20526, 27967, 53371, + 59874, 220318, 6962, 46594, 456, 88484, 47078, 50207, 118617, 53233, 59503, 10872, 18156, 12054, 11152, 77825, + 106663, 95945, 18093, 1644, 112229, 13310, 264515, 9070, 80992, 55835, 162270, 15035, 17442, 111426, 61158, 71574, + 8217, 10224, 82739, 45979, 55551, 15768, 183976, 64417, 5133, 165269, 38884, 710, 34344, 57175, 27784, 31790, + 251927, 69733, 22676, 125462, 51153, 73708, 53147, 16750, 39364, 77866, 28089, 2461, 201321, 1010, 4858, 82482, + 7816, 123565, 3065, 124019, 66803, 1441, 2105, 147722, 23057, 2238, 2465, 109286, 156724, 37091, 60022, 193667, + 145664, 52352, 15876, 275211, 14276, 101280, 2609, 1583, 95705, 18345, 47036, 168953, 979, 1876, 17511, 99269, + 2796, 7756, 45223, 209272, 6875, 13501, 19972, 3039, 13429, 88164, 40587, 48347, 212525, 23645, 107978, 39326, + 50602, 71127, 74343, 5725, 26276, 95436, 69705, 115587, 28284, 114395, 42685, 63375, 3424, 66319, 70412, 12400, + 19465, 68740, 40524, 83167, 52397, 11466, 284475, 42974, 46963, 1472, 38647, 63800, 31420, 5787, 41395, 21919, + 51399, 116728, 34891, 76404, 206476, 116758, 145291, 39684, 92317, 182072, 3771, 102030, 48851, 66664, 589915, 408336, + 637, 73775, 252578, 42675, 100820, 82225, 43433, 158753, 83349, 100070, 42160, 13599, 10317, 42154, 91152, 132379, + 60227, 237752, 99758, 270587, 21972, 37249, 93833, 30614, 6908, 73035, 2132, 42490, 282439, 134241, 47775, 14940, + 32857, 14647, 284209, 112665, 224767, 50372, 44452, 85605, 95629, 194385, 241460, 28172, 83882, 169721, 7505, 79753, + 42106, 57611, 175719, 33441, 87338, 1110, 81138, 125252, 69757, 29461, 19692, 61492, 29840, 67754, 2077, 4100, + 68709, 14021, 73781, 501, 37665, 25322, 62392, 106943, 45244, 60281, 4687, 224380, 114577, 31964, 38842, 6435, + 49188, 3526, 19117, 97765, 175943, 38531, 30032, 27448, 54009, 25775, 5789, 30779, 186746, 7022, 85999, 42272, + 106158, 
428476, 5637, 191524, 70168, 172347, 5932, 69204, 3358, 12019, 9877, 55515, 234002, 64516, 10898, 36793, + 50555, 139948, 27732, 39934, 9221, 14752, 203772, 22375, 129338, 7278, 3245, 117137, 9562, 29787, 9323, 102610, + 20118, 12371, 39558, 8236, 32923, 70519, 19395, 17047, 78479, 97791, 16130, 89463, 118280, 22050, 37024, 89641, + 65752, 52765, 71810, 108442, 4977, 44545, 69952, 19070, 10474, 23484, 18908, 21788, 102174, 82995, 4717, 97497, + 60947, 27112, 3005, 36385, 129006, 15642, 16432, 25512, 10570, 43825, 108125, 73183, 83451, 75116, 95227, 44522, + 8598, 15990, 22094, 1787, 8078, 14629, 33317, 78398, 159367, 125840, 12669, 67807, 123785, 64349, 37447, 22738, + 80438, 10768, 15104, 98321, 16742, 22746, 11237, 41146, 5905, 13127, 47595, 35425, 5281, 12465, 54216, 37342, + 181513, 107706, 142857, 10267, 42402, 87816, 7247, 28472, 6977, 228601, 32966, 2076, 163136, 1044, 31531, 56439, + 93179, 59512, 313709, 36615, 42191, 80611, 121959, 23989, 14968, 38253, 2001, 194381, 50421, 88797, 45161, 11328, + 112401, 85555, 7543, 130762, 105440, 130853, 18911, 12575, 33456, 139214, 29318, 62849, 57347, 85786, 171698, 27142, + 121611, 1138, 80752, 54791, 4221, 77844, 47524, 140582, 94954, 18043, 313972, 120855, 36517, 63651, 18688, 30456, + 24046, 74888, 58460, 70311, 27796, 50699, 84011, 20285, 6255, 200303, 74910, 57724, 33394, 16958, 104867, 22244, + 72990, 7001, 57825, 20114, 135408, 43742, 54632, 30820, 116627, 34162, 194876, 27214, 1233, 2542, 89114, 33428, + 57874, 24432, 73244, 13342, 25654, 36812, 42775, 137065, 95419, 3034, 109640, 69998, 68156, 15470, 40930, 43371, + 401399, 12617, 9352, 26808, 313338, 55155, 36975, 17890, 55208, 52186, 226992, 16324, 45573, 22248, 14465, 65809, + 93124, 76501, 130378, 8533, 119251, 28580, 43089, 52170, 47200, 44870, 76202, 54398, 38196, 30768, 33917, 36165, + 11909, 25697, 353546, 54562, 130667, 372979, 149630, 9125, 45391, 144756, 57973, 19599, 73904, 2645, 258411, 25268, + 64964, 61348, 128044, 132867, 167846, 141414, 52489, 108908, 4137, 41282, 39617, 33797, 115606, 493331, 11554, 30049, + 52036, 71190, 77943, 31602, 62214, 2239, 12602, 19146, 7969, 18428, 131922, 852, 160925, 6259, 23383, 165390, + 7187, 12808, 189017, 142381, 1132, 41841, 18701, 29043, 134835, 99, 15791, 9068, 40309, 98444, 127461, 27332, + 73195, 112064, 5097, 39453, 64494, 72450, 24011, 7695, 80472, 162172, 41084, 177912, 444841, 55640, 51858, 96982, + 61111, 3710, 41637, 100576, 26500, 116386, 75146, 6890, 45323, 42246, 54944, 26323, 40743, 9228, 36712, 134278, + 53625, 8230, 40754, 28377, 52797, 241804, 26552, 261558, 22659, 53109, 11974, 49532, 15631, 300307, 18096, 92, + 29739, 107565, 55632, 22306, 36706, 120177, 123369, 52145, 28841, 81811, 2967, 6374, 39147, 48191, 58003, 205210, + 102836, 34591, 52967, 14473, 26794, 120917, 61270, 53041, 10536, 41117, 108058, 363097, 14845, 27166, 219697, 10564, + 87305, 3185, 20437, 15052, 45874, 8849, 48484, 35021, 12241, 1310, 87883, 136400, 127587, 39626, 11562, 40667, + 83833, 19748, 78478, 3152, 57183, 48701, 22123, 86751, 79722, 116436, 5100, 237306, 30844, 5999, 191075, 65796, + 41304, 91788, 24098, 149740, 84655, 17286, 186934, 44080, 20825, 86063, 13887, 29758, 92500, 168321, 114890, 64965, + 3722, 47787, 198506, 43466, 25655, 5407, 13240, 101325, 27811, 1251, 36859, 37980, 10157, 135970, 13500, 104703, + 81634, 106088, 4011, 23578, 31362, 89054, 38594, 186633, 34088, 336505, 15479, 9229, 92487, 49521, 16527, 53016, + 70123, 18389, 75248, 16479, 22190, 140271, 80094, 10382, 185100, 9051, 71918, 40905, 72658, 25068, 29185, 
15832, + 139035, 62499, 80063, 154077, 192523, 94489, 96542, 73906, 88241, 73266, 64690, 151133, 2078, 76322, 24201, 11290, + 230490, 211312, 36153, 103, 44528, 9539, 36954, 4068, 85638, 788, 127939, 83761, 100198, 46098, 285530, 41414, + 22088, 50928, 15371, 56983, 29982, 53638, 112051, 38452, 291233, 156894, 16748, 48674, 241062, 61341, 921, 24898, + 13865, 8071, 235453, 118716, 4445, 177183, 19708, 21189, 137791, 14558, 145674, 60895, 37835, 46685, 36314, 3038, + 107218, 25232, 61381, 204828, 31726, 43310, 143414, 32798, 19718, 11254, 127092, 88407, 38234, 37863, 143492, 19751, + 72528, 6573, 60076, 8937, 23046, 86273, 12926, 43848, 19177, 134938, 18033, 102451, 66695, 155909, 57463, 34776, + 5109, 25338, 58091, 2353, 17251, 3298, 36065, 271, 28077, 50946, 7379, 25203, 5617, 135531, 5335, 33853, + 21554, 1514, 322875, 85207, 47839, 81931, 162589, 73080, 72425, 41666, 2205, 107217, 133825, 13839, 41951, 21025, + 1486, 3869, 68800, 11775, 73065, 13658, 35920, 41391, 181275, 74450, 84216, 8800, 141508, 82895, 22465, 8542, + 21768, 13000, 8240, 5971, 62971, 23798, 245635, 47169, 63082, 25334, 10884, 141547, 2512, 44744, 8650, 42190, + 34200, 155357, 119815, 26059, 9904, 121764, 22752, 11476, 120309, 20694, 128696, 2930, 6392, 69057, 11264, 13880, + 91243, 58841, 35178, 12534, 68416, 70963, 20428, 809, 268253, 4190, 31257, 66625, 41199, 52789, 24931, 112688, + 116757, 6355, 182513, 90872, 138551, 2904, 50394, 19487, 185526, 10604, 30792, 10910, 41246, 165026, 42224, 113107, + 28986, 50164, 19196, 60791, 4093, 348976, 132032, 123165, 19057, 48820, 65522, 51163, 17295, 95175, 4484, 1308, + 4148, 239196, 12160, 143978, 245766, 14112, 90855, 4531, 122360, 139905, 44798, 12441, 35356, 12686, 74598, 10815, + 112075, 44675, 49095, 131228, 20301, 124634, 94533, 9419, 75441, 56741, 243625, 2690, 10998, 35598, 155583, 1231, + 7977, 46730, 242597, 113005, 19769, 157864, 288814, 15932, 62922, 81227, 227889, 46275, 937, 11294, 103072, 3033, + 63547, 8861, 201131, 23991, 100196, 49148, 38402, 10013, 26427, 1584, 47267, 32310, 157820, 36321, 1694, 31226, + 20983, 78184, 30061, 128167, 236696, 33413, 66741, 24836, 22935, 47910, 35635, 2678, 8140, 12435, 14911, 265634, + 90315, 43108, 109685, 72546, 156004, 8866, 37002, 25727, 47204, 51301, 8652, 23492, 140973, 93561, 41906, 95949, + 7726, 23391, 15506, 42212, 45097, 63870, 38047, 35887, 52725, 285299, 46733, 43623, 22636, 21753, 30231, 2194, + 23436, 125735, 107590, 55499, 257282, 66930, 21546, 157729, 105247, 1991, 14483, 140680, 522, 45784, 342095, 166370, + 88389, 122072, 21358, 24129, 216031, 54089, 2437, 107595, 202204, 163607, 26303, 41895, 46812, 40542, 78707, 2821, + 211666, 62835, 14082, 13734, 19693, 1603, 38978, 130765, 68828, 24577, 173255, 173341, 81691, 26679, 29930, 3426, + 45925, 32463, 13450, 24567, 11256, 19340, 65455, 7397, 30292, 9441, 111484, 57305, 372, 668, 19439, 90961, + 5236, 16883, 27753, 170633, 167826, 153144, 144689, 113366, 88328, 50765, 15108, 221902, 232776, 9372, 46450, 174146, + 151611, 40736, 105766, 1040, 5360, 65342, 48072, 55757, 82104, 206605, 5190, 78870, 18841, 118613, 78897, 18174, + 80393, 27669, 110040, 158621, 25465, 12309, 35524, 25100, 18285, 9967, 2817, 4292, 20320, 37594, 50630, 17986, + 72377, 87322, 63433, 19730, 31730, 51337, 57653, 20420, 33160, 147371, 43603, 49870, 45803, 188912, 85055, 11824, + 38715, 173010, 66391, 15353, 27705, 31430, 7938, 120390, 37379, 123726, 25452, 345, 24163, 78121, 86371, 105015, + 18360, 52480, 28116, 10182, 103586, 12625, 43431, 22706, 4015, 110996, 23635, 211386, 32305, 
9669, 5607, 79602, + 269494, 361, 52000, 75409, 91252, 16133, 217163, 25649, 20080, 40640, 108653, 52740, 36567, 24900, 6769, 93887, + 54650, 108271, 21897, 49233, 7797, 97128, 54633, 281648, 57073, 36934, 16265, 167589, 12650, 209086, 62181, 5542, + 31164, 122349, 81815, 67651, 42209, 38682, 31383, 122219, 6375, 11705, 72211, 22777, 261663, 21772, 69350, 85211, + 105528, 83301, 31254, 16229, 81661, 5138, 1814, 117287, 106002, 97442, 104335, 3000, 2800, 69084, 40361, 68511, + 5375, 11262, 76099, 146464, 17247, 106291, 8526, 92034, 43151, 22579, 624, 893, 4442, 9159, 34683, 63801, + 15727, 54296, 36030, 7741, 194619, 26966, 57386, 521, 100855, 29101, 4249, 27495, 144898, 122045, 157745, 29287, + 62320, 13800, 162790, 170925, 78465, 1720, 58206, 24580, 39929, 46731, 7089, 342751, 16454, 254942, 25844, 55945, + 1967, 64379, 6774, 82424, 28311, 43620, 50135, 58126, 61363, 144665, 15325, 125247, 17219, 113505, 10215, 205466, + 9395, 52783, 670, 169135, 8745, 37048, 58689, 91650, 121445, 22644, 27467, 23132, 76939, 233859, 30141, 24917, + 80385, 41981, 8292, 35986, 162380, 57998, 43320, 212379, 22009, 3420, 17253, 49172, 54191, 105827, 25802, 3604, + 44248, 112875, 6127, 145960, 16299, 26569, 47356, 14611, 122830, 30067, 21003, 192364, 48151, 121965, 327180, 5291, + 74429, 11440, 101228, 65667, 78291, 16492, 12720, 3052, 64755, 138800, 114706, 157348, 14238, 155607, 17452, 62457, + 44966, 41313, 98226, 78628, 2511, 99734, 74253, 133560, 17712, 1897, 74082, 2056, 67954, 146843, 392522, 79571, + 93583, 59314, 13047, 43084, 829, 117157, 13262, 38703, 105899, 54574, 104172, 22161, 49935, 96314, 98235, 13239, + 84750, 53580, 34154, 114889, 11591, 72967, 66707, 104862, 33185, 16902, 22355, 42766, 85447, 36865, 165090, 84531, + 42717, 343264, 93849, 81105, 27409, 214283, 113392, 12987, 208542, 45776, 2947, 83062, 28965, 25376, 24971, 20612, + 62052, 26121, 83563, 52492, 52525, 3766, 104113, 40486, 5597, 125, 87057, 6525, 25694, 6096, 199883, 183361, + 65594, 40734, 5880, 182733, 16343, 16696, 86236, 796, 63224, 7501, 54031, 26465, 276188, 66868, 2954, 84925, + 12475, 2514, 223628, 75284, 9331, 78770, 22694, 89261, 127507, 56323, 111184, 138073, 38522, 10128, 31041, 55066, + 57287, 41322, 157136, 126272, 24128, 75696, 37221, 12395, 133161, 60386, 24760, 6499, 79723, 24499, 76440, 34317, + 105548, 60338, 146011, 210310, 133695, 189639, 390311, 10417, 48917, 77547, 25558, 125695, 27558, 50249, 50758, 54053, + 43278, 12651, 41946, 33000, 46520, 10764, 46950, 144684, 13778, 1185, 25117, 155135, 141954, 96252, 124109, 34349, + 110785, 24351, 73685, 9493, 83366, 25352, 100253, 91513, 17715, 18369, 120888, 58948, 46317, 59607, 74045, 40939, + 105763, 18522, 3886, 43064, 66298, 18918, 33778, 108305, 147013, 68203, 169050, 212793, 41086, 17383, 9620, 80428, + 94180, 46324, 90898, 384293, 16478, 2162, 57856, 4565, 220447, 72458, 27818, 12844, 44611, 11585, 8997, 5614, + 730, 93897, 76702, 124308, 19722, 10289, 81775, 44327, 78975, 156207, 11289, 31827, 117889, 212382, 163177, 125496, + 125643, 46366, 29130, 40665, 26254, 99947, 10510, 84091, 16574, 120313, 104829, 138305, 18480, 16829, 8176, 17121, + 65006, 22646, 16279, 64878, 15806, 31922, 5544, 11868, 38549, 32569, 219914, 15814, 246418, 24003, 289255, 313945, + 46052, 157270, 10005, 11234, 36056, 153288, 35135, 63031, 8440, 1275, 43404, 298391, 34984, 44385, 45317, 14880, + 30170, 5875, 12546, 45344, 3163, 4914, 1779, 66305, 59800, 72227, 6487, 36265, 4458, 73948, 78254, 47797, + 115442, 39130, 71796, 11449, 4283, 30975, 181777, 25982, 41970, 19821, 110322, 
41466, 33507, 69754, 31950, 489525, + 104078, 4158, 69239, 85214, 1653, 56217, 34223, 9944, 22, 35473, 131427, 25058, 121158, 166086, 254762, 9745, + 276486, 4120, 33379, 30250, 3655, 10419, 70458, 24518, 6338, 80002, 3419, 4757, 24048, 2139, 938, 210466, + 133421, 8823, 35575, 1574, 23641, 94423, 17694, 113822, 2161, 40833, 49449, 5625, 24422, 131600, 516, 97149, + 36006, 24910, 111249, 104788, 8086, 63142, 17790, 49461, 10675, 30015, 25712, 12492, 181474, 43374, 19331, 39246, + 12307, 170327, 251360, 85956, 29514, 4826, 115208, 41680, 59143, 108007, 189711, 48650, 14729, 28727, 61638, 233522, + 52509, 9807, 83278, 43091, 87128, 205247, 225046, 135671, 122470, 46278, 118683, 70557, 19446, 17394, 5920, 32166, + 80852, 8067, 86203, 410557, 33314, 48904, 140515, 16642, 24573, 28653, 11481, 199433, 119864, 5958, 55298, 63357, + 14237, 29162, 73299, 12246, 9652, 101149, 98618, 9611, 57779, 9592, 43988, 1014, 6612, 44197, 23629, 95201, + 51851, 69205, 94416, 985, 15284, 36462, 143673, 5555, 98871, 71794, 26730, 9761, 90581, 198325, 133604, 57541, + 124466, 1970, 23017, 51130, 156831, 80242, 24852, 47760, 21190, 76629, 13862, 175502, 22015, 38086, 18855, 52006, + 71380, 1872, 6332, 88866, 161906, 10811, 92768, 111004, 87247, 58129, 1244, 4815, 69201, 134735, 125070, 78458, + 18392, 146150, 4886, 66816, 17908, 31646, 6778, 69889, 108470, 70270, 87563, 7366, 72962, 13622, 24210, 28614, + 40719, 40963, 61900, 8988, 14338, 191234, 63447, 35774, 17911, 12734, 257831, 101714, 95260, 78005, 17582, 88301, + 43339, 8921, 3918, 82471, 20610, 84864, 253574, 21526, 78916, 58506, 1435, 433170, 20710, 147863, 28259, 4548, + 72451, 2742, 1878, 156795, 11315, 75234, 32104, 46510, 31448, 15513, 60382, 37929, 17263, 142357, 16994, 34753, + 58853, 23100, 69236, 5182, 178878, 2545, 32116, 152276, 48111, 71595, 81171, 93964, 116002, 57412, 4245, 19877, + 45497, 156962, 37019, 27777, 80506, 87318, 64770, 13895, 82605, 511, 8366, 87800, 85880, 42605, 80454, 41603, + 36300, 27404, 35498, 82743, 121755, 128166, 1871, 81847, 25215, 5995, 60337, 177660, 36118, 54831, 138197, 126290, + 301929, 182977, 7852, 68390, 88728, 111588, 58683, 115820, 405223, 74730, 36671, 37592, 276136, 25471, 6538, 61648, + 553, 89532, 101905, 27590, 34704, 25370, 42539, 28362, 212438, 98439, 26905, 4804, 49970, 60393, 7138, 33248, + 78329, 11555, 51340, 1796, 922, 17391, 17952, 3534, 20711, 34703, 17977, 21148, 25036, 13075, 6201, 46397, + 257130, 73731, 213188, 107033, 38295, 56744, 112539, 122324, 145369, 82762, 25343, 6279, 18128, 140444, 35989, 4474, + 15385, 14399, 63732, 21041, 30829, 98387, 31090, 23985, 55656, 40284, 132675, 4230, 48345, 64072, 15739, 73537, + 8012, 23754, 107906, 9060, 3561, 183232, 19339, 47011, 28004, 85175, 46305, 37773, 122041, 123221, 100918, 79448, + 192900, 143005, 63829, 123166, 58338, 37779, 33298, 109415, 112508, 115553, 68473, 184384, 41085, 82184, 61692, 70292, + 29976, 115765, 134590, 89398, 87040, 39038, 8330, 21348, 47117, 350, 39878, 167810, 23905, 28058, 42971, 15124, + 4336, 12612, 11230, 18966, 92061, 59408, 12185, 128370, 138880, 27612, 27335, 49677, 97407, 61794, 36678, 60848, + 42083, 7518, 89496, 57735, 172121, 74451, 85960, 144252, 6256, 110791, 28888, 21551, 25192, 5388, 80308, 1657, + 172671, 42290, 48, 195157, 5, 36476, 3307, 89171, 93568, 157177, 7894, 66766, 1420, 14058, 167637, 30649, + 12677, 121794, 69021, 13231, 31605, 230408, 111760, 67720, 56743, 31038, 52660, 61746, 40620, 81659, 12864, 14180, + 6015, 215868, 90084, 141993, 78415, 116175, 52441, 7100, 231077, 112914, 39586, 51204, 
31298, 52736, 64704, 3584, + 80026, 74802, 13972, 215480, 13902, 93110, 20076, 42335, 19048, 41860, 36983, 169990, 24924, 34272, 70434, 20851, + 170586, 12670, 115383, 39041, 32955, 71225, 30362, 30667, 176119, 19860, 10392, 48818, 87859, 7042, 111918, 36751, + 36731, 38144, 156426, 19519, 6773, 997, 29515, 53787, 27711, 72672, 159500, 51584, 24658, 40226, 1920, 70951, + 26475, 36713, 58252, 424939, 115216, 30508, 35252, 34310, 133207, 17198, 8490, 5530, 93250, 121485, 122775, 66308, + 95820, 59227, 108938, 53397, 88522, 6280, 46904, 14869, 8317, 25024, 14413, 98196, 5714, 54882, 8596, 43570, + 124047, 5657, 302052, 35, 55219, 105769, 57203, 241055, 86860, 6086, 121752, 2426, 19677, 79643, 9137, 36087, + 23961, 35741, 73879, 95404, 22928, 151374, 193172, 7078, 162209, 225418, 143148, 102355, 8904, 2279, 71928, 20953, + 225992, 15275, 49711, 9805, 359835, 337011, 5747, 97478, 56084, 49629, 13712, 164652, 96201, 13261, 69730, 256157, + 29392, 154079, 57395, 17417, 96558, 76159, 89103, 154068, 86071, 1481, 55845, 81677, 93643, 5966, 26830, 128209, + 55114, 215629, 46997, 96220, 13347, 6748, 42089, 26362, 8183, 87721, 60802, 344, 95129, 74756, 31533, 58592, + 82012, 86256, 21385, 21021, 2017, 92952, 1400, 17103, 123336, 48961, 24557, 31513, 34219, 94330, 102993, 33899, + 115554, 104210, 45821, 24373, 157159, 41476, 17357, 64836, 47747, 20243, 42746, 100702, 101684, 123185, 46219, 68593, + 41008, 135956, 132526, 17386, 18735, 62966, 27255, 23471, 193781, 89150, 2616, 57853, 104151, 97883, 5292, 181287, + 226906, 114560, 56557, 82841, 7552, 61282, 26131, 1854, 179874, 74127, 60152, 17376, 124113, 21668, 38821, 14260, + 31159, 29704, 82096, 204953, 21162, 64569, 11540, 37899, 44010, 79498, 50023, 39667, 14771, 235800, 15254, 382134, + 51268, 23240, 7289, 52385, 166128, 26710, 362, 239, 31382, 28658, 33227, 60344, 73124, 109004, 15966, 14103, + 77438, 65958, 13394, 9762, 92830, 27896, 85690, 1773, 205709, 14225, 11061, 49451, 12113, 15690, 22771, 34399, + 1292, 19355, 16900, 29634, 38937, 103120, 45588, 32502, 13114, 67382, 21201, 255605, 1334, 78137, 14683, 11960, + 2118, 39992, 111625, 158751, 15597, 47752, 3544, 123201, 69581, 8045, 25346, 232034, 14449, 70422, 61526, 29243, + 21934, 152079, 39735, 29007, 76618, 18195, 153, 27890, 48728, 123899, 68311, 48831, 67038, 51326, 21747, 8692, + 14967, 4922, 69709, 15204, 51495, 12889, 28173, 172579, 24243, 12645, 53481, 33706, 87736, 191077, 102314, 17799, + 147249, 101629, 161049, 4798, 26720, 45298, 89381, 76472, 11119, 5522, 59583, 86712, 46063, 85661, 57137, 135132, + 43749, 6257, 4316, 44510, 5843, 232177, 211804, 97913, 44147, 11778, 5876, 129172, 152629, 43931, 5404, 124003, + 133428, 97102, 297704, 57882, 65703, 24717, 67411, 23646, 14269, 13007, 172728, 4591, 45604, 57195, 12344, 102301, + 57982, 61997, 95626, 254590, 28672, 129544, 121196, 14934, 55616, 30754, 102467, 25644, 45957, 41766, 105011, 59359, + 8438, 80296, 37663, 282854, 95433, 8167, 10263, 65749, 37698, 56099, 237283, 25218, 220862, 42641, 90021, 45007, + 132034, 144203, 26155, 333670, 39456, 63723, 27049, 39050, 61870, 153573, 33337, 28349, 4161, 99154, 19089, 5689, + 26501, 109719, 53252, 38261, 73560, 103696, 77848, 61508, 56418, 8146, 14836, 30856, 9845, 7946, 131232, 127176, + 4654, 201895, 63780, 158964, 20916, 91453, 54086, 43072, 10456, 54618, 169463, 46325, 88920, 26447, 165915, 26064, + 119358, 76658, 11753, 55625, 9015, 143112, 11520, 269278, 65931, 121236, 179251, 9829, 96507, 5826, 140198, 247937, + 48029, 31061, 53953, 19018, 38534, 95073, 117331, 37745, 21676, 
22041, 2439, 21017, 109081, 12120, 58943, 64119, + 43078, 113509, 30640, 37723, 34943, 350151, 79862, 143849, 25089, 16375, 77573, 16298, 6131, 168435, 40662, 50028, + 28766, 133195, 55258, 45544, 23665, 5135, 54199, 18072, 5477, 69501, 106176, 37245, 10255, 33562, 9039, 7013, + 16695, 29965, 30540, 106269, 67, 100273, 20386, 1936, 45778, 12653, 4617, 22300, 42443, 22525, 113152, 24837, + 42770, 66959, 15725, 52734, 29534, 17022, 9388, 334890, 23733, 206600, 71568, 50746, 100513, 18013, 100834, 84319, + 62617, 6595, 63101, 185162, 42630, 76641, 44106, 8165, 48746, 35407, 72152, 6974, 14191, 19580, 30414, 39009, + 43753, 63797, 66647, 169610, 50295, 55790, 16670, 533, 26007, 9475, 35597, 110160, 8792, 8536, 3652, 9896, + 57243, 120506, 37648, 15821, 43119, 131495, 35035, 35955, 54725, 31031, 169396, 210089, 164253, 337858, 11412, 77625, + 58250, 28866, 7134, 37720, 112304, 27676, 81211, 65246, 131796, 9378, 94506, 130705, 25165, 11403, 69938, 129091, + 4651, 244698, 53305, 17335, 3188, 92793, 26039, 26725, 24831, 59363, 5282, 15758, 47748, 53877, 45181, 5916, + 3705, 107447, 7016, 107200, 19540, 12998, 120359, 9387, 13211, 7466, 190758, 9392, 102095, 49540, 128418, 188973, + 5593, 50901, 14566, 196318, 18699, 54825, 76278, 15395, 23666, 36000, 88985, 17589, 32005, 23087, 139968, 578970, + 117571, 145460, 7486, 3620, 33541, 117754, 26398, 27210, 60584, 18889, 20981, 1633, 74573, 38616, 14563, 6638, + 86311, 86421, 25557, 42564, 99443, 36130, 97953, 1966, 25172, 83868, 86024, 136991, 27222, 67124, 48935, 48573, + 168938, 36866, 83414, 114109, 7143, 95340, 16828, 116402, 11853, 106392, 138070, 12698, 53560, 1736, 35550, 51146, + 18834, 22574, 37375, 59652, 19960, 32431, 133520, 38384, 86522, 43137, 73914, 52482, 28217, 63002, 68351, 25347, + 53266, 74606, 37238, 64254, 117700, 2458, 69113, 78847, 72989, 4146, 51993, 68944, 34323, 36106, 26991, 208463, + 18721, 68637, 46037, 28177, 66450, 253789, 306984, 98512, 34346, 8717, 62376, 80238, 74056, 18246, 43446, 108106, + 47217, 76264, 2682, 95684, 202002, 20293, 22279, 7215, 46269, 5998, 50165, 12049, 9429, 33348, 73125, 27380, + 68582, 110884, 139796, 3350, 75458, 44810, 10607, 55791, 37823, 11401, 91589, 124148, 82843, 54499, 20716, 21192, + 96652, 231365, 33208, 41522, 32549, 31981, 17453, 40828, 145144, 39135, 25049, 9457, 27958, 103347, 32810, 43385, + 19820, 53369, 126180, 23951, 158086, 100187, 144947, 109980, 31955, 45450, 139714, 69089, 201406, 73593, 53985, 61675, + 135379, 20004, 20095, 20957, 31207, 58416, 82105, 5163, 192545, 15375, 4004, 14708, 12950, 3007, 33958, 201104, + 51704, 2284, 38239, 215421, 9094, 170678, 68181, 18112, 248263, 264771, 5839, 65767, 76147, 15661, 13266, 123439, + 64028, 57174, 54342, 51589, 110009, 117919, 35421, 20125, 79407, 23467, 26562, 86760, 89345, 56253, 2635, 17366, + 99284, 45433, 65377, 5392, 223492, 45675, 94215, 41399, 47966, 42373, 88171, 10577, 26848, 109297, 198937, 27318, + 15359, 54014, 189688, 14526, 201137, 11181, 412, 4944, 2861, 25417, 41460, 9506, 110507, 60409, 42693, 128247, + 71231, 35793, 106673, 70206, 72297, 6817, 45319, 20585, 31851, 34828, 8363, 13868, 118777, 52262, 102674, 38674, + 71039, 6463, 77753, 62309, 151051, 2673, 41364, 138637, 240855, 165918, 128697, 37821, 16333, 64502, 32568, 39824, + 50766, 20504, 14159, 65654, 14727, 29996, 6383, 42332, 7939, 14308, 8137, 87581, 4149, 88311, 165279, 7060, + 80908, 49103, 28053, 140076, 418780, 65632, 172843, 34187, 88378, 29997, 545, 51172, 59276, 68688, 2146, 123301, + 1327, 93829, 16449, 152910, 7284, 87749, 23351, 3595, 38576, 
26223, 23401, 96146, 79814, 32323, 172636, 114120, + 65820, 86539, 208708, 76711, 42199, 188268, 14011, 15148, 84860, 5832, 11441, 25143, 49574, 6953, 65399, 115759, + 62596, 134341, 14187, 18034, 12396, 16855, 108734, 25950, 70598, 14918, 2364, 136070, 40117, 153617, 71320, 115693, + 8648, 35391, 90995, 4541, 7994, 124212, 8041, 66856, 16836, 11191, 127474, 68712, 7630, 145631, 110314, 22066, + 14047, 1763, 167187, 174630, 359699, 151667, 4177, 41323, 106878, 1793, 50733, 69877, 525, 43923, 4828, 82891, + 29037, 171929, 324595, 65899, 28064, 91271, 65021, 37989, 13380, 31251, 2943, 407325, 11675, 18588, 38513, 8392, + 50669, 90077, 18495, 9450, 74216, 88387, 45547, 81293, 103539, 10795, 31036, 15610, 180314, 21332, 14365, 65897, + 27449, 55756, 149067, 98485, 56299, 40269, 32366, 5250, 172344, 23410, 92231, 18561, 19274, 44347, 122063, 8423, + 7301, 50878, 28326, 38202, 246099, 51058, 26017, 51056, 32043, 22795, 132375, 6943, 19422, 300889, 9098, 15040, + 36506, 86225, 57465, 49440, 129317, 12410, 39803, 48164, 6806, 32451, 82234, 132656, 30140, 15444, 60069, 3760, + 4614, 25897, 159, 28083, 46639, 99936, 101909, 30904, 66926, 173983, 34220, 17421, 932, 21450, 21043, 113018, + 86600, 42052, 132088, 128256, 6322, 29, 124274, 99295, 27847, 33188, 30130, 20462, 233103, 11629, 82802, 23282, + 10541, 47224, 44501, 68401, 39025, 9673, 4555, 49397, 34887, 30110, 111225, 33589, 3517, 74773, 153162, 143852, + 12972, 3530, 24385, 31076, 26220, 32573, 109457, 43144, 2031, 300519, 129953, 9238, 66561, 196911, 99752, 28368, + 115015, 174339, 10996, 8970, 48658, 26284, 10285, 694, 47596, 445399, 13275, 71937, 12714, 3528, 3654, 55811, + 33845, 269037, 118340, 11397, 3893, 231593, 42203, 2159, 16165, 12556, 48781, 72601, 35237, 68887, 18601, 17941, + 89983, 98696, 97413, 15006, 14769, 128317, 13607, 7426, 11962, 244737, 72660, 28493, 147224, 62322, 6982, 534, + 47018, 29199, 6388, 175620, 29977, 6018, 54235, 144119, 27979, 74453, 23072, 144966, 5552, 22680, 20439, 8839, + 82338, 13010, 108618, 484, 86023, 58829, 612076, 306318, 131368, 51558, 106387, 22146, 1218, 11904, 270917, 87286, + 24853, 275508, 76852, 5463, 237840, 82183, 83602, 44537, 132193, 104954, 15511, 29147, 15455, 20088, 13624, 153696, + 40873, 90309, 158996, 18756, 3668, 32089, 12462, 41486, 65351, 2207, 126590, 223126, 53388, 4077, 30070, 25994, + 15229, 66181, 63714, 79318, 59889, 160619, 15153, 12456, 272245, 42542, 24758, 47453, 47934, 65558, 28145, 13413, + 11858, 21191, 27021, 92095, 34347, 32570, 60556, 197324, 18038, 189522, 6812, 28247, 90853, 8634, 180039, 7329, + 86981, 148519, 9289, 4888, 300602, 74523, 9708, 49271, 19343, 63426, 74637, 174896, 114181, 28008, 26788, 8747, + 29362, 99695, 52578, 19976, 84921, 101587, 24035, 38033, 6095, 92547, 36135, 72547, 106059, 4207, 181943, 47905, + 79472, 24101, 58847, 20037, 38015, 46536, 22348, 109208, 1206, 45622, 37915, 26165, 48741, 7232, 1005, 4065, + 6208, 18528, 151721, 62784, 80000, 18897, 5854, 32564, 21916, 4912, 34596, 201975, 17423, 134488, 54506, 154143, + 6002, 72868, 7520, 36987, 108083, 112937, 75656, 32044, 24479, 15432, 40091, 58495, 34931, 9602, 16762, 2442, + 56661, 153348, 22812, 98811, 9511, 56184, 1409, 9718, 26995, 197, 49218, 167694, 100694, 44775, 53809, 6725, + 163853, 45609, 73, 98613, 35997, 111306, 20357, 36132, 81254, 14735, 26511, 23223, 58321, 51715, 70878, 210120, + 18919, 64042, 68936, 3280, 171890, 120028, 29382, 125765, 86877, 48327, 105142, 9969, 91341, 1198, 32748, 184452, + 74503, 188311, 34295, 291694, 70477, 184873, 41972, 33654, 53412, 208288, 
120160, 47130, 7027, 83694, 30556, 99012, + 59281, 43841, 119720, 74947, 39892, 41874, 34808, 25853, 131302, 33590, 193671, 96415, 5864, 20797, 100412, 35136, + 15947, 13143, 28210, 28717, 61301, 5772, 22132, 12722, 67466, 83494, 128102, 176810, 162369, 37658, 11958, 19685, + 47956, 184482, 37093, 126586, 27874, 17054, 70824, 29354, 35624, 31284, 22117, 80846, 282324, 89289, 5812, 73290, + 21270, 10973, 15681, 37548, 111847, 5570, 82, 33861, 102548, 16344, 48551, 72401, 41482, 3442, 126293, 65750, + 30955, 205735, 76092, 105960, 116737, 54740, 25948, 8274, 28264, 111716, 110866, 252362, 8592, 112975, 10016, 89336, + 55458, 589, 60295, 9209, 22301, 101479, 32825, 64171, 75090, 106896, 21619, 26306, 29821, 30637, 132744, 10929, + 30697, 32956, 20307, 87608, 51709, 317395, 23678, 60949, 3041, 29345, 83581, 57276, 19208, 37802, 184528, 147377, + 8038, 29282, 6679, 77158, 24634, 288107, 38217, 32048, 30467, 20308, 11907, 18988, 87509, 32192, 89058, 11561, + 126428, 56272, 20368, 65338, 19389, 104074, 29008, 8049, 18814, 30607, 339, 30562, 152686, 25435, 16446, 78067, + 20701, 122568, 3475, 39655, 83474, 29081, 12004, 116638, 45832, 69977, 107214, 46129, 80891, 11087, 68939, 167121, + 105808, 42070, 117480, 33702, 11378, 25602, 124846, 110381, 153223, 6672, 47709, 80371, 120770, 144580, 11225, 22744, + 98186, 104427, 63765, 10261, 150633, 71276, 51714, 50281, 49838, 190522, 8987, 235791, 9141, 340331, 86198, 17567, + 12755, 210134, 6552, 101752, 30962, 11267, 2982, 8706, 5260, 70113, 110165, 127201, 74490, 93235, 145772, 35288, + 21256, 25014, 2694, 10842, 31678, 81199, 6436, 125138, 65062, 15308, 22318, 40788, 33326, 9604, 145295, 6946, + 289838, 90308, 57283, 326774, 187831, 2998, 288064, 53373, 20595, 188251, 90664, 58927, 89768, 31693, 379534, 13903, + 2805, 2413, 50298, 47087, 58535, 56981, 59799, 135588, 10844, 74455, 88188, 208971, 70085, 92510, 18541, 33920, + 12090, 24555, 11134, 23200, 2451, 6275, 135010, 5521, 138068, 6562, 161302, 44283, 98544, 214408, 65576, 50058, + 24461, 33687, 92862, 93762, 4511, 119896, 3685, 41519, 6754, 9529, 39394, 26959, 41684, 51727, 24271, 311732, + 28203, 45748, 149989, 111355, 3383, 25313, 20209, 43668, 65355, 32723, 38554, 67434, 82833, 42676, 66416, 25724, + 30161, 223, 65165, 45436, 83924, 12283, 5232, 29830, 234361, 377903, 56, 40022, 128424, 12472, 60521, 234629, + 28921, 22232, 168965, 36999, 222594, 73040, 24600, 9682, 33975, 51080, 29219, 59847, 125491, 30202, 38650, 136512, + 34069, 20533, 23257, 105963, 11508, 99917, 34237, 4124, 67464, 43359, 26791, 158522, 144226, 38792, 18892, 13958, + 41850, 71554, 19647, 61226, 98703, 124759, 9034, 30539, 34371, 467683, 65227, 93480, 7901, 29703, 82581, 17619, + 21254, 2734, 102235, 57361, 38398, 17196, 96566, 364971, 65651, 28491, 137653, 151200, 23549, 142281, 78664, 84647, + 53883, 34900, 46611, 40124, 213340, 36091, 30841, 52023, 123269, 197827, 78380, 73808, 12028, 41191, 45370, 7903, + 71764, 90561, 215513, 34771, 177701, 60585, 32517, 25415, 28758, 92364, 41752, 82386, 2623, 76984, 37173, 50064, + 68395, 4403, 43681, 98106, 11549, 37791, 53245, 104084, 15232, 60303, 28511, 10160, 68603, 161243, 108330, 25902, + 9660, 75510, 24031, 320844, 63116, 2614, 187946, 158900, 36079, 32819, 38425, 121867, 57093, 51652, 80423, 5684, + 31198, 161752, 36511, 146667, 20475, 4507, 19201, 30548, 48467, 78275, 5077, 22549, 89984, 50714, 17018, 66706, + 35619, 3531, 25020, 48835, 186847, 9463, 64248, 111387, 107469, 188636, 105137, 8703, 31389, 34009, 25409, 14207, + 43631, 12180, 14360, 89167, 73867, 33692, 74998, 
119507, 41949, 87046, 115959, 6770, 68841, 6405, 81460, 142743, + 114250, 5132, 148038, 8544, 1605, 15540, 177509, 152453, 30564, 1453, 29815, 32099, 63403, 6534, 252, 11418, + 17588, 16946, 116226, 138792, 27680, 101291, 14339, 60744, 99533, 194699, 39891, 16377, 12641, 146345, 141037, 140957, + 70325, 42826, 21176, 19061, 50428, 51375, 12005, 144174, 73426, 76160, 46461, 129205, 78379, 40145, 25474, 133179, + 11855, 127230, 80781, 27661, 91651, 396, 88286, 34420, 72081, 94521, 46013, 15403, 91720, 88126, 549, 79964, + 60198, 187793, 125452, 14392, 15743, 83700, 1715, 206475, 12065, 22805, 39201, 63593, 83398, 13801, 99478, 25699, + 140046, 83985, 258911, 49800, 6761, 9435, 74133, 1389, 46598, 11934, 27897, 5386, 45900, 50232, 260248, 96335, + 5068, 33530, 49692, 65315, 886, 4019, 40151, 91916, 62448, 10628, 13597, 141884, 148968, 14083, 261394, 163080, + 37347, 1276, 8705, 22521, 19405, 57813, 55957, 200637, 9680, 15938, 42198, 23570, 15819, 166772, 12555, 37690, + 43496, 26735, 63396, 52654, 63370, 37031, 83006, 34067, 75667, 18077, 2332, 127143, 163700, 1741, 5704, 42945, + 37639, 34898, 146321, 65846, 3633, 51526, 134217, 9232, 22774, 1772, 66282, 48233, 34341, 21424, 13242, 67351, + 183131, 97579, 28584, 80530, 134335, 38834, 239665, 40278, 37200, 32081, 53185, 40538, 23915, 99449, 228969, 44407, + 7054, 17139, 112714, 101287, 14194, 810, 50801, 33269, 12970, 136397, 73876, 55691, 26438, 37462, 53441, 99617, + 13350, 42910, 71168, 42168, 285521, 78217, 93695, 78702, 25594, 30022, 14876, 15315, 8219, 34298, 97268, 51265, + 104410, 54835, 26232, 30943, 91039, 14205, 64948, 29544, 168804, 48486, 75782, 69559, 138480, 99517, 39422, 37873, + 149734, 38422, 8276, 41956, 15907, 166205, 43978, 154555, 33818, 28840, 72911, 38423, 61132, 39592, 4460, 37088, + 60082, 68918, 21965, 92108, 4622, 33094, 35917, 111849, 110187, 72295, 8713, 89755, 56736, 89742, 45364, 6790, + 13551, 41957, 48065, 8188, 73571, 62739, 53050, 144415, 3945, 24426, 98109, 2450, 73463, 37147, 78116, 9166, + 65498, 53281, 120479, 179965, 17758, 17052, 32430, 155198, 263266, 84633, 148996, 48061, 17593, 38254, 12219, 51478, + 2710, 95095, 20869, 29664, 27585, 13750, 47237, 181, 54469, 7569, 60566, 139036, 38200, 24063, 28235, 13774, + 45367, 230307, 83783, 80750, 63754, 95816, 43190, 122385, 28881, 144847, 15520, 94088, 3473, 3890, 113331, 76118, + 12791, 80924, 118713, 104097, 98287, 136037, 67781, 10689, 31895, 90630, 43079, 93377, 65787, 61758, 1438, 103534, + 4463, 1696, 4512, 120430, 94536, 3019, 34145, 75690, 24951, 53596, 58640, 11685, 36332, 6632, 11422, 26659, + 59901, 43634, 130651, 33557, 28803, 3908, 133680, 9899, 52130, 43287, 6912, 145132, 86403, 177137, 54381, 65649, + 7668, 54154, 97545, 91326, 181822, 89508, 11188, 2346, 74831, 83183, 79610, 47140, 18977, 54325, 82292, 130974, + 9850, 25539, 71074, 133045, 177206, 71768, 81956, 28358, 145485, 83131, 29163, 37533, 109798, 18435, 75184, 52995, + 7292, 6596, 2251, 2040, 31421, 19770, 177982, 2607, 26280, 108782, 69065, 324604, 77211, 91802, 37849, 30896, + 58511, 49307, 70, 135829, 12507, 6486, 51031, 9444, 127004, 180981, 46202, 95370, 11113, 65980, 7248, 1864, + 147, 52898, 183217, 22572, 8729, 6517, 4166, 36847, 56208, 124700, 29553, 6200, 43066, 1797, 193216, 150871, + 79926, 125256, 38154, 42479, 129937, 102831, 33444, 18931, 31345, 35792, 147516, 19534, 83947, 136739, 76241, 217709, + 39915, 177, 29612, 176661, 46146, 21703, 66186, 15852, 98763, 32284, 15029, 20993, 42566, 64132, 96065, 164496, + 1337, 178401, 214897, 22155, 13192, 119711, 143143, 16098, 
18323, 17920, 25851, 94549, 105163, 90198, 141550, 124816, + 80570, 619, 144085, 9847, 117753, 16770, 4661, 55732, 16555, 15568, 31762, 116903, 72883, 28446, 93397, 75397, + 11077, 69803, 1471, 136227, 159438, 102246, 162403, 73442, 40764, 27766, 18455, 53335, 70933, 12129, 19140, 50321, + 83329, 51649, 28681, 50106, 26066, 38874, 69436, 77741, 12276, 9285, 17504, 16474, 72059, 83880, 8401, 45101, + 21655, 38252, 34797, 40053, 173836, 50540, 136368, 21585, 126713, 108126, 142751, 130247, 69454, 55138, 130179, 11656, + 153482, 162984, 158538, 204679, 91585, 26846, 8149, 101037, 70644, 67384, 153679, 898, 102558, 18887, 79015, 11681, + 110483, 73677, 144865, 17407, 6764, 28552, 369746, 32468, 127864, 203511, 3905, 45256, 190133, 1948, 85436, 54536, + 3961, 2931, 39613, 52497, 101798, 205435, 63769, 13356, 20945, 11390, 155312, 107698, 71138, 12797, 29367, 1300, + 82402, 758, 56650, 14527, 90884, 61424, 194495, 23078, 69669, 175831, 14654, 112729, 44753, 232898, 27621, 99382, + 923, 15115, 22648, 45524, 16939, 11503, 36250, 76362, 59700, 80609, 92077, 81783, 164258, 147964, 93190, 7889, + 25969, 56255, 28476, 115588, 27082, 3104, 94697, 68454, 31399, 203455, 22046, 2756, 43846, 6544, 135118, 128148, + 6306, 148500, 12599, 221886, 246093, 1024, 14109, 13441, 51342, 119104, 168855, 81131, 6153, 19221, 79225, 10911, + 151581, 83444, 1795, 32090, 202801, 86292, 19784, 98045, 182731, 14239, 27382, 126354, 56475, 255797, 116118, 46059, + 162188, 48612, 34197, 23712, 89426, 12944, 100256, 15683, 141356, 108578, 128764, 17528, 14355, 271397, 13200, 5464, + 121815, 164025, 19217, 145007, 27536, 7271, 11898, 27670, 28023, 2020, 34004, 98721, 65257, 221829, 10108, 127625, + 77523, 58581, 6914, 16301, 106668, 18199, 38690, 49947, 127314, 61987, 99158, 82020, 24947, 156159, 44297, 40008, + 12790, 191552, 58504, 13996, 38796, 49380, 31168, 78568, 169698, 51893, 110268, 42109, 23555, 66459, 81335, 350079, + 10725, 11018, 5903, 58576, 44573, 24333, 7047, 15096, 183083, 25940, 30092, 21527, 42088, 116405, 102404, 91943, + 62716, 83434, 46226, 148493, 43265, 16668, 120110, 99611, 105958, 52689, 135626, 19929, 32050, 1904, 97814, 125365, + 44067, 3747, 104, 129365, 50118, 22240, 81479, 147860, 1668, 21592, 58604, 65314, 3874, 8813, 70784, 17613, + 6243, 68475, 42985, 56814, 318411, 38092, 62855, 5047, 16599, 38145, 13374, 7748, 1691, 63047, 41837, 61793, + 94999, 40014, 231631, 149314, 52378, 17178, 17229, 20594, 28671, 298969, 135669, 3725, 216728, 259469, 33874, 54293, + 123258, 45594, 132414, 22290, 2059, 129670, 52137, 8994, 34969, 83516, 130408, 123610, 69225, 72380, 90635, 47420, + 5913, 73509, 87799, 48451, 136280, 23988, 84653, 34987, 171443, 34172, 131573, 84347, 141515, 41151, 84753, 26644, + 91662, 53005, 30287, 228834, 22175, 85089, 5937, 239357, 135282, 3173, 18644, 35622, 80020, 157977, 39079, 53223, + 92270, 5878, 10450, 49133, 1663, 158962, 37020, 13802, 4808, 5512, 34099, 49182, 4482, 39140, 15951, 157071, + 3495, 132172, 122206, 61350, 34691, 43353, 72769, 74295, 5226, 24234, 167378, 239776, 109830, 5797, 48280, 64587, + 108512, 25762, 31651, 55137, 17342, 87230, 251069, 6824, 107488, 60185, 7752, 14553, 11606, 10402, 78777, 6829, + 123190, 43191, 33662, 9649, 100247, 18941, 76354, 27419, 29666, 53504, 129460, 19206, 146527, 208486, 41206, 37237, + 113014, 30191, 57416, 5183, 15794, 3541, 57420, 56560, 30894, 159900, 136009, 282298, 13224, 83119, 1406, 240017, + 39585, 278159, 141007, 6722, 243192, 21680, 25193, 108277, 22351, 16677, 47180, 52089, 9903, 7391, 70117, 38331, + 7836, 20514, 
176863, 391251, 47699, 49431, 142718, 40783, 11078, 28591, 32120, 38193, 25468, 458592, 10098, 64745, + 122291, 62128, 93822, 46652, 48821, 13662, 8766, 17814, 26780, 22468, 135823, 124150, 122679, 129978, 11360, 113819, + 75521, 58918, 556, 93437, 81450, 77000, 1965, 3492, 630, 26563, 1323, 193118, 4895, 24288, 80421, 444827, + 92900, 45787, 6232, 74549, 55074, 50259, 138542, 21220, 74293, 14177, 86741, 20845, 17441, 108129, 27172, 542, + 3563, 94640, 76494, 41896, 111657, 28829, 50902, 406663, 103102, 28983, 10586, 120692, 51613, 18805, 55476, 84562, + 12318, 65439, 2351, 47041, 52370, 9553, 44550, 41604, 36191, 65155, 67526, 37264, 68245, 92017, 11541, 5842, + 34269, 49599, 26889, 59557, 40445, 96850, 1906, 135711, 41354, 29246, 70287, 106660, 122901, 45259, 163034, 61670, + 168604, 10682, 20890, 849, 182500, 7459, 14076, 9824, 62012, 17393, 43, 55379, 42557, 140724, 7749, 356813, + 11259, 106585, 56522, 117254, 24428, 38947, 52871, 9300, 115113, 20086, 55518, 10927, 86345, 43851, 62143, 258476, + 12362, 54324, 10491, 40991, 3909, 2374, 125973, 57172, 78430, 341578, 168875, 62670, 86852, 965, 24965, 107818, + 134602, 52017, 33775, 46473, 20459, 104509, 19147, 87014, 47853, 11886, 132839, 96809, 93879, 110609, 2365, 42726, + 22577, 113945, 93171, 450, 7659, 3573, 145278, 14002, 3688, 110115, 18705, 53062, 38555, 47342, 56746, 32670, + 13349, 82125, 43630, 40651, 17381, 125502, 22739, 15199, 56715, 30071, 11269, 266060, 91639, 147670, 110730, 156150, + 12493, 33798, 89625, 209004, 10895, 25311, 4017, 18602, 92438, 67412, 21469, 72030, 3142, 102900, 25974, 86939, + 37057, 197485, 58360, 6360, 28928, 67451, 1402, 28843, 2004, 88633, 12339, 31963, 36427, 70216, 321089, 288373, + 32268, 39008, 31413, 4740, 34222, 25661, 12279, 159470, 209974, 23583, 8221, 132962, 10432, 69680, 62983, 6760, + 67436, 1150, 66541, 28543, 41989, 120559, 51407, 30922, 173518, 85466, 111050, 56221, 107930, 37565, 20849, 48181, + 27079, 107182, 10708, 12667, 62729, 131120, 15921, 10076, 30908, 157502, 94904, 63675, 55558, 6617, 226273, 5180, + 5828, 56366, 13233, 61985, 45031, 2485, 22785, 51794, 14902, 861, 156114, 2399, 53546, 23616, 69393, 128641, + 8204, 20069, 47928, 35756, 144263, 9724, 28168, 77879, 60255, 33275, 30360, 2287, 14520, 139561, 6919, 38561, + 88212, 24401, 123947, 83850, 86582, 113753, 3963, 10915, 109589, 75957, 42047, 64212, 69356, 27884, 27654, 1658, + 8064, 25531, 26396, 43262, 47449, 97110, 27015, 18044, 8505, 10820, 6910, 15078, 66558, 7192, 123575, 29932, + 16886, 91286, 41104, 63367, 4844, 40914, 6359, 2845, 52817, 88113, 9448, 105511, 111260, 18932, 4691, 644, + 215129, 199771, 74460, 6293, 12941, 6495, 60621, 58746, 91118, 57660, 16174, 48568, 650, 73549, 62408, 27497, + 20770, 36570, 9691, 125291, 6273, 101510, 524873, 63355, 73089, 27873, 22599, 43928, 40618, 143174, 44188, 54641, + 62790, 6716, 38742, 153007, 2873, 81054, 70058, 28685, 35002, 11708, 63922, 161592, 14023, 143955, 9908, 36977, + 97208, 20840, 37740, 37328, 19386, 541613, 59406, 52601, 102646, 24863, 90336, 108723, 36993, 19256, 90283, 156507, + 143736, 226099, 22792, 168470, 135457, 88686, 29520, 41685, 35385, 2247, 53884, 7914, 113601, 25634, 168024, 22823, + 17893, 52964, 50078, 12971, 32627, 62833, 89378, 69345, 84439, 2950, 9888, 39211, 100619, 12559, 4264, 76283, + 56016, 57173, 88905, 18361, 6581, 257, 15516, 18690, 57264, 152842, 167732, 100142, 172160, 177909, 114841, 98812, + 45452, 152952, 146652, 57421, 111710, 33059, 83570, 109892, 203627, 18224, 13991, 11405, 70131, 54654, 97200, 72333, + 24100, 8565, 
145467, 50366, 322787, 161041, 7765, 55869, 1996, 48165, 55619, 16231, 35665, 59444, 21986, 40608, + 70078, 91914, 2452, 11983, 22358, 46435, 113727, 327395, 90922, 54176, 3637, 61910, 83658, 16892, 19473, 12132, + 4097, 10440, 32159, 34709, 63200, 53820, 279, 973, 58499, 93368, 5855, 59071, 14542, 256523, 219447, 28720, + 99153, 71396, 152707, 32750, 52159, 60623, 48369, 4454, 6615, 260119, 265523, 24086, 12414, 92637, 2799, 18610, + 63415, 61306, 58297, 80979, 31986, 68389, 34835, 18261, 16823, 2311, 3203, 19828, 1579, 46046, 15243, 2581, + 65405, 113461, 50646, 108814, 137809, 9153, 82289, 158349, 8841, 506, 90330, 157611, 16898, 11327, 131769, 58320, + 48082, 107069, 6787, 166721, 259, 104429, 47373, 3486, 33014, 21454, 101722, 52682, 42375, 100879, 112359, 120319, + 12260, 113213, 38656, 173961, 179850, 20340, 86000, 5336, 73667, 112248, 73424, 114676, 91389, 8255, 33598, 167952, + 98882, 313221, 141752, 163735, 29532, 164918, 42180, 9328, 17311, 38319, 9180, 42386, 326251, 90818, 26426, 1863, + 41092, 62380, 5374, 95142, 5928, 29861, 105695, 62497, 20742, 89078, 12203, 22276, 44964, 36172, 50837, 165289, + 48019, 238111, 16046, 2104, 43505, 99836, 25312, 70749, 9317, 81582, 61952, 108704, 49265, 17815, 1489, 44835, + 6643, 115249, 21605, 88265, 192712, 163219, 138454, 48414, 48424, 56847, 23238, 77168, 163487, 20527, 64335, 82158, + 19861, 31349, 187836, 102708, 20113, 52444, 125598, 1727, 70848, 111298, 11424, 155809, 31928, 96999, 24225, 14471, + 105333, 16160, 91323, 49302, 23685, 29832, 218861, 101530, 78563, 93096, 11349, 110852, 14638, 43607, 123209, 91558, + 54755, 35628, 107357, 380618, 7158, 28912, 18284, 4868, 24142, 51577, 38642, 239650, 44018, 991, 26090, 1960, + 20774, 158291, 81907, 50593, 125255, 6485, 72743, 2966, 20331, 214561, 56587, 142157, 24280, 12387, 150, 57700, + 10163, 773, 45875, 97502, 1285, 41252, 206433, 50923, 2336, 113158, 120454, 130298, 39851, 20223, 103681, 48431, + 4299, 190321, 123059, 33343, 117269, 71169, 26222, 33921, 46714, 32383, 97444, 133272, 63816, 37830, 49882, 50473, + 87779, 17666, 112641, 59034, 159624, 26706, 33574, 47126, 11731, 63641, 16669, 933, 9971, 20216, 30684, 99703, + 990, 23911, 20195, 121307, 137317, 47148, 41928, 10318, 108831, 355535, 49454, 32292, 50994, 8122, 16384, 59812, + 74554, 32150, 56591, 174312, 162680, 190228, 9598, 173148, 23640, 61991, 45718, 79874, 131597, 38994, 5964, 48294, + 146962, 7849, 20626, 126616, 170620, 27716, 30327, 39879, 34829, 15772, 147440, 111326, 91205, 762, 39309, 61975, + 21184, 41411, 43771, 23877, 1913, 5640, 44358, 29145, 63616, 7290, 39142, 49951, 18427, 231174, 53813, 32775, + 93136, 7356, 69502, 40528, 156592, 58725, 18403, 57219, 17519, 27403, 39497, 64617, 67565, 18802, 347190, 3273, + 115882, 171128, 6465, 87835, 138220, 37443, 12843, 45934, 78622, 101762, 40426, 16768, 88535, 152513, 46216, 13386, + 18115, 79078, 49051, 26866, 2711, 79682, 103900, 31529, 33554, 8114, 97575, 47612, 109492, 22435, 45316, 90147, + 54298, 70069, 6113, 94219, 971, 203886, 171510, 54098, 24914, 22343, 6068, 91601, 25863, 143464, 64459, 38876, + 36363, 74653, 13924, 17888, 45715, 21595, 79494, 40396, 27099, 48946, 229942, 68108, 194995, 38040, 6364, 105256, + 14299, 43792, 53868, 72865, 178181, 10801, 106043, 59704, 111488, 12611, 11620, 1615, 72395, 35357, 68088, 375, + 322385, 97707, 2721, 26698, 157719, 129043, 96424, 15347, 130787, 1519, 70008, 98397, 11897, 5720, 336769, 151561, + 81843, 3436, 2996, 78953, 83999, 15800, 40841, 8897, 11369, 50915, 1888, 69080, 49280, 45808, 1062, 11670, + 118604, 
172349, 10206, 107337, 40922, 127584, 10458, 54750, 61332, 36780, 6193, 84096, 110343, 6105, 74775, 66665, + 53407, 65182, 51401, 281520, 75639, 25185, 147990, 19365, 40582, 32622, 61482, 106248, 300440, 3435, 158683, 10556, + 54722, 4592, 47332, 14499, 25637, 77065, 19723, 29586, 13694, 66649, 26726, 58610, 48248, 133393, 123123, 232634, + 48278, 828, 68414, 56873, 194521, 5023, 64229, 14306, 56203, 86618, 265580, 48023, 52779, 84656, 3432, 27698, + 48783, 165900, 55898, 24382, 72627, 95034, 100547, 13185, 10953, 158080, 190907, 81943, 376, 38482, 157736, 4714, + 16733, 10368, 160286, 14619, 280238, 20156, 17445, 38808, 26351, 35060, 125894, 64807, 230789, 9972, 49181, 669, + 15132, 16923, 2631, 4706, 25168, 70685, 110447, 122549, 137270, 146057, 155535, 21929, 3588, 7274, 38517, 20678, + 63704, 12863, 1324, 50985, 73376, 43993, 200585, 60934, 94031, 33647, 19008, 24301, 74284, 50725, 56271, 104028, + 19443, 69093, 1021, 4350, 159557, 108365, 31846, 2318, 9697, 23630, 70960, 33088, 39901, 32656, 33002, 83862, + 13351, 27646, 142809, 48055, 119050, 37694, 118047, 85271, 15406, 13547, 124197, 30179, 146455, 65071, 112066, 5002, + 3460, 7809, 11639, 39385, 29556, 54914, 121220, 5596, 75195, 25679, 106873, 33104, 37673, 7246, 204139, 29952, + 102524, 115109, 36038, 5460, 92329, 49344, 25242, 34014, 47289, 113697, 6996, 24397, 98413, 185550, 45812, 60522, + 15311, 46467, 213965, 11122, 100684, 7259, 17039, 6550, 56345, 167013, 404753, 307, 7116, 56462, 5417, 73857, + 95480, 78473, 10133, 19036, 11590, 36107, 100941, 53208, 7200, 60420, 95105, 119370, 167, 15524, 6653, 43384, + 23610, 46388, 13093, 145345, 58426, 55680, 305451, 86847, 17730, 58567, 178140, 38965, 136656, 5628, 88811, 17675, + 27944, 83441, 51603, 10939, 53151, 35212, 27904, 53967, 2701, 23131, 24488, 104154, 8824, 55304, 27690, 42802, + 103124, 98578, 280054, 5662, 3017, 22793, 12906, 31495, 90744, 23983, 3464, 134399, 113588, 22474, 69068, 5099, + 53216, 109191, 117048, 132077, 79736, 676, 7774, 71253, 65940, 32772, 116614, 53644, 26931, 7596, 62478, 12003, + 498, 19827, 108263, 47076, 29568, 168754, 31319, 6482, 80540, 13955, 111749, 21181, 143543, 7033, 26026, 51353, + 21292, 85463, 42779, 24887, 1740, 14404, 130369, 63220, 59268, 97587, 33374, 6212, 16561, 43680, 36822, 42418, + 180816, 18436, 9579, 74123, 42323, 147608, 27260, 24977, 50174, 61507, 10681, 205404, 40890, 85856, 53671, 11516, + 52866, 109367, 117903, 74687, 10703, 42095, 18194, 187166, 57169, 32725, 8992, 137746, 4700, 203872, 80833, 23954, + 17191, 61462, 362182, 13934, 4424, 17464, 76670, 12727, 93511, 23063, 65868, 71113, 49698, 1101, 33252, 57054, + 166650, 125436, 48999, 126818, 26972, 5103, 5669, 11598, 48631, 1882, 120131, 84296, 165169, 35330, 3827, 13014, + 82879, 103279, 159135, 24477, 69326, 96729, 88413, 114734, 202970, 9725, 43640, 83623, 4007, 109, 63529, 18092, + 2376, 47112, 60271, 36164, 231325, 18597, 8868, 14797, 139592, 33580, 9437, 22241, 22119, 132398, 126879, 35116, + 62851, 44339, 72086, 46648, 37504, 19320, 81584, 48809, 68816, 14329, 24943, 2579, 58345, 39374, 43873, 7046, + 67398, 497785, 28225, 24722, 186643, 41257, 163961, 110230, 43331, 10072, 100738, 91543, 277416, 48225, 37125, 93902, + 53749, 3511, 89864, 7794, 15746, 112345, 53732, 9563, 23102, 195132, 38187, 21737, 17432, 246976, 175275, 118156, + 4793, 40293, 65384, 52281, 151138, 59278, 5431, 12606, 48822, 47893, 70528, 6708, 54265, 9483, 244562, 3448, + 48203, 48689, 76888, 27624, 198688, 30465, 4820, 115925, 14305, 15512, 85317, 22487, 54287, 116889, 25533, 69671, + 
2291, 33041, 27717, 4838, 18018, 20643, 214146, 60357, 113378, 9119, 25030, 79694, 123260, 45067, 41450, 70902, + 7180, 58600, 349673, 18393, 97549, 78338, 21978, 83130, 87027, 776, 46804, 136813, 120085, 10785, 36277, 36292, + 2920, 3810, 8562, 21587, 76080, 26439, 44502, 4907, 8190, 14533, 46271, 56528, 102005, 40453, 71405, 78573, + 5641, 71512, 18178, 33524, 64580, 92871, 1535, 33803, 14955, 66991, 45301, 14937, 59802, 4319, 76815, 79999, + 54028, 152893, 140972, 94339, 58884, 25420, 33237, 94907, 19367, 63221, 14998, 39549, 81779, 200006, 11084, 130155, + 412567, 22144, 57584, 48001, 85957, 43644, 62293, 11070, 97053, 131816, 56871, 43893, 103637, 49559, 149443, 265832, + 78871, 127928, 16848, 80586, 29364, 10123, 96121, 61469, 27637, 122786, 76840, 137205, 141728, 152810, 275825, 164900, + 4767, 48483, 59279, 45329, 30686, 30546, 26125, 67059, 112738, 1079, 36371, 14899, 130146, 140588, 27019, 105371, + 42745, 50423, 37529, 61488, 12730, 34379, 84661, 32169, 105040, 91899, 1842, 106489, 14844, 44986, 133939, 80141, + 232, 29291, 19684, 156429, 210944, 61928, 43419, 24020, 36581, 39425, 29731, 17427, 152317, 27381, 12301, 14375, + 135543, 5697, 113605, 107512, 29744, 107850, 5566, 76470, 3129, 115721, 13033, 87708, 55647, 1993, 5259, 8785, + 58149, 147004, 120371, 11205, 46319, 4743, 14434, 21697, 27265, 15562, 219250, 35228, 17499, 95327, 51051, 18310, + 28005, 166872, 77694, 6553, 59948, 33504, 13613, 41235, 7170, 2462, 19927, 201918, 34138, 6665, 48780, 7020, + 5702, 48429, 19212, 60564, 293047, 10831, 70945, 188451, 110892, 514, 102527, 8278, 408, 60992, 54201, 111510, + 91760, 50530, 1350, 10771, 218674, 53990, 187697, 687, 18469, 10560, 105023, 46885, 46095, 53517, 5808, 50818, + 81403, 2170, 22662, 47127, 14389, 748, 107058, 67959, 4610, 94719, 30947, 36510, 35672, 12468, 50291, 29921, + 73060, 20353, 3271, 51468, 11006, 18654, 11663, 76382, 74848, 7193, 20342, 16187, 104820, 128163, 34980, 13510, + 118143, 40642, 131, 15981, 190357, 1266, 49918, 3612, 20043, 144329, 29109, 51118, 105358, 3728, 9721, 32144, + 141735, 43619, 23028, 307278, 5115, 112747, 69300, 39045, 27093, 43684, 177727, 78108, 45924, 88098, 14492, 21583, + 123073, 18169, 28525, 17923, 52599, 21514, 7009, 272, 29433, 16693, 69214, 94247, 9616, 71738, 25205, 9774, + 238350, 23631, 52998, 161274, 78610, 112631, 168968, 6162, 24851, 53162, 47254, 154989, 58858, 17143, 112681, 166473, + 26769, 71077, 10680, 22270, 31969, 225745, 12087, 104993, 24613, 70335, 68735, 124630, 18294, 157460, 57255, 111655, + 4982, 2471, 133743, 52649, 32735, 12031, 93961, 124470, 39639, 39402, 65329, 63203, 143563, 23805, 22268, 105529, + 112073, 16680, 215716, 21519, 202205, 53228, 247084, 41944, 12567, 2653, 65493, 34693, 4873, 34636, 23926, 71532, + 88601, 71037, 5571, 115122, 44897, 69538, 89072, 43808, 81503, 36583, 28920, 133940, 101648, 191184, 4910, 9942, + 81362, 14489, 27729, 4960, 34662, 1289, 52544, 14776, 85277, 65813, 125227, 8902, 17574, 23259, 36661, 1163, + 48173, 42427, 11467, 98879, 21435, 10466, 35373, 39046, 221188, 185252, 93230, 99061, 40215, 21457, 5235, 36240, + 39576, 128066, 115000, 60589, 80786, 34006, 111892, 51482, 26544, 10656, 50198, 141955, 64668, 129479, 51135, 64929, + 81841, 27845, 7128, 18886, 10731, 11965, 52276, 82304, 37733, 19729, 35028, 59846, 247986, 66135, 53707, 299634, + 149188, 117025, 39304, 27726, 127703, 9496, 71006, 9486, 495, 94820, 47250, 266544, 18382, 101150, 6281, 60703, + 54388, 207915, 132866, 58949, 72446, 26212, 10542, 172127, 43071, 62490, 4376, 56788, 30974, 24968, 91443, 
11619, + 198723, 24001, 15121, 136267, 89608, 160663, 33578, 82492, 41360, 46571, 2169, 9481, 190, 29283, 38373, 141023, + 33045, 7619, 29792, 83332, 8386, 98498, 39183, 39255, 31658, 129170, 27144, 67660, 19992, 114184, 53128, 24534, + 237838, 8463, 70363, 22205, 119015, 18826, 68254, 121728, 137622, 10868, 292971, 14603, 50890, 34277, 24430, 65197, + 100913, 109419, 40103, 47081, 6460, 25365, 62207, 31961, 116233, 10816, 65775, 61031, 267230, 19629, 72115, 104486, + 26621, 27002, 49635, 13334, 104129, 8378, 107732, 16558, 65114, 17477, 179978, 4220, 14190, 224493, 16453, 41872, + 41542, 19134, 32179, 25813, 14888, 130609, 58701, 15362, 85962, 12751, 55045, 6635, 23342, 28105, 4224, 127171, + 23041, 34303, 59129, 73656, 26453, 19410, 24291, 79837, 43725, 157265, 16625, 34377, 71809, 89485, 82437, 24305, + 45186, 20785, 34798, 76873, 4770, 3923, 44346, 9874, 46452, 55654, 54665, 4785, 53894, 5025, 60017, 171215, + 56616, 15749, 17762, 16761, 221286, 105132, 20523, 56343, 18973, 1037, 602, 323886, 9038, 95606, 38396, 120502, + 109299, 77097, 79367, 136893, 55365, 8323, 10041, 75592, 19366, 65814, 15614, 83469, 26863, 51521, 18119, 15623, + 18808, 146873, 172424, 11543, 60909, 19528, 22504, 437698, 69353, 57600, 115924, 4644, 41738, 42360, 79256, 27642, + 83463, 162712, 19094, 2916, 12100, 118144, 293484, 54555, 68561, 61908, 73882, 178688, 72860, 49139, 70532, 39905, + 3980, 13190, 59763, 20658, 13796, 126559, 22696, 105248, 49340, 64706, 49195, 81589, 12332, 67817, 7722, 132448, + 31311, 20180, 151712, 35160, 27418, 221689, 96589, 5129, 4255, 9628, 16047, 98064, 53430, 33666, 22032, 208500, + 18976, 61564, 112915, 15902, 45523, 84205, 34747, 46386, 510, 76620, 34577, 24704, 14224, 18606, 4384, 109151, + 30477, 37359, 74952, 30874, 26581, 19550, 21657, 13283, 4530, 93073, 46376, 122364, 3651, 181757, 62601, 2206, + 101663, 118572, 17855, 52952, 139840, 62606, 278021, 77080, 22709, 7959, 241332, 42371, 150861, 39772, 27577, 14415, + 31996, 13012, 109120, 42856, 63923, 41223, 14878, 182608, 120623, 29348, 36991, 15117, 262522, 7183, 54934, 3332, + 3076, 65389, 287192, 49740, 10528, 35270, 56818, 22639, 2929, 61578, 23280, 30127, 14672, 3157, 47696, 21588, + 130238, 62110, 52327, 56193, 18087, 130896, 142685, 109840, 9816, 18121, 57992, 97319, 121894, 80599, 54501, 27268, + 100308, 81451, 3014, 47285, 25085, 7174, 5312, 74894, 55111, 160804, 29625, 46682, 14565, 172807, 29181, 36368, + 18952, 75109, 124625, 24016, 53293, 186013, 58761, 87814, 2042, 89459, 35087, 63052, 369988, 86526, 54374, 118097, + 23674, 2122, 110119, 15845, 61789, 10677, 100968, 183270, 133529, 81233, 103267, 198839, 28783, 73169, 17965, 361713, + 108293, 136324, 42487, 26373, 35477, 218558, 27158, 71527, 47119, 331883, 45160, 78970, 36448, 156591, 71832, 96730, + 71049, 146, 111991, 17781, 40015, 99088, 5627, 90890, 33055, 23338, 4252, 167723, 78598, 149826, 35613, 11228, + 198442, 31532, 169906, 23323, 1833, 125495, 8102, 12897, 159937, 4522, 13990, 177607, 40654, 227310, 20082, 31598, + 77444, 15467, 109179, 17723, 189245, 6218, 103555, 3422, 113153, 7920, 38581, 148584, 8621, 19962, 49090, 28195, + 18599, 79279, 133222, 12709, 38553, 16730, 103630, 60913, 35223, 13228, 67419, 28730, 166072, 36003, 6493, 36652, + 2375, 50657, 9527, 81998, 11659, 334160, 15168, 51380, 21786, 15122, 59275, 119745, 89523, 67219, 2185, 46735, + 6032, 47447, 137690, 5166, 12116, 23018, 5022, 48313, 63046, 10829, 83429, 2746, 159398, 67335, 98418, 53999, + 18454, 31569, 37892, 109132, 3678, 81620, 104491, 54681, 32521, 109973, 42533, 52504, 
47626, 42504, 36807, 2633, + 11411, 159915, 60573, 111310, 103527, 128644, 103027, 285252, 38896, 10950, 44585, 164005, 42946, 28131, 45971, 32359, + 15696, 11312, 106951, 2917, 26370, 12048, 16052, 74680, 10185, 98893, 89672, 24790, 8413, 87026, 851, 4549, + 37080, 174727, 18289, 200644, 165583, 57864, 69857, 29259, 4331, 19717, 91462, 13011, 63555, 21391, 1990, 44481, + 14907, 7340, 15774, 49591, 72220, 3931, 6418, 53568, 50056, 9288, 4106, 48438, 6623, 5117, 1012, 41437, + 62236, 84002, 33918, 65542, 36565, 92277, 39629, 127582, 49783, 44146, 117628, 5155, 10049, 42692, 4608, 187456, + 17503, 74015, 34318, 12962, 100581, 33867, 21832, 157914, 55951, 106858, 14475, 188259, 146244, 57414, 21163, 26020, + 24724, 5253, 161736, 121717, 9626, 38446, 431177, 1188, 17969, 46978, 157547, 62747, 25524, 30400, 177415, 12289, + 109300, 175621, 49009, 166322, 173965, 132234, 51768, 26032, 99994, 15755, 29800, 72351, 101056, 240613, 21289, 15214, + 46459, 46348, 58410, 30396, 43647, 89731, 28915, 142785, 53737, 14946, 5545, 24313, 277968, 72809, 33232, 8640, + 8347, 16712, 3946, 25575, 123521, 39179, 9991, 145543, 74858, 137200, 34491, 8629, 33829, 23820, 68921, 35423, + 44762, 49871, 133748, 46991, 77574, 166611, 31020, 11875, 877, 44133, 33877, 3257, 81377, 73657, 30727, 46882, + 222525, 70076, 137697, 69291, 123532, 18001, 25423, 188796, 30602, 16643, 53078, 60931, 43881, 38283, 3221, 355554, + 53145, 80702, 59009, 39689, 2973, 60883, 77304, 1117, 16284, 24073, 22669, 122990, 81940, 39844, 12369, 18781, + 61281, 133978, 1107, 20779, 127044, 44416, 157395, 5868, 63620, 24846, 35621, 48921, 9875, 41926, 56949, 22208, + 14756, 163188, 3847, 68103, 114829, 35532, 76171, 42852, 19032, 34658, 126394, 15837, 9202, 15195, 40076, 88927, + 52759, 150700, 23019, 8077, 119141, 224558, 88676, 134306, 23928, 48525, 115416, 76405, 120551, 21229, 51681, 188804, + 19607, 12750, 33280, 20117, 3599, 57084, 87953, 20205, 33401, 81760, 13064, 16835, 76821, 64466, 84841, 46288, + 73233, 87118, 9264, 13911, 117430, 86790, 13771, 24443, 39968, 82124, 15778, 63864, 36539, 3066, 24571, 55240, + 7071, 149040, 28850, 48106, 5446, 69537, 2674, 42691, 121735, 29802, 93801, 222967, 194059, 30066, 53170, 4677, + 15206, 107943, 39854, 31639, 45283, 53779, 59063, 2348, 6706, 195398, 48495, 2005, 15603, 165902, 265960, 30910, + 65615, 207693, 136663, 32249, 1207, 87403, 19215, 33091, 165723, 49438, 43462, 11067, 92275, 62699, 44678, 45289, + 34773, 17412, 48972, 1858, 104447, 181545, 1001, 119266, 8396, 153464, 132289, 115529, 32353, 7992, 172028, 54069, + 205240, 29592, 91739, 119994, 164323, 113460, 108440, 30929, 13600, 17141, 120659, 18146, 60555, 88204, 146500, 273964, + 79205, 25175, 90166, 44725, 25532, 113251, 4468, 67884, 22907, 53542, 14201, 46217, 33410, 54978, 26399, 177809, + 57480, 121936, 45917, 11309, 107111, 19480, 6478, 34292, 69630, 3736, 25310, 244475, 32137, 29438, 9982, 103486, + 47832, 52490, 37250, 2562, 70913, 5179, 136415, 91803, 33161, 9255, 18432, 13038, 20321, 49485, 6575, 115334, + 2371, 33789, 7808, 45775, 117348, 14605, 42941, 98279, 10714, 95052, 76922, 21407, 86246, 16306, 223273, 15316, + 1625, 250738, 2661, 32095, 11763, 40925, 49073, 29377, 17900, 1538, 17882, 8, 268, 90833, 50429, 806, + 78457, 6040, 177458, 105879, 99175, 24946, 48684, 55552, 97940, 1832, 81954, 48107, 101092, 1204, 34337, 93721, + 86660, 78345, 46377, 86370, 32221, 80601, 7480, 46341, 14041, 65429, 7269, 59499, 128504, 5442, 8744, 14012, + 125080, 161674, 6784, 14584, 53744, 130216, 6841, 122602, 124263, 31593, 18807, 113744, 
31017, 885, 20344, 220948, + 13897, 791, 83914, 15043, 403, 15272, 20903, 38772, 31859, 100639, 117558, 55130, 21964, 75452, 165550, 31144, + 5633, 18734, 4133, 25708, 111630, 157700, 635, 222034, 5547, 90494, 37283, 12982, 77329, 31122, 83775, 43379, + 17961, 19477, 64730, 37597, 18241, 81216, 57923, 24610, 84995, 11492, 22811, 22078, 25984, 270901, 50439, 91195, + 12983, 70850, 98931, 154004, 67491, 66969, 83799, 168671, 62168, 141041, 109484, 189545, 47262, 124643, 76032, 11984, + 5241, 73085, 225, 35278, 297, 65251, 11750, 150754, 51191, 49847, 161229, 99388, 7351, 7406, 114189, 8814, + 8967, 13541, 57080, 44100, 147212, 116349, 149864, 40247, 82060, 41029, 37322, 87508, 16821, 59609, 20679, 68392, + 782, 43253, 332, 39938, 11033, 23732, 188925, 38934, 82431, 93785, 15532, 191109, 62957, 63022, 27531, 12252, + 5026, 3833, 2431, 21465, 43459, 25953, 192718, 3308, 77963, 89356, 168314, 168685, 203477, 49146, 7655, 23219, + 53528, 24982, 48685, 54631, 6247, 793, 74260, 48185, 191852, 3095, 172876, 179270, 87774, 154528, 7635, 7686, + 74164, 17007, 112569, 121364, 215654, 111268, 13329, 18967, 13467, 122533, 140331, 36832, 1522, 10077, 18418, 42747, + 219964, 6033, 9858, 108306, 28589, 3913, 123863, 178765, 244104, 135651, 14684, 90, 16242, 77118, 55574, 8651, + 117821, 131103, 73529, 284295, 67725, 19246, 36974, 62269, 72570, 115836, 24484, 7412, 156792, 33054, 573360, 105103, + 17186, 54299, 4686, 38563, 15979, 5729, 49563, 346297, 26990, 61110, 74313, 185151, 44128, 30657, 15134, 17195, + 193014, 25751, 282233, 137910, 35276, 2087, 2963, 41221, 57125, 17228, 387574, 26559, 16212, 16812, 134202, 221232, + 166451, 26462, 936, 117519, 68017, 293962, 22383, 16552, 6905, 277202, 9279, 27507, 77608, 63540, 29132, 62909, + 16364, 5654, 9548, 22674, 53777, 85931, 21483, 80245, 75921, 20622, 81952, 19224, 76426, 285538, 17463, 125910, + 37975, 57734, 50774, 69447, 26203, 50278, 2138, 85944, 269296, 11227, 222528, 187351, 64099, 65932, 14212, 61685, + 84122, 91003, 37581, 16099, 12077, 59461, 40868, 28123, 38533, 44075, 2588, 36016, 830, 15331, 10265, 75145, + 4407, 11384, 111778, 70114, 20139, 984, 26926, 342, 963, 46653, 91781, 6704, 43028, 132517, 82755, 31140, + 38902, 59578, 21048, 53790, 42911, 10083, 25044, 133733, 37503, 46985, 29071, 44261, 83343, 70437, 43357, 137819, + 85045, 60306, 103907, 440, 16979, 162235, 12666, 4636, 1165, 37911, 28310, 153557, 60835, 38997, 64064, 5185, + 137387, 32320, 2362, 99853, 58380, 69321, 213523, 50218, 86990, 100948, 61020, 50950, 110066, 13348, 19342, 25918, + 134540, 64060, 4579, 40959, 56331, 1462, 8265, 82320, 193845, 39437, 45216, 6839, 81238, 14382, 79046, 111178, + 17922, 29276, 57365, 149704, 163093, 60574, 16827, 8286, 38744, 18611, 99128, 34812, 110641, 21069, 171408, 84764, + 12502, 14627, 116407, 2999, 56404, 1193, 183004, 176249, 34862, 85940, 4493, 112158, 26865, 58572, 118875, 48672, + 125964, 15384, 25939, 117101, 12965, 233142, 40136, 48959, 111648, 66007, 37557, 264020, 25547, 33774, 91392, 32277, + 7771, 19724, 17197, 71573, 27196, 92454, 77107, 15576, 136980, 15649, 12242, 7975, 9555, 9118, 86424, 147261, + 29551, 122983, 9583, 13549, 107158, 97365, 31540, 14053, 57885, 2089, 19688, 78428, 18831, 57090, 7772, 6023, + 37705, 42279, 17489, 45843, 35505, 9504, 28240, 38671, 101742, 14357, 11556, 49552, 13970, 125707, 62503, 78, + 102109, 90600, 803, 39636, 62548, 22745, 124819, 19312, 124657, 21863, 74065, 15736, 23328, 7341, 72910, 29289, + 11124, 153626, 25328, 14621, 89592, 68952, 122844, 51529, 146376, 37056, 32205, 119308, 248050, 
105130, 5385, 9375, + 6241, 68714, 119962, 30149, 22033, 32348, 9826, 10506, 18337, 1194, 11043, 20440, 80685, 7291, 6655, 39825, + 29898, 34836, 89830, 2183, 11908, 77399, 230, 65020, 216623, 34971, 221825, 14543, 67721, 227257, 14033, 27652, + 106162, 21776, 20020, 28180, 146610, 80549, 55471, 219506, 21377, 61817, 7021, 305922, 15085, 106336, 54962, 10257, + 91552, 89739, 15956, 10310, 42041, 12349, 21713, 99929, 62560, 8880, 23249, 59633, 122532, 7065, 9930, 6740, + 125336, 55124, 109173, 72328, 102365, 8889, 8209, 56963, 121537, 88838, 40700, 83507, 142559, 9284, 11988, 12130, + 29693, 99684, 1851, 53782, 223919, 30273, 31841, 19809, 11515, 73342, 35465, 93163, 110495, 23975, 49885, 92376, + 18776, 32648, 125596, 132210, 22494, 29891, 297641, 6024, 5895, 22238, 100417, 18472, 185059, 48669, 125656, 10048, + 103592, 49510, 59226, 104204, 229351, 165617, 46718, 48516, 51220, 41125, 102032, 77586, 100102, 19702, 25341, 13898, + 37027, 2385, 82750, 60716, 257855, 90865, 4079, 14003, 29359, 297913, 47142, 98916, 54123, 14508, 12634, 40487, + 36066, 129289, 2029, 245241, 106493, 5810, 109583, 17298, 12244, 60441, 17678, 23488, 79258, 151032, 54077, 112594, + 32002, 5397, 25330, 17562, 432, 34826, 2236, 30268, 56205, 22097, 10644, 6211, 94836, 184579, 8566, 12259, + 90182, 61802, 2022, 29431, 6726, 4718, 91806, 456527, 14762, 39564, 99255, 8125, 29391, 128901, 131938, 180961, + 48938, 149835, 7017, 26179, 26864, 38950, 491, 20535, 38083, 43407, 78246, 137337, 60364, 35466, 21672, 57096, + 3310, 39531, 1033, 46613, 60192, 3069, 21262, 288395, 14766, 77635, 29064, 99405, 205567, 154227, 179766, 14046, + 57504, 99249, 29131, 64556, 110760, 88476, 228776, 3720, 22649, 302613, 85204, 95561, 24666, 38247, 51104, 2156, + 46333, 117076, 191749, 48805, 21517, 16529, 57284, 34877, 3430, 71129, 30911, 56215, 13135, 122327, 141377, 4502, + 28873, 12208, 183048, 148459, 27052, 44260, 166866, 49683, 158809, 197069, 27559, 78004, 11597, 48052, 1373, 159150, + 20529, 206142, 12875, 61289, 6695, 68881, 22031, 2730, 23138, 107005, 59860, 1509, 22960, 36899, 42173, 70639, + 37137, 51908, 72392, 61903, 45574, 102776, 400, 53585, 6545, 7660, 71705, 48771, 305877, 115853, 19814, 37969, + 43423, 3477, 6989, 9730, 26153, 133461, 19482, 123699, 24769, 73107, 44361, 86286, 59844, 213778, 53573, 18022, + 14501, 100344, 128105, 108914, 10430, 136734, 154864, 41259, 134352, 60618, 70173, 3694, 56169, 29875, 7038, 16127, + 13213, 3067, 10956, 12306, 103432, 86864, 35882, 158228, 49523, 48003, 10223, 41242, 35181, 11563, 115948, 71725, + 13435, 59826, 26330, 104459, 12408, 31199, 198177, 173879, 129475, 28833, 287276, 88609, 64620, 90587, 23797, 20565, + 230854, 40940, 38005, 64403, 77390, 30876, 41640, 111472, 51990, 21480, 5502, 71562, 15653, 50942, 53320, 213339, + 83248, 57060, 47482, 99892, 33466, 94511, 35134, 39153, 44571, 79902, 26028, 77887, 117828, 65872, 48866, 22019, + 51481, 20695, 6816, 15468, 2187, 56218, 56453, 72760, 10559, 23157, 89968, 337727, 68019, 20454, 32792, 91871, + 18021, 19811, 54181, 19785, 54895, 74209, 9868, 171144, 48247, 7928, 165277, 1104, 18354, 39238, 60324, 127799, + 33737, 46441, 21125, 16451, 4554, 138540, 104181, 92183, 108595, 35070, 341328, 119439, 37288, 53479, 244682, 62059, + 39767, 90048, 55516, 10156, 116707, 177198, 93755, 76515, 9175, 160572, 523, 82789, 3726, 1887, 5577, 952, + 108877, 14261, 74693, 168738, 21616, 69086, 93069, 2668, 83684, 168326, 89980, 40475, 49862, 993, 36286, 134733, + 1938, 1799, 17016, 106908, 8543, 213196, 68388, 64036, 276466, 31847, 29247, 15775, 
20134, 20323, 38300, 6990, + 108498, 60387, 10008, 41213, 48770, 96703, 53469, 375632, 102254, 16286, 56810, 71072, 31914, 93491, 252525, 99457, + 131520, 360748, 153227, 148240, 185291, 21359, 5538, 120533, 100559, 7782, 22500, 110558, 51890, 8141, 109666, 34083, + 209, 2223, 43177, 12541, 19526, 38778, 39701, 58422, 76471, 57840, 69298, 168436, 50544, 120544, 5588, 4904, + 71814, 6780, 99075, 11363, 99351, 96073, 4464, 71241, 8172, 116428, 160177, 72875, 198526, 85587, 21266, 28059, + 28816, 58105, 282844, 53464, 20419, 81082, 260276, 6827, 9109, 38541, 171515, 229032, 98389, 150153, 15498, 10140, + 136777, 110487, 92940, 42991, 76479, 19523, 10372, 21173, 75596, 28853, 81504, 63448, 30635, 135311, 13245, 254897, + 165417, 61091, 49656, 45428, 48216, 44529, 18414, 915, 120220, 22939, 12949, 64687, 25955, 67099, 25048, 22602, + 211071, 8500, 87865, 14625, 39314, 101395, 81563, 141087, 24308, 43453, 50836, 133590, 32164, 3576, 59438, 10284, + 2559, 731, 9526, 222252, 146280, 87697, 10901, 52647, 43403, 137838, 74591, 33197, 9233, 63356, 132528, 799, + 17947, 41186, 9082, 7417, 90585, 39849, 50863, 90290, 1786, 8152, 144979, 35008, 86920, 156078, 59343, 104076, + 125662, 127008, 111645, 36851, 2457, 87365, 8649, 14841, 64741, 69960, 475364, 42995, 32152, 78057, 117362, 50703, + 32918, 3669, 219900, 142020, 122882, 25093, 37632, 59532, 78538, 31923, 14695, 27152, 44001, 30500, 34140, 87080, + 31723, 21082, 71987, 36698, 56426, 225949, 5796, 1809, 23375, 125781, 9456, 168878, 103172, 15329, 86662, 97895, + 88177, 17270, 67948, 56739, 145697, 32730, 25167, 10080, 52506, 49488, 112733, 42661, 49319, 110135, 27599, 51073, + 68016, 69079, 90323, 101226, 31664, 20967, 163445, 48192, 41488, 41500, 17028, 27408, 18486, 193593, 92824, 60145, + 110400, 6284, 10148, 99305, 7030, 8019, 15366, 78496, 28241, 25688, 95511, 86416, 986, 31151, 29745, 215965, + 109199, 10425, 23864, 62113, 19900, 54264, 8171, 31738, 42147, 21664, 39774, 126916, 56864, 9927, 2688, 85157, + 65287, 33019, 23537, 6543, 49183, 29757, 1968, 29750, 7858, 70193, 1211, 62048, 24000, 35355, 53720, 246, + 30453, 1963, 267877, 27516, 840, 101181, 36344, 426, 16673, 136627, 20509, 19180, 25907, 70101, 11239, 56975, + 68916, 9194, 87686, 38354, 89927, 16469, 7125, 48706, 6309, 37837, 22274, 71780, 158335, 39840, 82643, 75960, + 36407, 49670, 1989, 14982, 199737, 14639, 22843, 120678, 130464, 46783, 21419, 13639, 13137, 127847, 144799, 159573, + 59603, 17603, 2824, 59934, 201778, 13312, 36351, 40000, 195292, 19891, 2315, 33443, 21015, 43785, 3891, 2636, + 42466, 7039, 20196, 103143, 179062, 154987, 42658, 47584, 172561, 40055, 48647, 12739, 89492, 8870, 14895, 170930, + 11075, 73935, 31376, 34999, 180407, 46254, 20836, 59075, 31868, 41037, 39680, 25099, 72493, 14732, 26639, 55562, + 20998, 611, 41758, 4739, 60217, 85527, 54988, 99046, 9865, 99927, 2352, 15721, 19530, 86459, 1125, 66190, + 39274, 9720, 82870, 3364, 130266, 15887, 122881, 84869, 54539, 14106, 293942, 63127, 21623, 10194, 31975, 38172, + 12535, 149, 62630, 75752, 13505, 13805, 8770, 88088, 40641, 208378, 14393, 38913, 73375, 44350, 32840, 22198, + 4087, 10333, 34430, 44812, 85633, 122365, 38608, 134765, 2153, 192171, 2383, 3536, 3117, 54286, 96428, 28420, + 70680, 12317, 147982, 4363, 55788, 39340, 6445, 40899, 92096, 24356, 221898, 26647, 47509, 10792, 131852, 24256, + 98493, 19086, 25386, 75057, 37490, 10028, 45730, 184141, 271936, 286712, 1729, 81050, 151475, 2718, 8857, 7068, + 3032, 70545, 10863, 172184, 16171, 61405, 54726, 3906, 96642, 17628, 2754, 24338, 34106, 1369, 
+	[... several thousand additional comma-separated integer values elided ...]
7796, 105090, 121463, + 40224, 45210, 50967, 43523, 3636, 166133, 21094, 57178, 140916, 4748, 13764, 3777, 31731, 104179, 416810, 188439, + 267731, 86931, 13966, 5018, 9567, 283051, 3232, 53171, 53678, 6228, 32833, 99759, 72984, 37061, 70162, 70738, + 29389, 1868, 122000, 13671, 27963, 37380, 69277, 37341, 17106, 45005, 12564, 183, 50282, 3809, 57468, 23868, + 284911, 75942, 28036, 2447, 60170, 8119, 51479, 25171, 8322, 48880, 42721, 33419, 12608, 11361, 18423, 24958, + 23374, 141776, 2304, 149128, 89652, 68789, 36035, 45613, 5268, 442, 159096, 63783, 39044, 4432, 34374, 26718, + 229766, 59526, 23706, 15072, 8869, 12775, 73562, 76513, 151350, 46040, 29341, 58942, 31436, 8742, 195663, 155875, + 177342, 9409, 11232, 106379, 12269, 40493, 135684, 74614, 183212, 41598, 44475, 43186, 120418, 263054, 22473, 36531, + 116270, 43205, 64216, 41880, 2843, 3299, 84033, 48575, 78888, 37197, 53158, 44555, 69192, 29213, 80495, 156103, + 7865, 86885, 75088, 21642, 184099, 132386, 508, 948, 1086, 69319, 44488, 3613, 129897, 33311, 51024, 2449, + 18383, 28970, 50210, 70726, 70508, 87097, 118602, 146382, 20242, 182430, 197233, 39937, 18508, 14350, 24901, 165385, + 229924, 33832, 136628, 151243, 124569, 88492, 127154, 62624, 35749, 21909, 91751, 8060, 50589, 954, 105732, 25763, + 55626, 146107, 1247, 64529, 9884, 13632, 16172, 1335, 83115, 126164, 36224, 170872, 40971, 2035, 7394, 46551, + 30671, 53142, 107311, 75486, 18135, 69492, 12903, 96202, 14452, 18152, 49690, 77016, 38861, 2226, 385130, 21131, + 17844, 57784, 84, 36610, 201826, 14842, 131564, 77270, 5549, 102741, 193961, 183742, 26413, 2474, 194304, 464074, + 17189, 226650, 166968, 718, 13561, 32539, 7182, 35594, 38539, 16307, 188634, 18219, 10679, 72826, 161909, 63158, + 143331, 23378, 14208, 25491, 3314, 73171, 63331, 5500, 36785, 38973, 614, 1580, 171194, 97209, 137201, 91854, + 49685, 50187, 10733, 91809, 187713, 62737, 172516, 6666, 67506, 4776, 58141, 30188, 4618, 392, 4013, 235640, + 104039, 39136, 56441, 250251, 17060, 48306, 57483, 66360, 195080, 59703, 90208, 74747, 50648, 37806, 27833, 30715, + 33159, 66182, 9356, 34911, 19238, 70986, 114677, 17409, 67559, 80594, 47694, 8643, 134840, 79148, 5452, 14070, + 28599, 144163, 50434, 27535, 157523, 13810, 155, 61478, 17130, 128237, 70556, 20046, 38064, 108184, 48322, 65305, + 117398, 36136, 43677, 27966, 94355, 126236, 109335, 98906, 31918, 54262, 1145, 25681, 13575, 60774, 45912, 122417, + 34538, 24034, 131365, 51834, 40326, 21250, 21866, 17508, 13997, 117733, 16819, 89310, 3494, 11178, 60427, 34853, + 348283, 6846, 88202, 17221, 62481, 8359, 10088, 62152, 26862, 12931, 13414, 151495, 3603, 33643, 178131, 46655, + 104426, 109103, 4105, 146806, 244363, 64272, 15471, 93101, 153709, 21364, 73754, 69185, 112487, 8238, 52934, 80498, + 304612, 124178, 39721, 198761, 199674, 132729, 36479, 5833, 41239, 3609, 28387, 262648, 35545, 67430, 89355, 18851, + 54869, 54495, 5118, 27316, 293005, 36308, 43061, 110038, 28223, 7186, 33116, 18945, 26277, 73156, 23311, 41090, + 26899, 300052, 100683, 8894, 4533, 2915, 66118, 31965, 18518, 75755, 209592, 62868, 15492, 99745, 54220, 111614, + 38587, 26556, 107426, 95125, 80488, 64232, 13265, 23009, 70485, 25431, 69000, 3572, 160395, 98028, 42484, 42093, + 263, 6747, 244525, 132822, 60162, 171913, 124805, 29698, 11382, 8416, 48535, 37248, 222152, 16593, 47020, 8816, + 4696, 24033, 36100, 38399, 250751, 10483, 19137, 143486, 51921, 74917, 130735, 79897, 182609, 69122, 71667, 39358, + 10707, 133977, 56093, 112728, 48463, 33430, 9299, 48097, 46243, 217556, 63573, 36225, 
1227, 221850, 32274, 14107, + 49111, 29646, 36094, 63601, 111564, 49362, 24638, 17610, 46502, 41875, 40909, 4262, 33342, 32777, 32103, 28673, + 56846, 6159, 154445, 47336, 68541, 153151, 3385, 58607, 63559, 108092, 46159, 13477, 858, 2316, 24161, 128413, + 139927, 11071, 88393, 110949, 16654, 34235, 73235, 37210, 229375, 76831, 194659, 71471, 76759, 4725, 28276, 29321, + 26478, 117042, 46296, 101434, 33205, 67157, 6462, 3168, 95828, 54864, 23765, 55657, 23399, 39175, 47265, 10732, + 92945, 59744, 14719, 35115, 2637, 23170, 162802, 33876, 35630, 38052, 9197, 83972, 28470, 19030, 22779, 259343, + 143992, 229198, 23683, 129194, 50214, 30041, 34367, 134697, 14174, 184114, 144727, 23833, 21456, 25715, 53623, 10461, + 166191, 57432, 28691, 157504, 65665, 67457, 16937, 12529, 1711, 168178, 4710, 9397, 21594, 15069, 102984, 295316, + 78019, 30013, 49809, 108210, 97599, 11539, 20109, 22376, 111701, 47783, 890, 213373, 36, 229081, 150468, 50773, + 147151, 857, 99218, 78079, 110246, 83001, 17917, 78802, 189022, 112785, 40738, 2303, 43021, 10227, 101710, 85107, + 30397, 3429, 72517, 49710, 40757, 160978, 8573, 100346, 131935, 103276, 38380, 105759, 42065, 28883, 402, 95838, + 73335, 67055, 20011, 29906, 48039, 54843, 18117, 35768, 26596, 8518, 143614, 74994, 28984, 30865, 51327, 59362, + 15102, 44276, 89118, 714, 2361, 214493, 160772, 63295, 7421, 71563, 196497, 21076, 202167, 92741, 103226, 106818, + 69744, 89977, 100205, 98932, 43766, 76931, 6843, 16584, 52826, 291470, 15946, 28322, 3642, 204, 258863, 106515, + 83304, 32946, 12706, 41427, 33873, 27151, 56397, 63503, 75140, 34306, 73149, 29926, 63169, 91359, 77492, 74847, + 192389, 42353, 148, 138944, 36551, 31192, 181456, 63005, 92748, 24635, 51573, 52932, 13039, 4741, 79294, 69836, + 123959, 107948, 11123, 170654, 233220, 54352, 20575, 40997, 21738, 120351, 106134, 155424, 84447, 124076, 35541, 706, + 77230, 51748, 8442, 40109, 20228, 6042, 20963, 107207, 187852, 127504, 15461, 33093, 19095, 5987, 21488, 118624, + 25799, 182308, 23326, 20103, 92136, 54838, 10713, 126470, 108774, 19203, 61066, 63342, 29237, 9113, 45526, 43035, + 53947, 312166, 11117, 25827, 2299, 27218, 55737, 42929, 118106, 61182, 16888, 118952, 2687, 159550, 3496, 59443, + 8830, 51688, 78340, 103766, 42331, 73399, 58020, 1666, 202924, 2854, 12678, 5891, 33667, 17, 36329, 9733, + 2023, 40389, 124099, 86266, 73763, 17618, 9348, 9584, 30704, 89527, 11379, 88708, 19363, 71305, 80907, 163488, + 19779, 4406, 18155, 125871, 16737, 4313, 24007, 71010, 35629, 59187, 181773, 171755, 48081, 282188, 44159, 41703, + 24068, 63512, 20026, 20079, 101013, 4611, 19597, 169832, 162338, 158889, 21816, 59081, 291912, 51657, 16911, 142760, + 13749, 22555, 50557, 197345, 24745, 28474, 5713, 171571, 328289, 6161, 7725, 36994, 167679, 62776, 62795, 52274, + 70086, 34392, 130952, 4930, 48299, 107549, 36520, 19777, 23306, 51480, 107581, 7841, 16732, 37422, 72074, 99464, + 17801, 125916, 10597, 40408, 43322, 87432, 24862, 37097, 54589, 32282, 167348, 17898, 3586, 19826, 146462, 1988, + 63653, 28763, 132195, 94493, 43624, 52107, 37692, 42666, 53474, 25957, 8805, 222451, 925, 3055, 104713, 68687, + 109177, 128137, 51766, 12112, 251316, 83233, 11015, 24963, 43805, 189880, 86830, 77075, 13082, 33163, 73548, 44568, + 19511, 108517, 64550, 26047, 86565, 54739, 5095, 68246, 142182, 40984, 140026, 1750, 92461, 2773, 725, 19988, + 17117, 106417, 21066, 147935, 101033, 174174, 12814, 99891, 103319, 85050, 87912, 55182, 64589, 75793, 37143, 22056, + 4022, 81127, 18968, 52899, 4351, 50009, 17573, 31885, 235897, 86925, 
127636, 149633, 5352, 170945, 13870, 65479, + 82705, 21354, 52980, 4573, 107142, 167201, 689, 25288, 46391, 60641, 14497, 192600, 156084, 61637, 16433, 20054, + 5860, 781, 55170, 27826, 61365, 81069, 119317, 27123, 10558, 95773, 129802, 55875, 13045, 36330, 234176, 18058, + 7717, 148000, 254, 11979, 18357, 24975, 8185, 10718, 33922, 75655, 63067, 62116, 12590, 26192, 24136, 12861, + 33065, 175633, 5001, 83234, 6928, 37537, 39570, 797, 46993, 106751, 3390, 24316, 783, 56130, 98315, 10024, + 46937, 84478, 81419, 119922, 67846, 100404, 45582, 91490, 8952, 68431, 40501, 170957, 26295, 27641, 117433, 78412, + 6107, 100280, 74320, 156460, 119656, 6001, 172558, 13585, 18799, 5324, 39108, 12471, 17458, 37351, 39676, 88115, + 50747, 61250, 125687, 5267, 4229, 23899, 71256, 32369, 179559, 203977, 81060, 24631, 112727, 201216, 111416, 67474, + 118080, 91, 35301, 125372, 20683, 74602, 93673, 189634, 41464, 103487, 832, 23478, 125468, 73805, 119649, 29439, + 51560, 85326, 160218, 36794, 49749, 182459, 10783, 266813, 44231, 9294, 15428, 71516, 7359, 56127, 165213, 174634, + 35339, 38024, 193163, 64310, 62988, 135393, 50470, 543, 136487, 82389, 59047, 12586, 67015, 11644, 14120, 11086, + 5208, 84496, 75693, 97070, 29150, 13909, 133818, 28826, 24956, 25564, 136586, 132437, 105186, 4035, 36426, 22688, + 48858, 283040, 60007, 145362, 6143, 70935, 12860, 88632, 18097, 251246, 43147, 88692, 6972, 46173, 87229, 69837, + 16404, 61816, 34813, 41161, 73489, 67221, 80668, 80317, 58742, 40484, 31106, 50580, 97196, 213949, 58374, 30823, + 36357, 82430, 16088, 18269, 164616, 424, 215366, 24797, 5834, 261900, 3248, 32765, 32267, 19570, 240015, 43546, + 13746, 95242, 296368, 118754, 147733, 19239, 38636, 84832, 15113, 3902, 193061, 119038, 132091, 99171, 11021, 33759, + 34127, 5030, 3999, 6802, 106298, 88781, 43305, 22438, 39729, 5081, 25109, 217719, 106426, 21384, 30890, 42599, + 22294, 25962, 25245, 30497, 9780, 59534, 29560, 5927, 15602, 52507, 34738, 56980, 36213, 7273, 98748, 10870, + 71502, 69490, 359492, 120808, 42808, 83335, 17061, 250432, 66802, 102331, 12384, 25660, 599, 64618, 47661, 61634, + 60755, 54355, 107300, 30007, 5851, 79666, 18845, 114230, 39120, 65244, 33679, 16370, 67363, 870, 69190, 700, + 108623, 7199, 25596, 17155, 126368, 82661, 16291, 21557, 72770, 55038, 123010, 8616, 91263, 105277, 4143, 14045, + 32486, 45280, 15555, 228392, 30596, 287, 109234, 90030, 151717, 60950, 12314, 165069, 7951, 13304, 63878, 123573, + 52002, 27428, 9709, 27443, 43103, 89429, 81209, 45635, 11768, 65127, 12093, 62760, 68942, 4189, 59598, 70512, + 40901, 34082, 60981, 18280, 39344, 77285, 4183, 14054, 24037, 4113, 474190, 227569, 127500, 33437, 1737, 48639, + 116890, 82909, 62991, 47953, 48403, 63792, 44541, 194078, 16926, 13540, 54497, 288031, 86750, 30818, 25377, 51309, + 17745, 9988, 98587, 47808, 48648, 15607, 51298, 11671, 159545, 43990, 97815, 27393, 34460, 59095, 50213, 40288, + 58419, 77017, 108711, 106974, 5634, 27302, 23195, 55246, 114317, 20787, 2176, 34170, 67865, 144530, 7099, 31816, + 31462, 136214, 45619, 105094, 23352, 133565, 14615, 72015, 24010, 107270, 1347, 151444, 98185, 20971, 63491, 86427, + 125708, 141576, 126803, 83272, 69686, 130286, 27346, 20481, 68337, 143641, 11161, 32803, 13610, 94361, 17084, 170992, + 26271, 23139, 77797, 5076, 70691, 1893, 34857, 35003, 2980, 189255, 41107, 6458, 4768, 19194, 182508, 92031, + 27225, 316351, 48574, 15987, 102402, 14334, 46503, 138005, 75453, 29119, 31677, 97327, 28106, 72279, 7983, 6958, + 8104, 6430, 66373, 51695, 6931, 86975, 52340, 108203, 1176, 
75897, 106677, 21721, 6274, 7082, 13018, 139466, + 6475, 73402, 49676, 46350, 112635, 1121, 31018, 39288, 22498, 157722, 11740, 4237, 6176, 145143, 8139, 77918, + 238686, 12912, 106531, 13530, 26832, 8235, 79497, 23768, 28893, 17853, 435790, 33701, 90319, 24688, 121374, 13130, + 14441, 44878, 2951, 60805, 15682, 368968, 59313, 5764, 15087, 226921, 19420, 46906, 39517, 28151, 141168, 18755, + 45270, 39523, 7813, 6413, 109134, 51672, 66939, 115004, 104440, 22565, 91467, 22538, 45965, 62165, 14231, 46295, + 47645, 71747, 30576, 43847, 81772, 99248, 106345, 17063, 7876, 115604, 42345, 60383, 52683, 130873, 7327, 83603, + 87720, 43843, 117, 49018, 12898, 24681, 9075, 145197, 4505, 69956, 86690, 8844, 185665, 73412, 8843, 2050, + 2769, 61489, 84788, 765, 113401, 48729, 111522, 169599, 15664, 24750, 131177, 29882, 57592, 149057, 45527, 23022, + 105229, 1754, 10064, 14643, 137381, 30538, 7740, 43774, 97059, 30153, 99882, 62539, 119268, 49329, 86015, 5395, + 6876, 63855, 14385, 103785, 43309, 58548, 24380, 31077, 33886, 37135, 15365, 50313, 128363, 48659, 56802, 25981, + 35476, 24927, 75162, 26509, 144249, 18559, 42480, 122475, 67013, 7834, 35825, 193197, 143587, 15263, 5498, 114311, + 83367, 56008, 102437, 38184, 25703, 112278, 26497, 8942, 91436, 71042, 111589, 153704, 59347, 5258, 57333, 17950, + 53236, 28574, 30025, 337749, 2289, 1610, 194412, 103935, 16519, 41580, 29330, 37510, 19844, 35382, 26043, 158342, + 46309, 11301, 106938, 96214, 58558, 67686, 163604, 7639, 99834, 7952, 12195, 58864, 23313, 15895, 25042, 10540, + 218816, 17583, 10486, 66978, 231303, 9956, 202649, 2197, 36388, 7432, 55426, 2224, 51333, 16871, 119452, 179548, + 183535, 153812, 138057, 9695, 109792, 28563, 126806, 52451, 139277, 13092, 48380, 9516, 54306, 67549, 14696, 110080, + 90139, 139123, 3236, 10377, 18235, 70961, 50653, 11129, 8275, 44976, 10027, 18291, 32710, 28012, 318969, 288958, + 37677, 67929, 68821, 98990, 82464, 10181, 44498, 70197, 86025, 8013, 28549, 83043, 92204, 6171, 76895, 4556, + 88842, 98320, 60377, 71627, 117723, 32809, 5961, 12916, 37570, 27325, 108946, 2654, 128723, 67148, 211765, 80004, + 234242, 154947, 40300, 196564, 76350, 3941, 18870, 24018, 73795, 84945, 197515, 8338, 34896, 58219, 146191, 58500, + 148247, 85921, 101683, 117085, 58424, 66764, 17133, 63784, 11105, 84682, 86491, 32061, 11744, 40778, 32197, 195238, + 45746, 2576, 39893, 67918, 63372, 3938, 36907, 4069, 17118, 24568, 96382, 59551, 49772, 244074, 72416, 53854, + 199520, 69813, 60862, 51724, 81902, 122032, 234459, 29839, 38004, 36647, 114474, 13978, 22911, 158690, 15088, 63654, + 33752, 29885, 83141, 89560, 3125, 12877, 21039, 57564, 1995, 38775, 1392, 70857, 53792, 84731, 5519, 86340, + 4689, 95886, 119040, 180041, 26909, 84982, 2582, 213702, 108150, 123127, 35785, 15401, 146062, 49004, 67250, 232281, + 69674, 142589, 97906, 160196, 41811, 23716, 17807, 35367, 161444, 15020, 30993, 67673, 84855, 49859, 39473, 23557, + 8999, 127662, 19183, 120775, 28561, 79489, 57003, 62500, 16731, 29948, 30509, 54712, 93937, 166789, 11042, 61414, + 3189, 128681, 363904, 9363, 21967, 135864, 94929, 23174, 24890, 236852, 51310, 35602, 22943, 58275, 6351, 33135, + 1356, 27798, 41855, 110968, 145300, 139274, 56821, 8658, 51569, 19894, 24832, 4253, 28802, 5732, 52333, 338701, + 517, 144012, 123400, 70750, 118679, 112674, 109716, 66301, 31703, 84657, 45777, 1745, 40607, 17239, 226055, 50256, + 48098, 24528, 28411, 109729, 108854, 16675, 111456, 14807, 25003, 27471, 42491, 22378, 10233, 14158, 70447, 13850, + 73969, 15024, 24742, 25518, 177495, 27226, 
176504, 38550, 5248, 41612, 65904, 91342, 24516, 41883, 18419, 84650, + 215347, 15434, 75579, 9614, 146192, 82954, 25501, 30483, 48712, 34315, 70905, 29488, 60626, 66089, 51329, 5601, + 69188, 18936, 21518, 23440, 40735, 224481, 33618, 40631, 5866, 927, 128437, 30586, 586, 52791, 76586, 141284, + 101541, 81564, 12333, 65243, 6509, 6267, 176039, 133405, 47590, 16079, 254143, 27357, 52129, 34758, 9267, 15970, + 5969, 57732, 7254, 86956, 222045, 17428, 16267, 26799, 110933, 58017, 142888, 143524, 25733, 55763, 16175, 9560, + 24223, 247240, 55864, 48197, 65339, 12856, 21320, 46799, 62812, 40007, 188763, 14523, 2414, 31539, 49494, 58075, + 155418, 90186, 99708, 24554, 35819, 75001, 757, 39520, 16022, 59445, 3713, 46416, 78423, 112394, 18048, 40416, + 43138, 69398, 67029, 137948, 20995, 20115, 42968, 1859, 128255, 8554, 13664, 13508, 240673, 36331, 63579, 21029, + 46745, 152929, 19469, 227297, 236093, 41575, 99905, 13097, 72176, 7570, 11923, 37300, 57085, 104327, 59863, 51790, + 97841, 7674, 89187, 121357, 61248, 15021, 6833, 112841, 107, 73638, 39990, 166290, 36068, 78500, 50124, 104807, + 193177, 49274, 90762, 21097, 105427, 14711, 17114, 26796, 55726, 20684, 123636, 20366, 215229, 140553, 26789, 7139, + 20446, 136219, 5009, 2402, 47228, 5882, 154075, 61745, 100420, 43241, 49334, 149951, 87091, 86136, 329294, 49115, + 14429, 27425, 36818, 111800, 121708, 18542, 6570, 27157, 23605, 12467, 6000, 70237, 21157, 88964, 42071, 18521, + 187721, 24444, 86, 29324, 21880, 145192, 303927, 17373, 2997, 108310, 171873, 60425, 203976, 18220, 116526, 69373, + 99166, 6387, 84942, 187663, 95068, 21687, 241311, 45047, 25877, 42751, 35432, 9804, 7724, 82982, 23192, 146653, + 98925, 132997, 92522, 87415, 83401, 53505, 150646, 8142, 4829, 58649, 25534, 26193, 13182, 71621, 73803, 70797, + 18229, 39127, 69038, 57676, 13718, 58332, 31672, 65795, 239662, 77166, 190337, 3939, 38653, 96399, 34620, 237425, + 116505, 13199, 102812, 5543, 153497, 1676, 111555, 5249, 30589, 10038, 44022, 32064, 89029, 4156, 84630, 183016, + 38962, 20874, 41135, 5896, 181302, 141985, 28318, 41843, 43853, 109018, 101914, 14232, 78872, 97030, 13738, 107743, + 180301, 8364, 79955, 206838, 4786, 1843, 19029, 45663, 248240, 2387, 51445, 145895, 7401, 82908, 16435, 28522, + 106136, 4571, 2405, 140262, 112590, 26994, 36399, 3894, 77745, 48157, 81325, 9424, 19731, 7606, 25832, 161377, + 60880, 3462, 68402, 63020, 77789, 8989, 94980, 140615, 125748, 101040, 138, 61603, 135487, 19491, 9072, 42404, + 5975, 4730, 23228, 147617, 48627, 88470, 18481, 32183, 34084, 197974, 15306, 16406, 12419, 25554, 80789, 50074, + 215770, 77760, 67732, 15820, 47557, 8552, 32891, 3397, 254582, 51747, 37440, 13256, 10364, 19078, 197381, 38702, + 106495, 126239, 69247, 4048, 21856, 4277, 25578, 128895, 67539, 63586, 54687, 80647, 88981, 92208, 26195, 51852, + 38805, 50151, 28772, 79952, 21428, 21251, 116522, 15445, 48732, 44111, 50224, 95470, 42316, 106832, 48425, 377321, + 12149, 3533, 41847, 71691, 16078, 249001, 118133, 10711, 52808, 33393, 18850, 60881, 25327, 125536, 35507, 17128, + 51322, 25298, 64567, 14087, 33850, 17830, 26983, 37516, 51147, 63624, 6036, 75728, 12253, 42565, 30641, 60123, + 122354, 7767, 33831, 15668, 46077, 207921, 704, 228032, 56483, 154155, 19104, 43429, 254553, 40400, 19915, 39707, + 115417, 1959, 8797, 59126, 81834, 6291, 43802, 41057, 150991, 132620, 67404, 31385, 94662, 35042, 22728, 29984, + 86668, 80841, 166714, 23521, 7381, 18863, 127695, 23731, 12841, 34184, 955, 46179, 100650, 105059, 92227, 35881, + 18218, 34994, 30732, 65296, 15741, 
79032, 12811, 2842, 22372, 120408, 11638, 298925, 68294, 83360, 60616, 1270, + 50705, 35353, 39160, 65700, 15535, 87701, 7971, 17998, 84660, 24834, 3600, 57330, 61887, 43556, 70547, 21033, + 22553, 123308, 92138, 46071, 72299, 43807, 86552, 3952, 31361, 45177, 11621, 157425, 24824, 87145, 1530, 1015, + 17743, 64397, 14528, 84960, 46820, 135812, 40268, 205321, 64288, 83124, 142613, 20892, 31582, 178130, 41319, 47604, + 77006, 38648, 45265, 69293, 111674, 38866, 30288, 90253, 116384, 11710, 162727, 119339, 30760, 74575, 99191, 114910, + 80920, 74030, 166787, 23839, 86149, 70396, 8817, 71462, 77192, 61144, 7550, 263557, 51979, 2741, 12376, 38498, + 79691, 27990, 88220, 46311, 60342, 115770, 32907, 654, 122805, 22347, 45779, 35595, 103800, 61077, 11173, 7981, + 240873, 127729, 60554, 100208, 160744, 278120, 46400, 47854, 233114, 14783, 28068, 50186, 78962, 21368, 149837, 32533, + 54920, 67698, 16575, 42220, 8608, 244187, 24441, 16118, 3484, 29636, 35155, 100272, 316104, 399, 13004, 70176, + 72548, 2188, 3176, 10044, 24337, 146534, 171223, 44154, 5088, 100828, 173780, 92915, 230040, 59854, 91355, 69382, + 21926, 88289, 10494, 133339, 10172, 99597, 53605, 17770, 36838, 15150, 30766, 12102, 26, 22751, 93985, 48775, + 86221, 110954, 26896, 56128, 83458, 19243, 10858, 11338, 102176, 1734, 27656, 45449, 12062, 47678, 227191, 104843, + 17571, 33218, 31175, 80, 41929, 75064, 823, 5915, 41170, 26266, 21858, 74328, 28428, 46729, 53037, 208149, + 68239, 44371, 128012, 14846, 41750, 121730, 939, 16024, 103930, 2667, 3749, 72822, 2634, 17905, 21653, 37065, + 18313, 12459, 26288, 15851, 53019, 237454, 82804, 43717, 34825, 6324, 223813, 34763, 97837, 74764, 131779, 54108, + 63115, 77477, 133465, 158834, 24606, 172748, 8241, 11219, 73157, 67543, 21979, 44698, 152474, 62783, 25538, 151168, + 14715, 17653, 20409, 10177, 91439, 51243, 33807, 21982, 37033, 28498, 20946, 82195, 109806, 89357, 35843, 62764, + 140259, 29524, 15905, 148965, 30668, 242609, 18782, 55072, 174760, 95402, 12389, 60205, 380, 39535, 99410, 68744, + 135597, 29770, 37761, 9074, 95673, 61075, 167448, 81798, 136073, 92495, 63964, 71292, 65073, 16100, 82788, 3903, + 134249, 666, 114606, 13925, 13829, 14923, 22844, 73642, 17279, 39192, 82814, 14279, 122305, 98412, 2819, 90185, + 4420, 211793, 88571, 343220, 46444, 31428, 94176, 75136, 10237, 43041, 121311, 109668, 64848, 79724, 95455, 406446, + 203623, 49760, 35347, 15313, 70728, 55604, 64355, 9274, 10349, 116949, 76977, 10948, 182885, 140337, 63627, 148647, + 65075, 60013, 4856, 3391, 24519, 53746, 165940, 8600, 25783, 64942, 35809, 14075, 40318, 2510, 34997, 36980, + 34139, 23025, 39457, 7315, 22222, 75794, 8923, 194881, 63394, 25194, 37165, 85475, 55266, 208468, 18378, 53662, + 102764, 38595, 7896, 34791, 41422, 49686, 92984, 25098, 20126, 17645, 88907, 226875, 65100, 60009, 15638, 21283, + 90408, 4537, 139878, 112661, 53640, 5071, 42553, 9995, 35128, 46262, 76889, 67947, 48932, 16991, 106940, 167117, + 11192, 66889, 6670, 104891, 38935, 1875, 45170, 3303, 96839, 772, 3134, 41094, 34782, 66145, 43963, 48995, + 39492, 21237, 117116, 33731, 19396, 265866, 122508, 109994, 41332, 31277, 72923, 726, 6250, 12016, 13536, 75815, + 5511, 102922, 12522, 133050, 19492, 24257, 18746, 2693, 51304, 63505, 129615, 231652, 25936, 33108, 79906, 94200, + 104466, 80492, 72337, 73422, 54099, 254560, 176028, 6993, 73771, 49079, 55319, 58712, 86115, 97967, 23109, 55938, + 5080, 244577, 48923, 66103, 7669, 640, 49551, 74043, 30891, 80537, 202612, 47981, 111700, 26871, 4345, 17399, + 13931, 293811, 135578, 107640, 
25276, 30158, 17676, 15676, 72289, 37101, 1637, 43083, 135447, 37641, 14254, 111332, + 14820, 13404, 34584, 56626, 258641, 7240, 63894, 83112, 25265, 17841, 32376, 48491, 31005, 66732, 30950, 9648, + 281179, 112290, 34755, 61683, 75286, 5189, 100077, 59697, 393, 103531, 23185, 179430, 95359, 298178, 110282, 125995, + 14623, 78807, 24189, 26684, 13584, 47803, 47440, 29923, 6680, 25153, 12281, 81189, 101227, 5727, 57666, 53928, + 80173, 157148, 23623, 17510, 44933, 56582, 107749, 28680, 76666, 75185, 175076, 32262, 54542, 14210, 77349, 27496, + 13244, 83199, 3441, 55821, 39348, 3757, 3667, 123147, 458, 15000, 19818, 5639, 25379, 68555, 51878, 6205, + 109451, 7850, 13287, 9188, 134348, 386526, 16856, 19356, 81143, 264611, 26487, 30169, 6959, 42394, 96934, 7084, + 65554, 79211, 22545, 21576, 12027, 4118, 60397, 13483, 51311, 75590, 42156, 36096, 8716, 11493, 42998, 11218, + 57589, 275790, 7172, 33265, 140731, 69517, 43030, 8376, 28467, 43930, 2234, 31591, 23316, 47974, 14197, 146070, + 17272, 6751, 33924, 168150, 30458, 113416, 293380, 11766, 25980, 203311, 28924, 162345, 55229, 20334, 34079, 27402, + 77197, 13365, 186022, 69870, 83798, 55050, 364150, 25353, 28302, 1155, 109582, 70417, 114784, 7067, 16416, 132275, + 7428, 45143, 48146, 46692, 34548, 35154, 92593, 5358, 26241, 23637, 54860, 9482, 14712, 7966, 32576, 13535, + 39336, 35734, 47925, 187574, 103304, 90255, 22548, 13788, 18928, 36142, 63464, 150312, 54080, 263654, 319602, 6537, + 12870, 133946, 9773, 20050, 334, 130222, 30305, 136258, 87722, 40831, 167627, 13993, 15208, 85494, 50771, 220399, + 16895, 50769, 10053, 113498, 142098, 93461, 17165, 99681, 114262, 41550, 192972, 66158, 39820, 17436, 87519, 144390, + 83913, 82212, 14723, 8746, 57817, 78233, 11144, 30225, 28682, 86362, 276167, 25943, 7721, 38719, 161361, 102297, + 14900, 88287, 14336, 12092, 108672, 42339, 328, 10290, 11250, 44623, 111087, 145880, 62246, 20511, 67542, 263445, + 42849, 24396, 94945, 30646, 415188, 26446, 102124, 18065, 1724, 4925, 110914, 163915, 26555, 176996, 8050, 33583, + 24549, 11288, 16296, 29023, 25505, 6867, 86739, 11159, 26443, 84520, 68545, 10696, 107450, 65107, 90951, 10518, + 145899, 31404, 52435, 29234, 61035, 11336, 53944, 64679, 43528, 83757, 4052, 13189, 6901, 39247, 35310, 26976, + 60726, 185599, 8030, 4198, 65906, 57296, 259345, 122777, 267741, 2857, 142950, 19003, 21338, 112410, 33257, 200700, + 147590, 74901, 51360, 32601, 42079, 29847, 124456, 34389, 18924, 20790, 120555, 65991, 73017, 171882, 21281, 26841, + 135236, 5978, 4123, 303, 15393, 27267, 28700, 249892, 5206, 105391, 162130, 107419, 4026, 62796, 18843, 50664, + 84185, 9681, 10383, 108809, 1531, 34176, 8061, 39095, 5988, 39057, 7403, 4419, 113890, 60683, 85058, 11712, + 82647, 76332, 51237, 903, 303391, 133929, 25009, 138549, 7386, 175781, 132183, 3037, 69844, 21065, 30442, 4101, + 71611, 155271, 265989, 32740, 189865, 56230, 135927, 48500, 76523, 108510, 11776, 16685, 31877, 27734, 41614, 24689, + 13315, 15066, 48022, 4309, 19314, 41098, 90569, 30515, 198575, 24381, 154303, 42859, 32821, 78665, 30662, 14747, + 1928, 59755, 28149, 70209, 67641, 20901, 5264, 50251, 25913, 66241, 490439, 175537, 104475, 97516, 78264, 91266, + 103489, 23865, 183520, 34766, 3297, 275917, 146670, 25323, 70391, 25755, 49964, 164202, 18406, 31978, 16441, 52632, + 15446, 24429, 4215, 37736, 113347, 8883, 22563, 15500, 19295, 41760, 78521, 113283, 93790, 25764, 24081, 23658, + 27856, 43669, 81754, 11052, 1792, 147034, 105048, 59257, 167471, 86802, 148695, 15116, 116449, 115822, 22405, 24926, + 8541, 22171, 
31801, 33192, 4408, 12297, 301197, 138987, 41757, 44743, 115490, 73003, 63233, 12310, 113745, 80287, + 25765, 1137, 45241, 12509, 86680, 100507, 15502, 82114, 64501, 29571, 9042, 4784, 27034, 836, 106118, 79642, + 24816, 19191, 71859, 10806, 34975, 35721, 20447, 33671, 6079, 126054, 58217, 78753, 4486, 35660, 45492, 39072, + 49693, 135128, 38873, 1595, 36229, 21988, 86413, 27520, 16917, 83041, 32578, 42649, 21581, 17612, 3706, 5582, + 62426, 61684, 21930, 147493, 27862, 16374, 25590, 69477, 11612, 15240, 18552, 19226, 54284, 19154, 205, 44618, + 35702, 62029, 11975, 135778, 194034, 34324, 9287, 92145, 355, 83533, 389, 11125, 24277, 28651, 33600, 110599, + 48262, 80091, 24087, 86535, 87411, 65839, 48531, 5435, 70504, 1680, 141541, 34304, 310164, 9214, 109239, 74125, + 118018, 80462, 100258, 37839, 12516, 18111, 111964, 15304, 47559, 22475, 250341, 55009, 43502, 72785, 26068, 56283, + 57433, 145320, 83034, 101357, 107139, 13166, 65124, 29871, 9290, 47434, 20163, 28721, 66533, 101179, 26384, 119496, + 80863, 26599, 33186, 50921, 14634, 49049, 8156, 90368, 34312, 71503, 2924, 84269, 91725, 54206, 70953, 60570, + 28606, 1961, 1020, 118183, 21342, 60064, 25713, 117531, 67241, 26343, 257386, 77026, 72355, 28646, 61026, 94224, + 43244, 94932, 4601, 230976, 375789, 103456, 58534, 48852, 37402, 24109, 241400, 52782, 174015, 1515, 35127, 236213, + 105070, 41444, 3868, 195472, 8342, 37810, 28026, 30469, 44167, 123934, 17110, 49127, 67494, 4950, 89802, 22448, + 1890, 32145, 62103, 193571, 16365, 8100, 2759, 59208, 11723, 30626, 54047, 111425, 271002, 34847, 30791, 102173, + 1865, 152807, 44228, 16334, 47918, 19851, 52637, 48405, 8350, 22131, 69413, 35540, 45564, 53848, 57537, 202520, + 27742, 16511, 37103, 9857, 25110, 80964, 59758, 10709, 125803, 10945, 60525, 12999, 8553, 3885, 21820, 165805, + 49504, 26657, 12487, 30455, 81925, 76254, 4388, 51128, 62211, 301599, 142773, 27276, 4534, 106190, 11978, 19483, + 15491, 115826, 50411, 58796, 19011, 32938, 119108, 220904, 80373, 67031, 70541, 4859, 206920, 6090, 19310, 22573, + 667, 55921, 9933, 6880, 102405, 3647, 62961, 136965, 128623, 63897, 23416, 79705, 245524, 144775, 47359, 10859, + 5553, 97850, 6803, 18191, 113309, 30019, 22922, 29253, 192739, 61644, 10879, 93327, 65766, 71215, 147457, 80167, + 19567, 55770, 29797, 29274, 22832, 23356, 42325, 44027, 261958, 72646, 19852, 9637, 29679, 36046, 49336, 14687, + 21293, 77708, 14113, 74893, 71134, 200672, 39308, 12740, 20962, 86248, 26029, 50842, 105123, 136390, 98208, 22087, + 24721, 49911, 106064, 73490, 860, 163439, 14873, 41067, 21752, 30501, 145265, 76566, 33448, 28437, 8815, 16951, + 18372, 74873, 29462, 32916, 157167, 37777, 218069, 57242, 94822, 93459, 63003, 77897, 35770, 25963, 42205, 118099, + 173224, 15519, 76989, 16637, 232737, 22211, 31315, 67805, 75729, 4140, 57334, 9310, 28937, 79865, 138213, 106821, + 46828, 51030, 76484, 117312, 28062, 12545, 71393, 159499, 25453, 210547, 151602, 22228, 5207, 75071, 53864, 71005, + 140366, 13537, 2178, 11825, 36665, 45071, 70308, 57129, 30652, 16553, 302183, 10738, 6169, 43148, 24995, 57331, + 67920, 86667, 244672, 341687, 150458, 19053, 961, 107389, 92040, 192870, 41097, 22344, 23186, 119577, 34986, 45018, + 184604, 177949, 6669, 18473, 92330, 10137, 20330, 189512, 20891, 13257, 66265, 48954, 176492, 72915, 219860, 2494, + 49427, 18529, 56158, 30214, 27828, 171123, 69463, 40254, 38305, 23967, 79164, 66024, 42495, 299257, 23031, 106341, + 143982, 353, 39736, 75709, 49560, 70040, 243406, 1642, 25503, 56434, 81502, 48303, 90043, 52859, 24462, 43046, + 
29747, 41457, 23434, 42918, 65328, 52708, 5329, 21975, 47830, 3326, 160281, 95290, 12932, 95952, 35520, 107324, + 11068, 52610, 109869, 64849, 77721, 9674, 61370, 154578, 9003, 27427, 87582, 116020, 25213, 95646, 34677, 3719, + 94205, 2145, 19568, 65295, 140426, 3088, 26113, 131686, 46090, 188040, 30031, 72073, 89945, 2538, 23463, 34360, + 138173, 3342, 84724, 64829, 192691, 8206, 251775, 2536, 33329, 64010, 2755, 48205, 112232, 33297, 244729, 27663, + 129905, 107744, 55337, 67101, 35709, 152617, 74645, 44141, 27514, 12925, 107358, 33190, 1841, 66538, 7298, 34436, + 19957, 54584, 3634, 41173, 31411, 2298, 3434, 77461, 127476, 54373, 77688, 7987, 53572, 15128, 19113, 176061, + 17497, 39049, 101234, 59914, 173549, 48281, 54139, 65147, 55063, 16371, 43136, 40263, 175135, 13721, 69771, 59399, + 19841, 1955, 57439, 88361, 69314, 130279, 804, 37567, 5192, 185175, 75166, 10500, 237921, 127018, 7558, 35337, + 117660, 21372, 36787, 27678, 150697, 7, 190870, 106339, 4060, 7260, 122007, 5881, 273045, 63325, 39801, 38618, + 50414, 113953, 105525, 17559, 98940, 56463, 347332, 34915, 65348, 25837, 82591, 5365, 153665, 27182, 7831, 15055, + 164423, 1182, 30831, 177372, 58804, 5448, 49128, 44734, 156695, 4975, 125400, 91561, 48994, 97252, 49285, 17162, + 213928, 127791, 49987, 50768, 86036, 12840, 111058, 253850, 28608, 197563, 19740, 127785, 8355, 34689, 65656, 32199, + 39574, 8110, 23600, 97524, 34540, 38651, 19006, 29152, 16927, 100216, 30893, 172304, 135680, 31450, 91503, 54177, + 18374, 32795, 63764, 459294, 151587, 85350, 39064, 13067, 10830, 3717, 20553, 32482, 53805, 108785, 109353, 20145, + 16878, 76255, 16289, 14152, 16623, 3446, 23337, 31309, 4282, 24663, 64821, 61752, 48030, 64655, 21808, 264145, + 8537, 50728, 25184, 49171, 14986, 13324, 23567, 199062, 46102, 179857, 99718, 369654, 13062, 27072, 2232, 105686, + 72897, 219385, 64202, 22442, 72, 52447, 22847, 94762, 33050, 52976, 8735, 2293, 108227, 50715, 42136, 12707, + 39451, 45981, 114988, 190349, 45935, 22798, 12654, 1, 651, 11355, 22585, 15841, 113320, 18682, 87649, 22561, + 40535, 140869, 61447, 16658, 95176, 80270, 61544, 83797, 57450, 101532, 133714, 89999, 48843, 172813, 18252, 163124, + 5003, 103269, 9853, 67492, 19019, 55271, 3109, 55823, 10407, 119899, 97338, 54114, 211163, 4927, 123086, 69260, + 3848, 55061, 18449, 12690, 1068, 37710, 26424, 11375, 4988, 41383, 92404, 48881, 32091, 48305, 36150, 113778, + 30095, 105405, 16612, 40433, 41692, 73917, 51729, 55139, 15099, 30180, 50, 16916, 43602, 95240, 47258, 86059, + 107434, 94751, 15026, 33649, 50744, 49046, 74109, 13167, 7627, 11804, 18035, 3335, 171349, 35806, 44194, 37671, + 16313, 34545, 198682, 35794, 150832, 210760, 258621, 12579, 352665, 110221, 193929, 21773, 207750, 141990, 78065, 65827, + 33937, 281, 49827, 8372, 38256, 111292, 55786, 57932, 51091, 10740, 12648, 39213, 156000, 72468, 27361, 213358, + 87889, 22207, 42213, 35711, 90663, 88229, 37662, 37545, 84175, 5983, 52865, 9162, 24908, 28484, 109135, 3656, + 114900, 154191, 40016, 143364, 50365, 4998, 47423, 91888, 31494, 33385, 89791, 113590, 83829, 74958, 6063, 23411, + 5398, 3346, 29188, 43992, 169342, 124619, 152146, 38176, 47521, 837, 5847, 40491, 54818, 14886, 64782, 79830, + 18935, 46064, 22834, 11304, 8356, 14908, 14164, 58309, 43094, 59761, 58932, 55478, 41212, 27362, 8157, 45308, + 174536, 290996, 677, 204177, 10082, 87199, 60656, 99512, 92550, 18666, 17670, 8755, 6678, 78663, 12108, 219237, + 60614, 81551, 23867, 117589, 23355, 14754, 99693, 35914, 69721, 75856, 71852, 97445, 14796, 53501, 37755, 5823, + 
34149, 11053, 56010, 32326, 128830, 80883, 474, 3312, 58187, 4593, 94897, 82655, 3179, 117179, 34370, 37073, + 208, 174, 40568, 42678, 40325, 118866, 28501, 3518, 28399, 91754, 79629, 270203, 225029, 103041, 171673, 19198, + 401412, 202372, 71959, 27441, 51150, 57934, 46575, 551, 31580, 48734, 52559, 6830, 207268, 88303, 10399, 26375, + 6657, 26942, 1499, 28435, 10993, 84614, 864, 33684, 69818, 63313, 138059, 44306, 64282, 22203, 52406, 127830, + 289845, 11019, 2908, 36009, 23308, 8408, 38414, 42453, 12961, 116672, 9638, 175093, 38447, 99982, 7614, 4603, + 6681, 54049, 103103, 12820, 52944, 2652, 87605, 137098, 31855, 44982, 31388, 16335, 2572, 234999, 76439, 59626, + 47646, 105458, 231, 16630, 120728, 71649, 54479, 42672, 179148, 62338, 5367, 4698, 37240, 85883, 273485, 122580, + 45196, 6452, 17224, 35656, 218274, 532, 77135, 92225, 4816, 24612, 23330, 78494, 3695, 84373, 30447, 293164, + 21961, 19227, 40712, 50432, 50084, 83383, 130654, 3512, 35209, 106119, 26859, 2775, 18073, 188766, 9641, 22040, + 51452, 7828, 120628, 59247, 27004, 7212, 84542, 50515, 6100, 130271, 27415, 45596, 33941, 106546, 4823, 107962, + 1377, 42166, 117980, 25577, 84831, 24787, 184967, 17471, 171214, 62502, 4444, 8334, 85, 27407, 295919, 244072, + 141510, 43179, 145423, 52704, 9078, 33296, 18231, 71008, 99227, 13981, 68573, 4322, 32610, 51176, 165546, 3853, + 6417, 145489, 23086, 27479, 11718, 56566, 19653, 100740, 49868, 121955, 56420, 11535, 65579, 132995, 125548, 43942, + 87902, 58981, 4510, 84294, 73018, 226515, 1295, 68198, 49062, 157567, 27234, 124146, 46280, 100486, 144184, 15600, + 61742, 26572, 61714, 65125, 21512, 7799, 35874, 6311, 40862, 35522, 45414, 16108, 107733, 43364, 9206, 73819, + 15941, 51689, 82329, 40065, 29168, 48562, 85845, 69609, 157765, 60708, 25387, 1180, 144919, 159797, 25726, 214431, + 14487, 5968, 68537, 109664, 5767, 13490, 63443, 104676, 158014, 10404, 26593, 10161, 140070, 96476, 96798, 10196, + 7241, 29156, 51314, 97628, 573, 118109, 8622, 3106, 71584, 57894, 84024, 11036, 16921, 66038, 61545, 106441, + 223566, 16117, 74626, 3336, 40331, 47655, 20982, 117267, 179473, 76397, 121704, 23368, 35081, 186150, 1889, 47653, + 47926, 33122, 15734, 26894, 140885, 14802, 76951, 41988, 41508, 57629, 16634, 12405, 52104, 20107, 218288, 100668, + 59180, 73629, 1683, 30932, 42310, 64739, 20003, 6633, 32811, 26700, 39873, 153638, 29048, 2831, 22955, 8961, + 123517, 244356, 25796, 26746, 102413, 144572, 12002, 20480, 80208, 92037, 145215, 65587, 10104, 70587, 35982, 10208, + 14746, 188951, 116180, 117036, 12649, 257536, 49699, 32220, 153641, 10918, 10962, 51792, 126022, 13715, 104110, 23594, + 37965, 15247, 6442, 44822, 113017, 28398, 13830, 44800, 4171, 120616, 5418, 1810, 83, 42459, 4381, 81522, + 142592, 107242, 4170, 85703, 2809, 7049, 62349, 190193, 6362, 36642, 21195, 33097, 50416, 52066, 84992, 65769, + 71323, 20902, 52748, 114648, 116894, 25884, 34351, 102634, 260776, 19638, 86892, 17434, 16204, 19854, 106540, 27954, + 1524, 13745, 42151, 138947, 5760, 153807, 35075, 95356, 30351, 27161, 68708, 53500, 12658, 22077, 63851, 8487, + 20703, 57740, 44334, 64734, 54403, 39682, 77475, 5602, 36083, 1112, 36181, 71932, 45408, 99180, 206226, 42336, + 74772, 77663, 25805, 117083, 4946, 39476, 36769, 30289, 14485, 5872, 59638, 72213, 50759, 23451, 882, 2453, + 111222, 168615, 130208, 48836, 10890, 90002, 55698, 21422, 2195, 35834, 39131, 16781, 167147, 16091, 54925, 18399, + 92962, 80011, 5820, 4726, 130534, 187899, 869, 40302, 16283, 28616, 86006, 14823, 177256, 25701, 70837, 29786, + 35016, 19926, 
80067, 4711, 15472, 93684, 2584, 58032, 210156, 70971, 75498, 15685, 151187, 60994, 38213, 13471, + 73922, 9338, 117718, 24543, 117691, 15713, 45967, 200243, 43250, 36553, 35694, 36433, 52051, 152826, 305512, 217989, + 37392, 40189, 4153, 56219, 24811, 51616, 37703, 87103, 24358, 84298, 167734, 60608, 30830, 95114, 82423, 123075, + 5775, 16326, 137007, 23746, 818, 184283, 59155, 49161, 21969, 92570, 27322, 24660, 1476, 194447, 116982, 30577, + 127322, 117428, 1856, 80745, 151783, 5171, 15901, 75451, 58392, 49455, 93446, 42926, 31021, 17030, 17243, 171279, + 106913, 15354, 115117, 51694, 65215, 88371, 23841, 28644, 89407, 71198, 6973, 57127, 90802, 67682, 21453, 30346, + 28531, 59792, 72619, 106195, 11690, 597, 21636, 30078, 20234, 8145, 91408, 50011, 95249, 25250, 66246, 24442, + 44602, 12103, 41001, 105897, 37256, 44489, 85248, 1331, 18707, 29983, 310182, 6411, 11928, 10116, 19299, 122916, + 5161, 82625, 56098, 136518, 4410, 33338, 119068, 31371, 26571, 52839, 11442, 358, 51903, 115795, 48253, 212226, + 49768, 72313, 32154, 54738, 22008, 16766, 174325, 98378, 25252, 9732, 16533, 147195, 65780, 41940, 24564, 81099, + 209499, 21378, 137617, 184321, 68769, 172072, 71325, 81618, 203726, 24974, 21300, 111798, 13249, 30461, 47901, 78074, + 137363, 96937, 205703, 15259, 48845, 38294, 28061, 109460, 86823, 28722, 44363, 19999, 6658, 142277, 14939, 11150, + 5674, 45392, 60588, 177764, 31881, 6786, 145293, 13598, 1083, 12784, 3617, 14433, 1823, 25033, 79112, 70251, + 108676, 88876, 67887, 11458, 34518, 12199, 148504, 65495, 166752, 78027, 54905, 18762, 13791, 20914, 58692, 1568, + 14287, 15068, 7216, 15244, 91576, 191867, 58273, 3830, 91429, 78507, 84897, 9770, 8665, 7954, 43039, 48860, + 11529, 61697, 166056, 55960, 26401, 61415, 290831, 12539, 16191, 30889, 13589, 1191, 91972, 41144, 4955, 34048, + 30964, 87299, 107280, 64425, 5254, 43169, 46627, 18402, 28486, 30816, 67369, 1564, 54697, 41405, 16000, 32524, + 79613, 30190, 43938, 8057, 66520, 53870, 1494, 247505, 18447, 16053, 29278, 66743, 22870, 25668, 1648, 14080, + 45203, 1341, 40989, 119871, 194466, 122534, 8385, 58819, 22822, 35970, 12729, 29360, 51703, 27032, 51912, 51956, + 12278, 36617, 79242, 39507, 76716, 85023, 73180, 18140, 44595, 125017, 191485, 174629, 73455, 77570, 220522, 125113, + 33546, 90187, 62766, 35279, 12235, 8675, 15151, 50393, 144843, 26013, 205214, 46310, 36154, 69776, 28572, 32563, + 51247, 38454, 4595, 42074, 11116, 86835, 30706, 10273, 33040, 34204, 54246, 91737, 3180, 77652, 106293, 106121, + 225753, 62203, 83244, 49829, 60864, 33244, 3262, 132227, 1972, 167168, 175800, 113557, 28469, 1342, 99125, 98666, + 12891, 8033, 119055, 3277, 28879, 37357, 275688, 62785, 10338, 60445, 97431, 99394, 144157, 1870, 20794, 59985, + 56294, 1569, 12614, 65686, 353058, 24023, 105292, 40234, 38302, 59113, 20587, 39754, 41447, 7733, 28382, 149537, + 87532, 70154, 27770, 8584, 110616, 28877, 50839, 33339, 27065, 8349, 41578, 41373, 168438, 10230, 58202, 18179, + 6557, 87189, 41859, 112308, 1213, 37229, 12748, 127395, 50804, 25519, 6813, 29126, 144643, 51945, 3761, 173270, + 24817, 37177, 11538, 1953, 2390, 71610, 55025, 12286, 136531, 8290, 7081, 13438, 38174, 12201, 368643, 56955, + 247513, 86715, 29189, 151151, 16190, 44518, 9116, 26301, 4059, 29547, 121363, 528, 122791, 104758, 128283, 132963, + 131994, 18283, 17120, 57082, 137430, 286470, 90537, 63450, 39506, 73884, 58318, 16044, 57650, 17259, 42080, 17885, + 16305, 157015, 93813, 43437, 5188, 134150, 32055, 268669, 54309, 84632, 18425, 114608, 106128, 82465, 25150, 81372, + 20628, 
50827, 203900, 88756, 88071, 113318, 88552, 32344, 67394, 25784, 120662, 65041, 395446, 1313, 179364, 2878, + 250285, 16496, 42810, 142259, 66176, 14834, 29115, 136061, 91254, 103667, 12871, 26008, 1399, 9634, 6954, 97146, + 114196, 292674, 65716, 14216, 43915, 106501, 379, 35470, 60230, 24709, 71955, 28003, 44853, 42762, 19842, 9247, + 27206, 76172, 35445, 42656, 106353, 30864, 56216, 217302, 43013, 490, 12455, 125743, 18733, 112917, 66668, 5890, + 345105, 38120, 9856, 28648, 226453, 13944, 99130, 54004, 51202, 214051, 47536, 22937, 16607, 40104, 54194, 4979, + 57106, 15086, 23012, 12071, 117175, 174267, 29878, 59251, 35492, 196132, 120077, 81399, 10476, 19539, 129457, 31908, + 89598, 42460, 90787, 28424, 127439, 6776, 101077, 81013, 15187, 1074, 58103, 66003, 39624, 68595, 18810, 173127, + 13688, 6576, 66630, 43484, 61570, 92693, 65418, 85754, 10615, 177935, 31294, 91906, 31111, 386524, 52324, 16388, + 59370, 52508, 156372, 25357, 6238, 72256, 41599, 57828, 175252, 163986, 132645, 50076, 32143, 95350, 15564, 103443, + 224492, 75148, 26023, 120071, 41388, 19532, 110427, 22508, 95408, 89126, 17624, 37562, 34384, 9140, 91145, 109567, + 148238, 18379, 47470, 5638, 78307, 70465, 82451, 53859, 38959, 18925, 14088, 22217, 9340, 26777, 74821, 42124, + 160091, 16523, 3150, 97181, 61443, 8097, 65561, 68601, 15737, 115420, 25095, 57655, 11216, 70875, 87640, 78471, + 41244, 28465, 55017, 134190, 170, 58246, 16739, 39956, 38299, 255505, 2797, 2174, 102443, 13841, 69822, 12621, + 113097, 6991, 123270, 37586, 26382, 47496, 42833, 10023, 14027, 38076, 52804, 80220, 33707, 4788, 3121, 7610, + 3957, 167985, 5094, 37233, 76300, 62786, 189431, 11488, 66160, 1236, 76849, 5333, 19431, 42643, 23661, 46201, + 18900, 8417, 18568, 111327, 6952, 44621, 24495, 38741, 1717, 138255, 22782, 46607, 108656, 236097, 24621, 9067, + 82206, 38888, 253672, 45369, 188021, 74422, 200471, 3792, 257335, 14028, 151249, 5429, 27295, 141619, 22966, 27219, + 43999, 105930, 97394, 24617, 41210, 3333, 88262, 22024, 31777, 58259, 8812, 91559, 46956, 22151, 60598, 161311, + 57457, 123650, 86473, 64439, 12657, 10686, 130688, 112742, 11489, 53274, 26714, 21670, 15697, 30443, 104596, 7868, + 48060, 22775, 3022, 19869, 204748, 16977, 184709, 89313, 53583, 83928, 92875, 99194, 82422, 96190, 2556, 47490, + 284790, 12772, 5841, 48964, 30503, 33825, 99246, 251304, 137341, 36338, 22912, 3614, 8120, 31432, 14001, 2727, + 19615, 36074, 75714, 22938, 220311, 52593, 32987, 17971, 15991, 102877, 210170, 136379, 10217, 43348, 155559, 9056, + 63424, 28650, 29017, 9663, 9808, 49301, 50859, 10641, 67431, 17280, 61331, 20739, 70976, 97391, 58235, 36525, + 98221, 122956, 57506, 98979, 4491, 86694, 28324, 129, 15177, 9809, 3222, 215310, 28535, 4761, 16001, 1184, + 144789, 181348, 54083, 88078, 751, 22452, 65081, 1577, 13230, 27685, 98822, 56681, 2394, 90263, 54478, 144599, + 1504, 78572, 173001, 99606, 33977, 33470, 29437, 39886, 132104, 10699, 34506, 36978, 30316, 13646, 16311, 29262, + 22230, 50283, 49086, 343445, 931, 13052, 125899, 139325, 97193, 24009, 38257, 76027, 185240, 47587, 137522, 115144, + 24826, 38532, 19149, 8495, 22687, 75105, 130036, 15268, 174322, 68514, 245144, 17081, 15307, 34585, 208142, 75209, + 22988, 36011, 65, 2906, 1390, 60888, 44865, 144040, 188745, 118480, 95778, 32437, 180325, 4138, 10609, 92925, + 29580, 8808, 159680, 42631, 59068, 29860, 171355, 10899, 74903, 33949, 320605, 9425, 18994, 26854, 7737, 53509, + 29195, 107306, 35880, 21197, 79, 68771, 286937, 4362, 15436, 42681, 71303, 124778, 7622, 25028, 9618, 122572, + 38462, 
11060, 66457, 65269, 11566, 72952, 5073, 71968, 138710, 28743, 12069, 66022, 44828, 82002, 156524, 81292, + 45774, 14165, 218072, 86389, 37768, 116234, 37323, 222673, 99236, 417011, 6380, 170851, 68137, 22809, 50851, 17147, + 84083, 118504, 78497, 64504, 19282, 56977, 84684, 68011, 22698, 100149, 2846, 125107, 17134, 46339, 16369, 72262, + 74807, 15652, 17984, 99115, 126662, 49499, 64245, 224198, 173497, 81277, 63478, 3449, 46248, 2829, 31143, 91485, + 16938, 9355, 21751, 89231, 119735, 2651, 2158, 25221, 3212, 1095, 134321, 26633, 28292, 72271, 10874, 18895, + 213652, 343495, 36158, 6930, 49013, 9714, 53844, 16595, 9975, 99720, 38334, 23140, 32180, 298162, 284394, 20189, + 45660, 51804, 12038, 74719, 86250, 44131, 68813, 48629, 4801, 41574, 219878, 76411, 68788, 91859, 17071, 199893, + 95490, 13890, 126132, 21590, 77482, 5070, 117208, 183553, 113751, 775, 118421, 47980, 11994, 16510, 60560, 22757, + 44624, 41900, 22489, 161977, 94452, 40768, 256639, 97607, 46839, 15049, 48016, 183793, 128497, 40127, 59466, 43034, + 100316, 61744, 20099, 72276, 5798, 4254, 61106, 151277, 58588, 78938, 208785, 23350, 73184, 13401, 114456, 168253, + 202987, 128773, 32481, 9314, 65417, 80566, 15061, 20781, 37790, 80269, 18985, 16154, 88524, 11484, 16349, 5922, + 1606, 101590, 83867, 4032, 43156, 17265, 40946, 123245, 97964, 46724, 2142, 201438, 105717, 55537, 40251, 107387, + 34947, 130879, 26300, 2025, 11203, 27400, 9384, 6700, 100060, 93137, 120697, 32781, 37742, 97514, 147819, 50972, + 130074, 43696, 152282, 11325, 93653, 25846, 60051, 100451, 107799, 99294, 5187, 187837, 94311, 19648, 17481, 47149, + 196106, 2484, 185532, 68892, 41347, 6476, 26576, 262, 8035, 144425, 16194, 7546, 10780, 99032, 192083, 18268, + 16390, 38046, 139599, 36447, 27883, 48800, 8802, 104301, 118236, 16610, 9043, 30215, 167395, 15722, 14540, 10143, + 1979, 18303, 245965, 6606, 25006, 56388, 720, 40122, 19375, 26986, 4175, 5283, 31628, 70617, 156858, 13338, + 18916, 50924, 158448, 13314, 144723, 40846, 148751, 33355, 78502, 66354, 52938, 44935, 114047, 29390, 83010, 31740, + 103107, 187158, 28282, 6840, 86492, 173457, 46403, 22614, 107686, 143217, 20089, 170121, 5844, 9860, 56485, 104630, + 20934, 42133, 9301, 19064, 206963, 93906, 29729, 27462, 23556, 248023, 29615, 24218, 22591, 27525, 19222, 62444, + 16562, 40084, 90324, 40232, 146333, 178921, 45549, 11142, 20167, 301568, 34164, 125423, 10471, 17862, 4749, 774, + 117434, 30213, 12597, 85041, 33085, 58865, 17338, 4578, 2863, 16515, 49743, 2267, 9740, 64838, 32867, 305033, + 36669, 34833, 20474, 42789, 41849, 24106, 210964, 124297, 37271, 24216, 53900, 123495, 22790, 8477, 175065, 22886, + 18209, 95189, 3313, 32543, 28979, 29761, 127609, 71172, 8231, 87016, 63834, 20159, 12952, 70904, 466787, 101605, + 54408, 2160, 17597, 57212, 21731, 165012, 21316, 33552, 25130, 56209, 46615, 46375, 45208, 106318, 31681, 64073, + 55748, 7104, 76381, 85964, 138120, 4075, 21570, 28070, 75826, 73539, 7912, 79024, 414, 177899, 313993, 67507, + 29593, 5743, 4806, 12800, 9925, 25560, 9189, 117626, 292865, 50234, 102480, 16382, 25999, 50641, 18440, 9929, + 683, 55242, 2340, 1064, 123149, 61826, 15245, 38280, 7036, 24794, 44030, 43924, 92159, 34247, 66141, 23809, + 86055, 215911, 128281, 150909, 61827, 53182, 142185, 14010, 103680, 51751, 108481, 22354, 23176, 13327, 14346, 152541, + 54918, 99104, 95228, 63611, 58466, 81038, 32483, 69723, 57578, 44054, 189180, 149427, 13305, 19749, 43628, 89334, + 5709, 43087, 18148, 4104, 86479, 50105, 64469, 20382, 16697, 4708, 14117, 130911, 31064, 73543, 33459, 
45627, + 17660, 15860, 57462, 86199, 200919, 78755, 79677, 80038, 10770, 87019, 8576, 17552, 49793, 46030, 21495, 35725, + 33423, 27589, 152364, 6318, 32370, 142933, 34912, 78214, 52047, 54699, 36052, 229203, 16488, 20327, 25789, 14697, + 62555, 29116, 9656, 6836, 6459, 16067, 47438, 81922, 8426, 32236, 21951, 67133, 83493, 104694, 49662, 4774, + 7763, 74850, 270584, 335979, 59725, 82959, 82821, 18110, 82812, 14354, 2193, 9843, 18628, 69780, 24991, 112338, + 67760, 191557, 92348, 79071, 79405, 72842, 11351, 56088, 68557, 139675, 23222, 148134, 9612, 12610, 21344, 25747, + 7673, 584, 17873, 39734, 28102, 18328, 10063, 14720, 56517, 1902, 69798, 38307, 69620, 33351, 1174, 19948, + 171797, 67288, 84834, 16123, 32458, 25946, 172250, 8199, 29541, 28207, 15618, 8731, 15870, 23596, 47369, 57922, + 81109, 26904, 26073, 8326, 32080, 57471, 44892, 162057, 207644, 334076, 10101, 4119, 71495, 49601, 2592, 19742, + 21202, 14849, 98354, 61825, 11039, 158223, 75426, 119901, 91036, 68746, 116495, 8557, 61230, 102302, 14765, 75658, + 2810, 4942, 28526, 36256, 130800, 67752, 202742, 33081, 32260, 193926, 185696, 4064, 4613, 295863, 166466, 13260, + 60590, 1252, 145391, 2657, 37112, 87184, 227365, 8194, 75214, 88155, 115530, 90924, 33979, 90533, 27556, 51339, + 126402, 49225, 196178, 34452, 155062, 4813, 17478, 33954, 30642, 120974, 35852, 38833, 63875, 31380, 62028, 58381, + 12810, 7419, 98274, 1977, 194463, 145760, 23510, 116833, 82799, 19072, 2433, 145655, 47664, 4834, 69147, 46751, + 16725, 33328, 38665, 115531, 36685, 76090, 11537, 18743, 43367, 17948, 23978, 41370, 61099, 40095, 66518, 999, + 449, 217319, 6688, 250897, 172150, 20516, 11330, 20451, 102867, 21452, 159960, 15660, 21691, 82391, 6601, 43312, + 301838, 29124, 21637, 110211, 36745, 105335, 60833, 98115, 7130, 2470, 75962, 2011, 18671, 50489, 79569, 101266, + 57316, 81095, 53258, 13308, 34852, 17013, 84541, 47478, 38034, 23762, 162120, 178016, 54182, 33123, 52028, 72197, + 35578, 4602, 243630, 88186, 65900, 67107, 5029, 138288, 99486, 1235, 6540, 165347, 19771, 47835, 318100, 22891, + 3456, 21803, 91103, 57561, 2658, 54417, 30476, 7012, 16914, 55333, 21913, 180607, 99866, 184639, 7485, 8405, + 28390, 37172, 89244, 53674, 28109, 98360, 69082, 3525, 8262, 79773, 254797, 87253, 21147, 105791, 15807, 58442, + 34353, 98558, 30931, 80675, 20006, 3002, 81642, 11376, 4228, 91457, 8547, 21430, 137085, 33238, 42307, 3087, + 1675, 66687, 47814, 34117, 203023, 131032, 24008, 5970, 283196, 124604, 83088, 60714, 198286, 26339, 5149, 82518, + 214375, 8762, 21409, 25932, 163329, 13237, 37495, 3608, 290603, 72236, 1508, 11575, 152574, 55633, 156361, 32414, + 40471, 48043, 3556, 2415, 83506, 9556, 79122, 233954, 30068, 33325, 6305, 159939, 14730, 53878, 89577, 30054, + 23177, 41063, 32980, 17345, 131539, 217504, 35311, 15300, 34759, 144987, 54877, 46496, 27668, 5784, 24491, 1354, + 32178, 129844, 14953, 7360, 71896, 107476, 206892, 65803, 104799, 60213, 3795, 77961, 116305, 72186, 184835, 52495, + 85430, 98086, 108950, 22959, 119262, 214032, 33931, 102185, 42860, 161725, 32444, 24541, 25160, 41398, 6650, 202950, + 8911, 27523, 50156, 13935, 23428, 255875, 23753, 49759, 49437, 771, 101855, 224178, 105322, 141973, 32780, 5494, + 6519, 83915, 103464, 195927, 16203, 18899, 2849, 150029, 6349, 3289, 4814, 219, 74711, 59509, 333, 40550, + 1230, 49476, 28787, 6325, 38045, 10647, 173625, 26321, 8540, 19101, 23643, 21796, 75165, 98886, 256858, 8390, + 44736, 107620, 67566, 91614, 25909, 54320, 31937, 195737, 51026, 52019, 46128, 10676, 317034, 7784, 41102, 123264, + 
4984, 106475, 31610, 19260, 32281, 83653, 4280, 61891, 91312, 19136, 38931, 76940, 27060, 33501, 126832, 48333, + 44431, 81276, 41771, 130533, 17817, 6320, 38313, 928, 45363, 59120, 177473, 41182, 155937, 135020, 126653, 32047, + 239085, 115649, 82912, 3416, 35697, 345331, 53591, 16649, 59784, 39055, 46432, 28477, 91993, 8200, 97534, 6307, + 29531, 9129, 30788, 89098, 126740, 20671, 133582, 65905, 213757, 1632, 18153, 20878, 76560, 55987, 68969, 1600, + 167776, 51365, 34575, 216355, 285273, 37934, 49689, 21386, 24262, 69390, 24454, 75939, 8237, 18742, 88250, 165234, + 65030, 85487, 44653, 10365, 41160, 2784, 164637, 7275, 74437, 817, 5045, 54742, 48804, 217409, 12001, 99489, + 118916, 8909, 10151, 74282, 13159, 165410, 3506, 39017, 37842, 24440, 5032, 93366, 1031, 93948, 42413, 34930, + 75349, 36125, 57529, 29308, 1478, 45294, 1328, 29873, 11655, 72323, 80218, 16686, 108777, 112357, 19468, 161527, + 23435, 67822, 30370, 4433, 277425, 199425, 1173, 8369, 101734, 76516, 110263, 4965, 67469, 27648, 64330, 158915, + 70231, 148349, 33642, 19100, 124711, 6240, 206630, 5766, 43532, 60290, 1618, 11261, 28514, 49764, 75380, 44379, + 65526, 33015, 1566, 161773, 54956, 37344, 69904, 6421, 1000, 17254, 11877, 7155, 21882, 13912, 9792, 134, + 17728, 212180, 90771, 66606, 25302, 43754, 11818, 134151, 40952, 12919, 28325, 57470, 52214, 30361, 5898, 7913, + 149632, 18095, 212017, 195480, 1999, 139, 84069, 3822, 2111, 116190, 22381, 104936, 3259, 19369, 7470, 4564, + 63362, 84396, 244911, 82844, 89961, 73711, 23902, 88689, 220561, 81148, 100516, 124589, 39777, 153793, 37780, 13806, + 26335, 4176, 56333, 280949, 9063, 9260, 69363, 258594, 10572, 107880, 12115, 33299, 12416, 68082, 27837, 184178, + 34551, 83293, 68854, 109274, 34623, 9210, 18491, 59555, 38604, 267, 8192, 6400, 24723, 29696, 82525, 68604, + 5947, 72996, 15729, 703, 15588, 23700, 2015, 100398, 69927, 427, 20207, 148402, 66252, 2099, 146853, 12510, + 119177, 37939, 48402, 172082, 69173, 242876, 15286, 133076, 46629, 9996, 20910, 33571, 28714, 132255, 11444, 47791, + 70715, 103704, 9226, 28482, 212408, 75092, 6197, 29216, 20521, 24, 52569, 5853, 406913, 21243, 31218, 77868, + 74380, 146453, 7607, 72181, 11716, 15373, 26582, 8123, 50659, 30590, 227825, 66454, 50862, 49529, 80294, 15517, + 37009, 35230, 69063, 80260, 88460, 38472, 63246, 37205, 130101, 137671, 14972, 60171, 7210, 90428, 50245, 64301, + 53853, 21012, 116299, 19943, 538, 102919, 143609, 50795, 65120, 122155, 20760, 41285, 151950, 28489, 62634, 48588, + 55806, 151533, 4795, 3053, 163748, 44956, 565, 152058, 52837, 23981, 76468, 97083, 13153, 60576, 2112, 50486, + 21100, 377, 192917, 29902, 16674, 14359, 42767, 170627, 64536, 35897, 66424, 6902, 6091, 127107, 4355, 121366, + 138201, 65773, 66108, 41998, 44837, 63222, 69586, 36291, 58547, 23085, 14181, 135294, 3723, 40961, 35006, 126987, + 163, 9211, 49788, 117861, 2177, 37726, 91665, 22613, 32288, 24902, 24789, 76868, 85454, 74752, 103374, 11683, + 34033, 48129, 1456, 23503, 8497, 70596, 92766, 70637, 14282, 304999, 76392, 9980, 25742, 4216, 140344, 193566, + 10535, 16591, 137916, 20347, 10741, 5439, 17749, 74636, 79559, 244434, 10353, 2254, 117493, 6879, 36582, 273890, + 243787, 15483, 5037, 43308, 49337, 29065, 64416, 85528, 100718, 19024, 222754, 60476, 79495, 44751, 64434, 4020, + 40139, 30091, 121039, 83627, 42956, 12277, 115688, 38864, 7551, 37316, 31576, 348, 55433, 10897, 8383, 89713, + 15421, 4329, 42444, 12217, 31509, 48867, 30445, 38228, 23034, 8090, 37931, 30345, 45081, 21129, 36808, 88429, + 547, 39635, 34098, 148415, 
+	[elided: several hundred diff lines of auto-generated, comma-separated integer data added by this patch; the flattened extraction preserved no file or hunk context, so only this placeholder is kept]
44251, 30219, 107113, 49515, 5809, + 22919, 43643, 10121, 20448, 80563, 119663, 169374, 59245, 57566, 90682, 12457, 225388, 42369, 203562, 11662, 128551, + 93141, 84259, 24761, 94597, 41675, 122505, 212284, 48603, 2407, 9599, 7883, 24703, 182519, 107518, 90911, 22385, + 120495, 22791, 32676, 56812, 27154, 24521, 13655, 41800, 16702, 262168, 63509, 14150, 29456, 135382, 45733, 66046, + 14349, 2518, 233250, 50438, 7958, 21556, 8312, 32247, 16688, 7974, 4721, 4342, 117177, 13427, 43940, 123614, + 140375, 20924, 42414, 505, 42467, 36757, 55097, 32118, 261919, 34892, 58385, 134010, 74916, 2566, 138977, 120089, + 153569, 42388, 97409, 75482, 10836, 123, 5341, 33838, 34742, 48578, 76395, 92995, 49526, 37105, 106505, 72144, + 7621, 24215, 152644, 48127, 105997, 73105, 87109, 52037, 12212, 625, 111988, 112734, 2270, 76628, 35699, 44168, + 392377, 67240, 91475, 67254, 7755, 119314, 9723, 6967, 17959, 185692, 25707, 36302, 25086, 109996, 7225, 112068, + 232152, 122120, 101654, 13640, 138791, 16408, 39845, 8399, 33847, 12887, 152461, 34536, 13860, 12517, 180090, 169472, + 35316, 3208, 52910, 286726, 5811, 60049, 6687, 6745, 1344, 108692, 23669, 20503, 71259, 58644, 186034, 23770, + 50452, 17374, 5900, 712, 207539, 154425, 93220, 54448, 92635, 125802, 14285, 77361, 50359, 69288, 133264, 162621, + 5821, 93205, 28457, 129771, 33674, 8402, 51971, 38768, 30255, 195827, 18512, 68308, 2086, 8475, 44179, 212, + 2587, 255482, 11233, 42032, 96264, 234156, 71743, 9619, 17543, 9966, 59340, 53, 42, 51576, 68365, 150251, + 6029, 116729, 63303, 1303, 9580, 56310, 126033, 11299, 43007, 25304, 11348, 2202, 139248, 211176, 10147, 4290, + 82831, 107660, 57933, 177074, 12917, 54254, 36738, 72091, 29607, 42295, 47993, 166376, 25786, 73979, 352922, 17657, + 51467, 73749, 5917, 82140, 42137, 39138, 697, 49880, 85161, 40070, 149172, 172144, 100698, 83192, 48718, 29859, + 31561, 21429, 53401, 29518, 88989, 43651, 46656, 32160, 121990, 32912, 74292, 57977, 278500, 63671, 75205, 23517, + 3602, 60467, 33461, 137178, 109344, 49843, 1353, 103161, 37982, 43271, 19531, 62950, 15279, 34216, 34547, 113009, + 116442, 189404, 140865, 134948, 28936, 38460, 59707, 136053, 30880, 128067, 49530, 48855, 87894, 16331, 15771, 63989, + 58079, 104481, 125524, 14569, 128661, 25492, 365675, 116367, 126731, 94516, 122818, 30710, 67392, 52767, 2196, 47261, + 28051, 49914, 333288, 29945, 146885, 100058, 31013, 158363, 4861, 1817, 42266, 21215, 16216, 4256, 54248, 112813, + 97344, 128078, 30238, 120987, 42827, 6923, 14989, 69805, 147561, 47842, 51853, 2647, 153948, 13103, 39122, 18142, + 22684, 76687, 15882, 92285, 21335, 29519, 3993, 86408, 47685, 39612, 24929, 19453, 1853, 134405, 114177, 25894, + 43349, 26803, 12267, 92165, 15185, 61540, 9990, 69281, 59642, 76734, 309690, 136935, 10229, 92038, 49815, 104501, + 25520, 66774, 32406, 37445, 187921, 81418, 18633, 84262, 108972, 32019, 103853, 41207, 5579, 45804, 210683, 27613, + 98037, 39566, 18876, 154815, 24945, 108917, 31510, 38406, 6697, 20809, 29164, 106328, 19193, 8247, 16805, 3543, + 63734, 213048, 201574, 22433, 137934, 31798, 217223, 2939, 75056, 140267, 99972, 3047, 89740, 22878, 4763, 62402, + 19767, 110374, 49959, 24684, 224268, 106487, 32793, 8178, 56138, 27795, 3080, 77954, 63643, 24857, 121435, 175431, + 151661, 102435, 15023, 177670, 39313, 17174, 24416, 12895, 70618, 46646, 17001, 27902, 84031, 58519, 21749, 50823, + 89723, 59027, 57596, 61596, 84074, 33007, 8029, 24120, 13703, 108284, 63542, 58816, 85626, 83071, 91820, 14146, + 35460, 124390, 61351, 8006, 8867, 11495, 4529, 43870, 
64845, 13482, 73015, 24763, 3439, 9485, 79856, 23851, + 57906, 220428, 88667, 80708, 99776, 38036, 39933, 208871, 63968, 30726, 291083, 68, 49270, 106842, 112123, 27384, + 81130, 110097, 118834, 241402, 34356, 13923, 23897, 40492, 16210, 71957, 62441, 58550, 23547, 13636, 20131, 42294, + 36446, 81802, 1100, 142364, 34090, 61710, 9270, 107601, 140028, 39980, 1414, 320109, 72439, 66107, 14862, 134653, + 2221, 1149, 9546, 36018, 22163, 35318, 143604, 19080, 57058, 48579, 2621, 55599, 363492, 110403, 14828, 57857, + 113754, 25759, 29811, 61553, 18913, 107232, 5290, 75792, 95451, 70056, 214553, 3329, 48663, 24095, 11961, 96108, + 54464, 155383, 53360, 112141, 54037, 49177, 57901, 67842, 176097, 123321, 6506, 228274, 68425, 4036, 160696, 23121, + 3023, 30678, 64279, 90792, 34906, 65080, 9259, 58549, 29482, 27140, 216012, 23499, 117389, 49482, 25665, 100543, + 341780, 54232, 60358, 235308, 80431, 37334, 14300, 53910, 58330, 29194, 117489, 59804, 16753, 37401, 37127, 35030, + 92616, 62680, 44495, 8116, 60907, 43835, 168603, 37896, 94846, 842, 40856, 25319, 147486, 395164, 90387, 68791, + 4498, 25599, 15543, 116574, 48646, 254235, 132631, 3917, 7773, 30355, 18277, 60008, 46801, 74243, 4222, 85032, + 7778, 17592, 14912, 22293, 18946, 6094, 46, 29454, 464978, 48886, 97248, 14694, 47558, 169023, 3388, 127473, + 33223, 22400, 144764, 181865, 177444, 13371, 44931, 27593, 7328, 194219, 91202, 3836, 15626, 22427, 52166, 39152, + 63337, 7531, 59378, 193696, 94700, 27634, 40257, 41337, 11743, 257393, 217307, 346548, 9351, 73104, 41502, 1488, + 255024, 105660, 39615, 20814, 39098, 149478, 69081, 19993, 16447, 55270, 37583, 19645, 42647, 14979, 8926, 28968, + 96230, 49277, 22527, 34250, 39769, 81745, 50791, 18698, 58840, 44616, 70138, 6720, 10068, 38140, 5653, 99473, + 63439, 3743, 19237, 163704, 35800, 1626, 33560, 38455, 65843, 158617, 28684, 92983, 58823, 71795, 71233, 1075, + 413844, 42288, 157276, 38514, 9156, 131335, 59762, 40948, 51258, 46584, 9950, 55371, 7434, 2577, 42703, 1693, + 61791, 27603, 63320, 25608, 85018, 30872, 100002, 36167, 6872, 2669, 51250, 778, 3692, 10451, 28383, 163025, + 28096, 44948, 19074, 128798, 7121, 36683, 2203, 17586, 33024, 70070, 348622, 5061, 6009, 23593, 42442, 28013, + 75532, 94062, 64585, 284254, 31997, 89645, 102394, 31393, 192535, 48721, 71088, 128192, 9661, 61738, 34411, 50069, + 3304, 16352, 53075, 45568, 9547, 42732, 1178, 93157, 14753, 88072, 51599, 88701, 31987, 23387, 63847, 44965, + 25314, 47565, 7560, 2438, 55689, 1314, 346, 23289, 15896, 475529, 112925, 131467, 20430, 150168, 2504, 17375, + 39472, 54601, 34817, 12000, 31340, 27414, 5063, 41639, 99744, 6404, 117189, 259172, 25398, 35063, 46527, 96170, + 115569, 8068, 179160, 161042, 54883, 97999, 36646, 8523, 28719, 11447, 6735, 26129, 205423, 83805, 44478, 94354, + 23071, 9474, 27662, 132536, 57855, 155315, 195915, 61922, 64638, 69412, 89700, 153852, 149867, 22483, 25631, 4401, + 25671, 191634, 58296, 7593, 82403, 23703, 17554, 61290, 37616, 211689, 4980, 2922, 20668, 148622, 109058, 2724, + 39989, 54579, 389750, 94744, 77996, 131928, 41416, 77516, 74948, 105981, 7862, 49124, 140555, 58696, 4033, 57560, + 175248, 201147, 43956, 80013, 64810, 82504, 14552, 11127, 36515, 10704, 23006, 45490, 46595, 111926, 16970, 31954, + 4958, 113746, 35379, 27153, 248773, 34760, 166030, 69750, 24045, 70012, 121173, 53304, 28728, 9870, 156097, 134089, + 136673, 71920, 25774, 2488, 168704, 5343, 127631, 74486, 20804, 188876, 26283, 102354, 114833, 476, 53497, 38795, + 100325, 26879, 18226, 1066, 27135, 41772, 14104, 58513, 
21205, 5221, 84659, 49948, 96151, 18525, 149506, 51579, + 153134, 107909, 85993, 35590, 45992, 15182, 68394, 22750, 7093, 6602, 26954, 2528, 13992, 8645, 3748, 38754, + 76047, 16039, 28854, 52143, 1980, 22387, 6152, 255879, 19432, 56677, 64082, 99361, 145001, 56506, 42169, 13125, + 75159, 24500, 41901, 21053, 87462, 109469, 103771, 55888, 17710, 31989, 233429, 5318, 1013, 119131, 13220, 94790, + 45556, 27216, 5013, 108338, 34297, 51598, 16968, 224489, 144882, 29596, 70103, 32634, 20648, 23171, 115640, 2381, + 26061, 129018, 59090, 67066, 11319, 1052, 66080, 134106, 129567, 36464, 198632, 6394, 108555, 342064, 340, 57976, + 18872, 21980, 39272, 117475, 464580, 20395, 93823, 156783, 33386, 22005, 34188, 504700, 22717, 50887, 196433, 44491, + 65948, 106413, 3639, 94733, 167189, 37296, 49229, 1697, 5603, 70017, 72359, 61123, 135042, 93369, 6109, 45001, + 79542, 96019, 54203, 50884, 8801, 68912, 114197, 59072, 202632, 47922, 8431, 242124, 18114, 54405, 129410, 6472, + 91882, 124518, 39386, 91470, 5973, 31594, 93512, 401, 5239, 5661, 24933, 37492, 67315, 15503, 24586, 447, + 4431, 98481, 20358, 144946, 60916, 297453, 66825, 30645, 47819, 105167, 552, 87909, 71693, 40566, 5307, 32293, + 32597, 12315, 4634, 118577, 32606, 74622, 13999, 1446, 18183, 5010, 92389, 27675, 45072, 186756, 72549, 62625, + 80329, 3174, 188490, 17768, 76385, 56061, 44774, 4792, 24749, 6756, 29971, 24565, 51305, 2866, 185714, 7372, + 40314, 131257, 46345, 142745, 156514, 10853, 14992, 9306, 14693, 140671, 18567, 166507, 130345, 6503, 52141, 7521, + 13168, 8694, 14811, 40576, 66214, 114434, 97632, 88033, 18029, 21365, 15834, 397881, 12858, 6804, 73691, 171818, + 34801, 11558, 167427, 172844, 27628, 109803, 44373, 61609, 14544, 8723, 7897, 26839, 10823, 38501, 189122, 32876, + 40522, 18836, 231040, 28016, 40185, 9487, 60378, 40240, 33739, 35931, 69716, 16764, 148694, 148116, 26429, 90031, + 23548, 130862, 153367, 10154, 9923, 25899, 86890, 187712, 61012, 106844, 119164, 108121, 28859, 151900, 43746, 70054, + 17933, 46633, 32051, 40306, 19442, 73866, 51802, 202389, 34364, 59031, 39109, 86049, 99849, 27312, 354059, 431, + 164107, 160825, 29370, 26855, 141167, 209995, 47475, 25126, 30629, 112486, 16641, 31932, 21054, 13503, 62291, 8461, + 6744, 25340, 5056, 190589, 36491, 1498, 102273, 136482, 8096, 46702, 98246, 56502, 42474, 9181, 111985, 43767, + 41706, 30774, 3932, 26549, 155060, 66159, 102266, 53051, 30650, 208931, 3598, 31618, 10600, 67535, 135897, 87806, + 163442, 104978, 10409, 139772, 1143, 40979, 7330, 98219, 96655, 131263, 25023, 114039, 61390, 192001, 15973, 35549, + 52359, 902, 12202, 5580, 7559, 52829, 36364, 11107, 51568, 3787, 4394, 31819, 64256, 1505, 29813, 365608, + 203854, 33802, 39839, 47786, 4467, 50956, 226690, 12884, 22453, 47648, 16676, 45252, 14504, 2855, 18627, 541, + 436398, 14538, 2406, 20, 7878, 60282, 10602, 109448, 6980, 70267, 22616, 27176, 8293, 85130, 294480, 30144, + 63610, 187294, 289665, 163077, 293747, 55641, 995, 86282, 16167, 131142, 7732, 139426, 35763, 21669, 81048, 1053, + 19627, 16183, 153848, 41955, 147603, 49219, 127527, 60498, 15419, 62976, 59946, 18598, 18032, 16576, 207, 4670, + 110744, 11552, 9989, 2349, 51346, 15073, 25998, 160678, 33681, 220089, 68035, 65033, 54571, 77929, 12230, 88125, + 40472, 148399, 62247, 44687, 48615, 158618, 103484, 11572, 39073, 41233, 3610, 86331, 21604, 36776, 83989, 518, + 13754, 34617, 179678, 35290, 173027, 43237, 66547, 59016, 92560, 12741, 157332, 29334, 11083, 67849, 24492, 90041, + 47299, 109304, 10326, 20058, 63062, 46195, 31632, 9568, 11813, 
949, 131768, 139099, 52007, 9458, 46429, 12293, + 29883, 97116, 3732, 32343, 9734, 20328, 4732, 83588, 139722, 11257, 49471, 2051, 15953, 233007, 15439, 88041, + 1550, 78033, 39910, 56576, 20651, 32790, 66091, 16869, 13616, 226368, 19098, 20124, 49306, 274210, 41089, 39818, + 16113, 202390, 49166, 5280, 90089, 148031, 55043, 2264, 92326, 62595, 168341, 67080, 7584, 39228, 2679, 31454, + 30712, 21771, 49469, 8092, 72424, 14892, 94819, 370101, 164858, 14108, 16628, 34424, 6831, 26672, 13360, 10293, + 152871, 13708, 152221, 56275, 55746, 3003, 189905, 73541, 197721, 19461, 138468, 38166, 34167, 86972, 78519, 126458, + 196442, 22647, 131900, 30322, 6022, 31039, 95120, 35519, 112107, 2704, 104049, 7805, 55215, 99039, 8898, 61822, + 7538, 79147, 8674, 19781, 123381, 122030, 61080, 29510, 4920, 252926, 24948, 29594, 43539, 79504, 36116, 27926, + 77165, 119791, 10396, 47075, 8939, 65089, 91291, 49470, 50392, 130812, 24665, 5396, 34192, 146915, 55, 32388, + 20225, 170176, 24246, 18217, 79762, 97481, 187002, 170504, 22505, 166717, 11581, 22954, 58667, 24092, 24239, 34967, + 40770, 168985, 20697, 10796, 29788, 36609, 33121, 48586, 97180, 70956, 4247, 10919, 82835, 29387, 24795, 134813, + 4568, 41932, 107494, 12409, 8579, 7615, 78083, 27482, 13273, 222151, 109832, 56337, 363569, 100711, 21692, 74289, + 35898, 156666, 112372, 33193, 49983, 165146, 13906, 30221, 436, 23307, 161876, 16834, 36598, 80261, 40181, 489, + 3237, 17307, 33708, 68069, 131691, 47411, 142213, 17996, 62418, 20656, 40859, 30297, 35591, 115572, 96762, 34638, + 8101, 100105, 87872, 93118, 4073, 13106, 53663, 14555, 379438, 12544, 34665, 144134, 65218, 83887, 41458, 1700, + 76072, 7062, 45362, 51519, 33887, 113928, 230002, 145590, 2968, 109731, 69584, 145887, 27573, 34080, 696, 54442, + 212619, 61698, 42014, 1469, 288680, 91524, 69494, 176890, 68278, 36380, 91390, 73061, 72851, 136365, 18061, 126629, + 150504, 108159, 73403, 20532, 217896, 18800, 83394, 3780, 6913, 42351, 72130, 124219, 121339, 338937, 19687, 8446, + 22017, 13873, 48885, 120125, 35340, 27891, 4562, 52291, 51072, 5972, 97159, 14055, 43616, 105781, 67483, 207916, + 75043, 12256, 28487, 7209, 31437, 59474, 13217, 149676, 10833, 46754, 7502, 32640, 81487, 26299, 56642, 3989, + 4364, 2409, 1896, 58704, 22968, 42546, 57069, 47889, 41454, 136134, 46051, 102015, 106687, 15526, 254717, 58, + 85446, 14369, 99446, 71688, 19863, 126847, 291582, 51244, 109625, 70818, 1547, 189380, 149241, 28615, 6289, 179303, + 524, 62440, 6853, 175754, 141850, 162709, 4217, 140213, 214404, 32835, 370939, 250072, 54376, 228761, 71916, 144701, + 657, 89940, 17521, 80160, 237023, 148575, 164257, 272527, 9401, 198903, 24729, 17703, 108137, 43135, 48966, 56162, + 53800, 36151, 13173, 1783, 32474, 18864, 70754, 46888, 49712, 30038, 58553, 64793, 53334, 174049, 42965, 84561, + 126876, 70090, 16520, 63753, 27337, 69921, 58122, 69010, 45552, 33142, 1092, 120910, 177696, 3676, 16059, 23396, + 8269, 22160, 9571, 34657, 15036, 46764, 37354, 25445, 12097, 63888, 48103, 145, 42240, 80858, 105547, 28234, + 2328, 51188, 12063, 12469, 125374, 98182, 171585, 129756, 119295, 23533, 25395, 181401, 99715, 107908, 42579, 37609, + 2500, 59133, 67194, 46635, 19624, 31959, 24153, 277972, 39441, 105587, 56371, 24069, 27220, 18122, 50693, 3846, + 102691, 55065, 140440, 293, 60957, 118436, 1340, 17314, 94543, 71522, 9010, 49481, 39101, 30757, 52442, 3349, + 18566, 55681, 6148, 49861, 67362, 29473, 16424, 51773, 13975, 16105, 153263, 53902, 78230, 197042, 15803, 187130, + 25017, 6214, 105388, 38599, 34017, 9107, 660, 114778, 
239007, 212872, 16230, 195154, 90027, 38987, 248, 60897, + 39351, 34856, 31011, 21775, 41681, 1559, 85670, 6103, 35354, 83280, 187563, 5745, 43822, 13397, 20816, 140079, + 1043, 6348, 13019, 188905, 916, 83185, 13921, 197369, 58587, 308353, 44852, 37817, 141983, 32764, 68581, 40892, + 94818, 6526, 46289, 37353, 38799, 65245, 127045, 12280, 75459, 107508, 56307, 93576, 41114, 92631, 22742, 68224, + 67432, 122795, 2131, 30261, 16195, 71686, 80872, 19067, 36606, 55415, 51055, 65943, 59568, 48358, 40947, 230410, + 22272, 116297, 133612, 74166, 126769, 58783, 115647, 39171, 31424, 59980, 6420, 75687, 68659, 22219, 19662, 51609, + 12287, 7887, 94526, 61885, 134302, 46006, 92537, 80123, 257977, 126663, 55154, 71071, 5756, 38621, 29511, 61768, + 207285, 85526, 35878, 1517, 95637, 40711, 214057, 75041, 47248, 72951, 22699, 85378, 117689, 4729, 158936, 22518, + 19583, 25056, 17451, 43230, 77451, 141822, 2028, 7801, 22373, 4034, 75301, 60991, 12200, 59589, 123234, 17449, + 54993, 3264, 16430, 33128, 117118, 56124, 178609, 12642, 34244, 236200, 43665, 19313, 29386, 45091, 42098, 10042, + 34562, 71330, 29635, 50068, 53819, 124237, 44714, 32804, 71267, 130300, 48998, 56578, 64172, 172768, 50075, 17351, + 77665, 85602, 1594, 81728, 49368, 46606, 19775, 75183, 7716, 32889, 26648, 13436, 59301, 29561, 77044, 108652, + 25749, 26512, 343982, 16328, 45426, 53772, 84254, 67097, 194789, 61224, 17035, 160685, 17297, 202215, 135406, 118341, + 2650, 2712, 165122, 39668, 1766, 97847, 41583, 64750, 32501, 260547, 28864, 64103, 45198, 19516, 1158, 166912, + 20403, 34027, 10963, 16141, 20984, 163663, 185362, 27299, 6600, 243594, 45496, 154199, 14171, 53891, 52940, 101642, + 94604, 7963, 104592, 152606, 19037, 11118, 25808, 54515, 5402, 42084, 147184, 18390, 29896, 164225, 162873, 40466, + 9938, 54801, 70146, 66759, 59935, 43540, 58676, 69171, 109708, 38543, 32207, 46591, 88081, 20140, 41767, 101298, + 145182, 39899, 12204, 21085, 44844, 32313, 226062, 13138, 39167, 7649, 21294, 19544, 352626, 42947, 112978, 162137, + 164173, 121993, 17813, 6102, 35374, 5269, 42206, 30800, 45982, 22982, 36251, 17144, 6122, 8671, 8084, 272404, + 154, 122768, 12006, 76527, 73419, 69325, 105807, 9495, 220487, 29197, 89056, 160446, 53834, 197550, 37292, 117751, + 53601, 24091, 108269, 72650, 17992, 118251, 13578, 64227, 8609, 97876, 56750, 36113, 229321, 150223, 85160, 26383, + 5610, 88738, 33839, 35306, 68098, 12374, 121473, 27197, 66815, 63716, 10127, 10388, 71012, 155117, 10660, 38130, + 95069, 200906, 56997, 10546, 140968, 26164, 58789, 80414, 27396, 29337, 17319, 78747, 8957, 43718, 57739, 8704, + 134489, 9251, 14262, 40583, 24656, 39133, 5306, 43837, 86659, 164677, 194782, 27468, 56598, 41406, 95731, 17647, + 134852, 11972, 71605, 77846, 17316, 34195, 24465, 42471, 123838, 4286, 11465, 5223, 255436, 106016, 15363, 133653, + 6613, 57615, 21482, 5929, 41610, 5528, 159163, 20266, 138033, 2783, 48074, 249145, 81452, 57741, 38155, 31191, + 32023, 131830, 8712, 116513, 32396, 160702, 187621, 166002, 123687, 12, 62689, 145928, 63398, 18560, 86346, 150231, + 8693, 5478, 54663, 56869, 29712, 20471, 322015, 164692, 30407, 52016, 160121, 22929, 19296, 52881, 60340, 71650, + 121188, 31059, 10424, 72973, 3551, 30412, 44737, 172383, 36099, 243424, 5274, 49999, 20032, 79415, 43567, 95143, + 111948, 20318, 17729, 101737, 56624, 96891, 161576, 14956, 16547, 135980, 59262, 77152, 27453, 6123, 35571, 43380, + 35916, 62277, 21785, 53693, 15378, 108237, 63, 2276, 52039, 70272, 78694, 41537, 56849, 116796, 14411, 20761, + 13489, 233058, 9422, 23296, 
22214, 27805, 167552, 26532, 73177, 43781, 1976, 47479, 53097, 70358, 25233, 10202, + 277349, 32720, 23465, 45782, 2157, 75011, 99414, 46797, 14029, 331188, 26634, 25912, 187886, 51411, 142415, 54672, + 10260, 67364, 68176, 84898, 141743, 32203, 8882, 16414, 246460, 67826, 1065, 38386, 91880, 168610, 5162, 41010, + 50869, 14162, 7962, 335266, 3788, 18011, 86185, 14140, 49486, 66814, 124474, 12893, 133566, 255655, 79151, 46849, + 54950, 40987, 113502, 4653, 33120, 1563, 160382, 117713, 129337, 309186, 18171, 10889, 53768, 44858, 38544, 36763, + 18333, 15858, 58971, 6477, 9525, 8535, 14726, 14096, 26902, 170756, 28405, 233366, 312251, 51708, 14127, 19199, + 10297, 110312, 48460, 646, 9020, 40769, 83604, 51716, 70759, 2649, 59125, 55621, 16647, 2952, 10961, 74126, + 112432, 43916, 267460, 5120, 59260, 28040, 31308, 16545, 84609, 47186, 40537, 205682, 9818, 19650, 93983, 42181, + 82766, 50191, 13339, 114720, 73569, 23501, 5541, 66254, 468, 17966, 5125, 81538, 46001, 88315, 134477, 4042, + 75780, 17161, 37372, 9273, 55028, 52868, 48506, 197660, 52106, 1678, 131509, 88997, 11498, 229161, 99808, 17550, + 43645, 124582, 219145, 8184, 108069, 70061, 175724, 99312, 17150, 2838, 7073, 156152, 17753, 49092, 16803, 1821, + 29417, 92090, 23379, 66219, 16705, 25405, 141529, 27280, 31799, 6767, 12496, 46640, 9606, 10300, 33865, 90498, + 289, 141972, 28645, 1755, 122254, 36574, 145200, 57778, 115975, 15433, 1941, 4099, 8620, 50560, 123303, 55676, + 6133, 5443, 25678, 28512, 255357, 14348, 122676, 93720, 56908, 9978, 32758, 60073, 14456, 30325, 74179, 182377, + 133464, 124701, 18020, 32177, 43554, 808, 19883, 16600, 79224, 7238, 18109, 28556, 11247, 50684, 94823, 7729, + 29630, 27895, 43494, 66615, 160, 75616, 204393, 4150, 12756, 120948, 108425, 9998, 25464, 61334, 213823, 15423, + 65960, 63934, 87262, 84230, 350428, 96963, 99319, 27630, 62521, 82558, 7456, 70035, 321796, 22677, 117013, 180582, + 100359, 79812, 34557, 287830, 67358, 14176, 80683, 114848, 35169, 90997, 1447, 22600, 46172, 146596, 10923, 103084, + 113128, 53346, 226456, 59683, 48988, 21632, 90741, 80771, 88868, 89090, 59673, 44207, 31094, 81602, 72782, 32997, + 33266, 124468, 127301, 33848, 6847, 2940, 167663, 1154, 60887, 4791, 68165, 51588, 98188, 27452, 53523, 3630, + 49659, 31844, 716, 23618, 69117, 101601, 4697, 29366, 92977, 133129, 100459, 35256, 220228, 220740, 11194, 50122, + 13947, 1305, 2379, 119210, 80181, 112061, 18955, 53969, 35103, 28242, 18281, 26482, 62170, 23125, 22627, 17903, + 97351, 70139, 14931, 69751, 13475, 194213, 6823, 66651, 2440, 3123, 124201, 127058, 199768, 273513, 29218, 168746, + 19498, 30628, 254726, 18151, 36597, 16458, 114447, 3813, 46971, 184066, 132731, 85793, 25234, 113561, 20977, 87033, + 67806, 81570, 82077, 83128, 62881, 16590, 59929, 31721, 84717, 54839, 152353, 27946, 73648, 1152, 51494, 25166, + 181966, 18536, 35859, 21096, 10488, 5434, 87296, 116782, 94149, 20100, 42748, 119284, 21550, 80954, 161142, 3281, + 26655, 56068, 31234, 68973, 63436, 197146, 77802, 53836, 48375, 31390, 138097, 215755, 14405, 14690, 48482, 192674, + 165650, 4356, 6779, 90318, 9621, 53563, 21892, 11380, 24439, 27988, 65408, 32100, 28043, 30121, 124, 52304, + 42735, 36882, 47875, 40915, 4490, 1857, 64523, 63890, 29963, 3265, 24732, 6558, 56674, 255187, 78937, 55716, + 45373, 202097, 105143, 40496, 1934, 50343, 10400, 93193, 262446, 123174, 33291, 88639, 50855, 19733, 11387, 78609, + 67098, 33565, 79076, 71724, 26898, 68956, 47175, 78105, 5261, 194162, 6861, 11334, 52696, 3195, 1099, 854, + 40644, 42446, 51986, 165826, 33900, 
14512, 8567, 107082, 9440, 96468, 48368, 15017, 180286, 38407, 11266, 27073, + 87162, 25059, 1767, 90124, 22940, 50038, 4456, 79274, 19704, 269589, 3740, 24611, 26936, 118228, 122759, 44861, + 69769, 8268, 21928, 1448, 10254, 25662, 37572, 15808, 101759, 47818, 56338, 32066, 27406, 61598, 102489, 68037, + 12243, 45731, 6222, 13525, 48000, 97528, 22882, 28821, 73926, 12033, 35515, 19990, 113215, 45359, 13095, 69110, + 54935, 144153, 32952, 39972, 5726, 20322, 27148, 119607, 192787, 10814, 127655, 29129, 4312, 11899, 293735, 47721, + 106216, 47945, 13663, 4293, 9366, 4600, 36217, 51600, 11550, 30486, 35147, 4378, 52949, 225366, 876, 56535, + 23457, 10620, 14352, 63024, 212271, 53386, 55283, 2154, 277152, 6832, 58247, 34965, 133895, 60302, 8020, 17598, + 108374, 41827, 77422, 41356, 6191, 78382, 44389, 79737, 96477, 57997, 36253, 168231, 29980, 58643, 13506, 77777, + 218916, 163459, 37836, 70135, 58024, 40795, 89998, 95793, 54696, 46896, 3850, 14959, 40853, 50010, 53886, 103929, + 91124, 21842, 109259, 112031, 65894, 24294, 11400, 75618, 91170, 52085, 77528, 106068, 65908, 36186, 196059, 70011, + 252552, 674, 93814, 79169, 6793, 31343, 87518, 50063, 29212, 56507, 62602, 24490, 15389, 130371, 20806, 17839, + 44516, 4956, 102925, 118742, 122515, 17602, 47643, 17175, 52617, 34827, 384, 128737, 35058, 16456, 4055, 91444, + 9017, 27903, 32324, 74054, 103536, 349949, 23135, 91177, 39510, 20237, 139249, 107742, 49136, 161940, 10176, 4296, + 19242, 19236, 38664, 13941, 130652, 63883, 181786, 74033, 662077, 40517, 51656, 4092, 74699, 174254, 30240, 249851, + 47024, 124719, 88983, 17979, 31422, 88107, 12752, 18046, 8517, 112048, 15131, 61643, 73351, 4553, 10608, 181387, + 24399, 17507, 26238, 34094, 13867, 45419, 28560, 23320, 128360, 95692, 140246, 250559, 4810, 17968, 25372, 235183, + 4434, 11316, 6759, 113457, 61779, 50021, 20556, 133305, 111983, 259709, 231509, 141441, 61036, 58891, 28950, 14898, + 17798, 35773, 7261, 450465, 110240, 66004, 161650, 164984, 59722, 17874, 41866, 39325, 102960, 36234, 10606, 25254, + 39688, 16397, 879, 188946, 10001, 46267, 109745, 88992, 23803, 12899, 109186, 223568, 23039, 16254, 20592, 126376, + 176498, 68200, 93812, 5609, 56659, 71490, 16814, 75820, 44814, 26002, 31909, 11613, 134295, 51635, 17304, 5479, + 17188, 72639, 166564, 60617, 77577, 9173, 51736, 125261, 74466, 141449, 33396, 52135, 226175, 206041, 16540, 2241, + 102472, 15065, 11417, 44369, 154333, 39439, 21371, 35696, 63900, 86098, 215585, 10637, 111747, 26520, 35829, 5072, + 18062, 38762, 86113, 33683, 41171, 51676, 206735, 11386, 79669, 104994, 174586, 84969, 32773, 6701, 65682, 16472, + 408933, 62302, 88447, 143840, 42562, 29889, 168822, 199833, 28931, 31217, 94805, 6702, 30907, 53329, 73464, 80367, + 107388, 92999, 83741, 56375, 43487, 94239, 54863, 13740, 2946, 15038, 117251, 65511, 240310, 36372, 2795, 110090, + 23938, 154352, 180646, 13562, 24354, 38003, 14983, 27192, 319, 49724, 68544, 92943, 184983, 39339, 36199, 161825, + 7927, 16738, 7599, 1393, 6488, 53031, 27832, 35812, 1422, 77769, 52152, 9393, 10790, 70529, 103117, 58677, + 68809, 142754, 214789, 212425, 68209, 24340, 33236, 124155, 64775, 336, 120720, 43770, 4361, 63444, 9512, 52337, + 202, 37869, 58071, 28602, 17123, 124940, 64579, 79394, 59634, 16838, 71347, 33171, 51200, 72048, 194123, 84312, + 44391, 184338, 30592, 49986, 18188, 72135, 53498, 57477, 17843, 74498, 12560, 37524, 2619, 153428, 26875, 24918, + 74278, 49884, 44432, 39983, 3230, 39257, 81646, 26616, 9540, 23710, 69802, 52778, 47187, 280, 20102, 190963, + 21702, 33112, 
201384, 189730, 36274, 151103, 62470, 79614, 56894, 160976, 37846, 3819, 43907, 28142, 33980, 44483, + 16310, 43780, 91255, 6410, 34790, 53414, 55594, 62493, 16866, 126630, 78730, 70800, 6150, 2638, 96447, 42805, + 5561, 80903, 142508, 69107, 13587, 90093, 68310, 13770, 107545, 142426, 6310, 11281, 108873, 30379, 19476, 19039, + 126867, 47619, 44321, 1557, 86986, 12174, 285300, 692, 28640, 174731, 39442, 33395, 33427, 183086, 62041, 33967, + 19017, 71946, 141533, 41962, 5762, 27368, 19966, 260045, 80637, 136704, 106076, 25336, 17430, 7907, 59393, 184, + 46903, 143058, 51209, 156531, 2047, 28617, 58028, 3727, 131055, 2181, 190078, 104219, 25958, 39516, 25800, 76861, + 13558, 64738, 31952, 28604, 5444, 142725, 28795, 87891, 47152, 833, 20563, 69475, 13900, 17091, 271888, 185043, + 44563, 4833, 59908, 40623, 122857, 138131, 6213, 136826, 45348, 94359, 56641, 22196, 70863, 57354, 56451, 72278, + 39593, 53647, 46234, 29708, 54332, 63721, 17639, 16420, 38068, 45645, 18971, 54437, 33637, 39722, 36484, 68634, + 318, 5298, 22418, 20417, 40310, 88, 18126, 44073, 143467, 137263, 71354, 82354, 18502, 189426, 11156, 72484, + 24520, 27572, 28397, 1057, 11377, 43227, 61610, 141001, 62013, 3621, 2123, 25838, 28942, 106389, 138280, 139177, + 27246, 10742, 1290, 24912, 28269, 40413, 12701, 122933, 83545, 3131, 5191, 33319, 17999, 208755, 75460, 44990, + 59015, 14954, 33696, 180654, 90707, 63223, 87538, 51, 30065, 70087, 47405, 49098, 15161, 490862, 57902, 10363, + 34720, 124013, 2826, 107593, 1263, 16539, 8297, 53595, 37008, 190173, 24783, 28381, 2012, 14817, 222228, 23569, + 6060, 37405, 4132, 23773, 98575, 114011, 35293, 24317, 92933, 80389, 14038, 96901, 5721, 58820, 71786, 38355, + 299, 2937, 128393, 129071, 199555, 22135, 61163, 3457, 24578, 103336, 75552, 8037, 29223, 24032, 36855, 65087, + 2985, 11252, 15167, 48922, 743, 16251, 113770, 51774, 115825, 202685, 4095, 133501, 109523, 3240, 22784, 51862, + 136657, 17899, 114978, 57429, 47454, 8657, 11392, 32391, 26378, 35272, 1426, 34467, 53586, 83481, 40561, 57729, + 3733, 111799, 328168, 6514, 174945, 20097, 14557, 18636, 93340, 171450, 639, 117760, 244456, 15998, 75359, 111774, + 5693, 73895, 98142, 34182, 37386, 132752, 48186, 121074, 28782, 11866, 26615, 23940, 89767, 129357, 80551, 82029, + 27545, 83711, 126798, 801, 23573, 21400, 128295, 14924, 18798, 114163, 50035, 114816, 136425, 471234, 15959, 173936, + 34320, 17327, 80636, 27686, 84778, 119579, 98823, 73515, 20041, 82828, 124250, 4650, 48453, 64519, 115563, 26853, + 38215, 37801, 92219, 69955, 7477, 145790, 19159, 94085, 71958, 65302, 12375, 44454, 40621, 106911, 19581, 3379, + 8773, 16999, 182583, 5202, 5874, 127304, 16993, 14116, 187927, 3375, 20370, 44171, 105965, 18978, 61953, 17115, + 51100, 102276, 75811, 7602, 43533, 31235, 7956, 72681, 18083, 5986, 190352, 3671, 8443, 19561, 18603, 95186, + 10180, 31524, 10515, 35607, 43597, 12356, 10299, 174108, 2003, 31154, 62144, 6234, 183999, 16214, 205583, 69997, + 69689, 1386, 87561, 18340, 12216, 23427, 2010, 44232, 129696, 140942, 7349, 4623, 146188, 5101, 86380, 150439, + 62389, 21860, 117536, 12248, 34044, 63481, 85500, 98463, 68410, 7339, 87770, 71963, 12765, 3686, 14919, 2974, + 43273, 7350, 39745, 6266, 26949, 192687, 75021, 968, 266807, 27515, 15493, 5904, 3345, 21226, 90343, 14616, + 34477, 13783, 5111, 69002, 79197, 20455, 25812, 125162, 5688, 23290, 86326, 151802, 47539, 53270, 120925, 57870, + 213110, 15305, 23776, 142238, 21634, 69658, 179702, 13601, 22257, 9455, 35397, 86555, 50092, 17185, 21662, 47115, + 32222, 159490, 66608, 20354, 
42346, 75706, 11938, 55979, 39530, 138927, 7527, 13431, 63668, 92125, 206545, 83160, + 98, 105744, 113739, 10666, 134978, 88373, 50980, 17237, 74022, 5974, 44855, 31946, 5152, 17761, 22091, 89954, + 59088, 181724, 89377, 71648, 174145, 6081, 202459, 12825, 37220, 45669, 60029, 47529, 9934, 69759, 92928, 1003, + 9545, 40944, 40882, 123191, 118937, 215977, 4632, 152290, 5724, 38351, 20824, 19010, 87240, 135102, 56782, 135053, + 19875, 30902, 38714, 93406, 15784, 18212, 103460, 25829, 40143, 17780, 5626, 20039, 23263, 66779, 128772, 41751, + 87513, 216438, 5230, 73516, 181654, 37997, 80801, 90214, 285152, 76150, 31873, 8348, 37881, 138317, 50195, 1565, + 263241, 15964, 118491, 28092, 4966, 6035, 45147, 26418, 43934, 84355, 16241, 7487, 10433, 247295, 3172, 8129, + 186657, 57, 71773, 143295, 6470, 101381, 39489, 160086, 74416, 43233, 52957, 51944, 225854, 53358, 11933, 29452, + 25908, 40737, 49314, 60112, 142677, 7636, 42896, 27738, 246262, 17093, 14777, 56250, 32280, 129157, 16346, 76797, + 6192, 34415, 425, 120600, 75890, 191879, 176315, 63506, 45546, 161456, 5005, 46773, 143264, 38320, 150132, 134225, + 135305, 182762, 55889, 102851, 29742, 44842, 129661, 64244, 47013, 53257, 4250, 50419, 77787, 123983, 24915, 12948, + 11732, 36176, 80467, 160621, 126658, 56748, 175875, 78143, 8763, 54016, 205303, 6236, 37950, 84876, 66862, 80427, + 21806, 125486, 21484, 35813, 57557, 14539, 213401, 86192, 113464, 36625, 64405, 27231, 89465, 4451, 75847, 20978, + 108995, 205734, 68217, 94454, 164574, 18012, 255036, 16771, 23894, 158505, 7114, 43317, 22996, 11028, 52204, 124949, + 23169, 226500, 10370, 46407, 15369, 14412, 60558, 218161, 23117, 18847, 313212, 60955, 17642, 82698, 38578, 289214, + 130607, 42162, 81718, 82632, 40503, 951, 48442, 14289, 36239, 91499, 48742, 125633, 280990, 7266, 26286, 77911, + 44666, 7534, 217478, 178981, 9981, 2833, 22818, 156155, 40427, 12913, 72539, 44825, 147487, 28272, 67343, 16061, + 26869, 28878, 13104, 26717, 168452, 222284, 63772, 8001, 32886, 55288, 25367, 12083, 32991, 27965, 29014, 23535, + 46798, 8822, 7448, 101081, 240839, 93683, 48095, 16054, 15111, 14427, 104643, 135450, 70502, 37385, 89619, 135605, + 65697, 66256, 31643, 242955, 88548, 21883, 9676, 103291, 44145, 3863, 31735, 8400, 28701, 1387, 89573, 11921, + 48767, 27191, 47327, 74488, 31139, 34928, 58382, 10630, 206777, 28582, 17378, 118639, 35659, 45393, 41374, 26204, + 181164, 243974, 22596, 109998, 166262, 140883, 75323, 38999, 14554, 45944, 89326, 18593, 171445, 14273, 83848, 7094, + 31786, 136223, 135153, 75926, 66523, 5050, 82214, 24940, 76607, 13068, 103875, 30264, 17956, 28575, 70190, 14699, + 6507, 6918, 148803, 40975, 31279, 13140, 17326, 280841, 90476, 164678, 26191, 29026, 116611, 14717, 6030, 73654, + 167918, 94589, 13531, 31467, 6560, 37936, 764, 2646, 1243, 47040, 46211, 49422, 115324, 23197, 48193, 11038, + 80128, 4014, 18828, 39730, 41867, 964, 138962, 14313, 55897, 4976, 27379, 30682, 187323, 81139, 45324, 19782, + 37069, 15003, 3973, 32623, 32596, 5813, 218135, 46814, 189444, 1329, 15593, 67740, 145931, 8233, 95368, 52092, + 13390, 126973, 24773, 78080, 105530, 127257, 27684, 75829, 65709, 23804, 30679, 23341, 26805, 39433, 72773, 79105, + 6999, 9337, 78288, 91647, 55714, 45624, 31732, 25179, 41300, 62926, 8984, 56532, 22915, 82260, 13175, 111014, + 68951, 8391, 237398, 27237, 22138, 159504, 224263, 75273, 21120, 32545, 81951, 75664, 22264, 44392, 981, 6782, + 10058, 4181, 2250, 85033, 19945, 215931, 9376, 41673, 33635, 15417, 217394, 101669, 56123, 23340, 51752, 11920, + 99085, 5011, 
143610, 229235, 10032, 59585, 16698, 27704, 5818, 10883, 13785, 186415, 6016, 52857, 9702, 70336, + 46649, 206034, 15092, 14481, 57476, 8081, 27610, 12151, 35264, 32218, 24641, 138702, 94413, 16922, 15037, 25736, + 112522, 11746, 14172, 11310, 262288, 112160, 142819, 50926, 93686, 24209, 43747, 11953, 83038, 1813, 102643, 324202, + 14341, 3919, 29176, 21127, 23204, 81844, 69984, 61119, 28807, 12474, 58355, 40271, 66084, 21889, 11758, 31845, + 77987, 65881, 45978, 68177, 6101, 28932, 58051, 649, 126673, 52123, 157370, 15105, 7133, 62360, 40724, 9837, + 38126, 27864, 30072, 264757, 5923, 6078, 20776, 4896, 122091, 30718, 48046, 119459, 170240, 303310, 26816, 100117, + 97772, 9974, 81454, 42024, 46874, 11564, 45132, 109732, 215746, 2127, 10903, 7713, 43948, 4937, 28852, 25103, + 41622, 38117, 17887, 60135, 3272, 72498, 31571, 43132, 55596, 108898, 45911, 110563, 8332, 37358, 183144, 1744, + 146411, 106155, 85432, 89589, 251315, 29773, 4572, 57991, 13533, 23984, 36596, 74746, 8561, 47865, 143388, 13408, + 81521, 143096, 93820, 10893, 115449, 113660, 48899, 7902, 48616, 6164, 68386, 80304, 175175, 147319, 43500, 47779, + 2063, 16353, 18616, 12432, 186556, 23124, 95665, 69513, 3036, 14556, 14786, 10437, 134537, 36883, 56269, 63535, + 75772, 100719, 86026, 42447, 29728, 3767, 25145, 40239, 82360, 26124, 91863, 12060, 22973, 30854, 96321, 53650, + 186559, 22801, 8489, 72885, 86348, 51954, 28230, 88192, 89100, 269995, 13885, 51315, 38388, 73083, 25625, 53485, + 82297, 39389, 100926, 72363, 45610, 10521, 13154, 68652, 2613, 44579, 170934, 38080, 87082, 32745, 40511, 28882, + 9986, 23752, 68927, 62035, 177812, 181149, 29031, 11611, 57884, 182442, 8046, 104980, 23591, 100153, 104125, 9117, + 47485, 23873, 2671, 349983, 42543, 328134, 85104, 58966, 33582, 332001, 133483, 9354, 44713, 26316, 6446, 63766, + 74439, 40756, 76029, 97107, 257444, 43586, 84500, 59959, 252451, 55620, 150696, 63676, 31825, 65735, 146929, 23371, + 35631, 35977, 145121, 51984, 38540, 33976, 24513, 207079, 33066, 10465, 7127, 153150, 5147, 36952, 154507, 3865, + 13973, 14200, 52272, 11308, 4343, 15766, 13965, 24679, 51830, 184838, 3348, 86524, 70378, 36337, 84987, 49030, + 22827, 32995, 19326, 2046, 26448, 253830, 60248, 12393, 95560, 44044, 28370, 1662, 36896, 50220, 48315, 80320, + 241741, 43652, 242555, 131179, 48067, 39495, 113599, 13797, 203953, 20287, 78696, 3410, 298860, 46405, 39410, 64369, + 61620, 171971, 71030, 204186, 20450, 29322, 37991, 260572, 3220, 386508, 87523, 9404, 67272, 73458, 10375, 45255, + 6586, 2590, 34096, 4160, 107662, 57683, 97396, 79188, 100160, 35851, 78921, 149875, 108684, 200141, 33908, 53318, + 6929, 19857, 56702, 3398, 57226, 58810, 9304, 20429, 4762, 64257, 64571, 51955, 7457, 60202, 39068, 65191, + 1320, 89495, 11353, 17456, 40404, 104230, 19164, 17854, 77204, 58530, 172392, 75503, 99309, 15916, 157308, 83740, + 62750, 50622, 1879, 15474, 208653, 18824, 11343, 41248, 59977, 127748, 31363, 172064, 44000, 65018, 12188, 41891, + 74315, 17651, 19590, 90710, 34332, 9615, 58267, 127126, 5819, 63902, 44975, 20415, 172217, 26030, 99297, 158027, + 64904, 15382, 45953, 118417, 114077, 18724, 56092, 87313, 18147, 79997, 136198, 62361, 84012, 22885, 9665, 4621, + 1791, 3009, 54017, 91348, 98456, 56262, 72712, 106254, 90930, 42901, 80747, 25508, 21446, 133798, 113357, 6097, + 116669, 1181, 110413, 11032, 103938, 49121, 260341, 161282, 7422, 24145, 56140, 35654, 85140, 174230, 9633, 104905, + 59713, 728, 60193, 191876, 5768, 22655, 5145, 41262, 326211, 147566, 80079, 41245, 16239, 59176, 15547, 123829, + 
75411, 13376, 315047, 105840, 13229, 35046, 43694, 56413, 29398, 90069, 53794, 84673, 10758, 107725, 5524, 23780, + 236107, 388309, 62023, 165588, 1539, 46003, 176003, 163955, 112472, 361654, 29424, 49364, 95979, 3700, 306600, 117453, + 152154, 17800, 82564, 14444, 151294, 22058, 29517, 47312, 306, 266768, 196797, 94605, 21196, 107639, 225607, 18057, + 38146, 50176, 69453, 50095, 10700, 216046, 17364, 47494, 6891, 29894, 48715, 14004, 84282, 21694, 7598, 82070, + 109646, 6365, 16302, 27108, 56492, 142883, 77880, 27851, 40539, 187868, 189893, 289432, 6589, 19096, 22176, 166724, + 119491, 38469, 38709, 163079, 51354, 26677, 199471, 115939, 30685, 126480, 79686, 66788, 140209, 95841, 256423, 20274, + 136906, 108937, 4472, 99520, 29622, 157862, 29670, 35606, 73617, 56291, 14416, 1391, 49553, 41902, 66050, 23269, + 70525, 139634, 148637, 11479, 51671, 3128, 65679, 40966, 166869, 116434, 159850, 7654, 139616, 20315, 65982, 116183, + 74395, 50212, 88368, 27581, 37439, 11453, 97247, 212239, 49595, 3922, 25404, 51622, 45678, 120847, 23534, 2190, + 11959, 15866, 21030, 7156, 33211, 32273, 16756, 51864, 86560, 62359, 37272, 150553, 52434, 48096, 52877, 35909, + 9282, 150331, 56064, 3339, 62690, 77469, 38848, 312832, 112155, 50347, 133337, 6119, 130810, 19939, 40188, 198954, + 5243, 178898, 39868, 142856, 108261, 286939, 44549, 159984, 99970, 197697, 81046, 134326, 265613, 8809, 13626, 21584, + 72551, 29643, 102979, 213474, 80049, 198207, 20362, 229516, 6391, 82595, 72275, 12563, 33365, 2420, 161399, 254521, + 90721, 10070, 61781, 32490, 66737, 212773, 229338, 7775, 69872, 54551, 80069, 13914, 87011, 91386, 134664, 33101, + 1860, 15322, 69366, 97910, 9032, 31405, 11616, 221, 112544, 23414, 109925, 66229, 60905, 34215, 18312, 31402, + 37371, 77552, 57720, 2026, 89015, 4380, 50369, 20157, 140351, 42001, 57692, 30433, 19076, 51739, 23715, 62058, + 850, 121732, 145992, 46915, 373531, 25804, 8590, 87747, 2802, 16807, 15221, 116280, 36725, 12360, 34724, 117090, + 218795, 142043, 148440, 65614, 72062, 18466, 55923, 22439, 28990, 58866, 64866, 114538, 16550, 89174, 112318, 27549, + 24614, 155152, 5486, 45048, 7815, 58664, 6423, 11415, 6187, 21207, 67086, 238124, 26336, 2489, 21350, 54052, + 33373, 60539, 51387, 100319, 32162, 11584, 95109, 44016, 42791, 31049, 47206, 52852, 73555, 110693, 7535, 38410, + 32062, 15667, 9670, 65566, 23386, 531, 44985, 2760, 10244, 123017, 50775, 39638, 56392, 170971, 54953, 18366, + 49442, 134359, 57768, 10659, 27076, 77194, 62382, 113419, 136262, 150169, 22322, 207134, 12412, 139797, 55514, 2505, + 14883, 65500, 22972, 15267, 1134, 64278, 37799, 235955, 33675, 43711, 22813, 276041, 97153, 48116, 34495, 6178, + 199281, 32510, 95181, 5794, 15608, 76263, 19924, 230629, 100152, 10562, 76444, 119798, 74072, 219457, 36986, 12066, + 47942, 54591, 35202, 23051, 254301, 155103, 68248, 13470, 36451, 42899, 93606, 121040, 16026, 27968, 10851, 17794, + 10687, 100974, 49021, 10866, 65067, 10018, 39088, 10965, 56708, 897, 11410, 7452, 254030, 47692, 32629, 18771, + 30290, 48037, 43471, 14347, 50490, 66808, 37049, 49968, 13864, 83559, 25801, 3591, 57941, 75692, 173303, 61385, + 259331, 1969, 57685, 2094, 35588, 6233, 27697, 16717, 23485, 26772, 4734, 15135, 43486, 85019, 26988, 179071, + 24869, 25026, 9295, 27083, 21620, 11383, 45847, 134822, 92971, 19856, 42005, 31000, 22072, 2896, 21798, 125082, + 88645, 561, 47297, 28868, 1048, 75739, 25425, 197147, 182050, 124782, 126886, 12162, 13343, 152665, 53046, 7557, + 32452, 9893, 110355, 9538, 14825, 62686, 7879, 104424, 19509, 31568, 4996, 5559, 
3325, 22164, 66618, 2476, + 216938, 38862, 52182, 79198, 45740, 52776, 32070, 132672, 99716, 19543, 5515, 40777, 189082, 6051, 3103, 146615, + 53740, 256827, 80531, 104166, 78245, 34550, 28933, 112044, 25609, 72638, 36640, 25629, 24311, 56326, 11524, 83163, + 176777, 23393, 82414, 6106, 47340, 19377, 61707, 10698, 308354, 82475, 8066, 15310, 40669, 62347, 33738, 15955, + 66085, 140789, 4852, 37500, 14102, 5845, 9813, 54656, 125339, 67825, 97677, 67735, 9225, 11506, 173536, 159289, + 128709, 12613, 20379, 46259, 97207, 42699, 91068, 45947, 1271, 211146, 104284, 55003, 200933, 14250, 55082, 49995, + 78439, 185897, 62876, 11600, 113451, 32229, 199030, 36486, 88975, 65343, 140167, 135960, 18324, 638, 86929, 96115, + 46521, 34134, 437, 7115, 11819, 80629, 96102, 12424, 18570, 81183, 15089, 30525, 141756, 201210, 66036, 47056, + 72512, 98759, 18003, 68671, 170020, 14775, 7872, 86707, 52754, 279230, 82966, 13276, 63550, 101747, 103537, 30259, + 118515, 110652, 15079, 51435, 103073, 104977, 76964, 5981, 93330, 91388, 21050, 56718, 32736, 2464, 36579, 80299, + 50499, 49852, 67313, 130037, 14722, 2418, 7783, 76521, 31600, 78508, 133834, 49167, 68452, 47680, 2363, 25459, + 398867, 67795, 165159, 68999, 29316, 33111, 23239, 12957, 172786, 66330, 3816, 4414, 18417, 12030, 30134, 7919, + 104924, 9960, 36133, 26144, 2606, 105224, 32252, 42036, 5670, 72687, 493, 78524, 84818, 34715, 26322, 28439, + 16288, 21908, 74255, 9962, 67106, 147542, 139191, 43764, 59580, 72920, 393509, 63136, 82929, 53980, 78657, 4543, + 607401, 11665, 318088, 11366, 291, 7537, 212378, 77254, 85829, 59252, 37336, 13232, 359, 43117, 65592, 71269, + 15897, 112396, 53939, 40125, 35830, 56176, 59326, 11017, 50696, 114234, 276483, 22837, 65630, 17802, 22227, 18232, + 52672, 51170, 100713, 92360, 22115, 91842, 43063, 195957, 356968, 3794, 166425, 56044, 29895, 163395, 11168, 56699, + 40837, 67702, 27339, 20360, 231192, 89936, 103744, 1998, 34024, 32020, 3803, 117654, 38957, 94943, 70290, 85606, + 26722, 43088, 170484, 36210, 406, 282841, 54770, 175134, 23335, 44094, 73528, 47037, 124952, 31360, 23208, 78534, + 72068, 123285, 11398, 40458, 68804, 30009, 6939, 3499, 13268, 40221, 12223, 61566, 147101, 333845, 73905, 2372, + 164740, 293468, 55614, 327574, 276569, 59394, 21940, 154180, 162596, 28918, 37039, 166169, 66943, 84556, 40144, 10616, + 11569, 25337, 104847, 48420, 26654, 76526, 228642, 20116, 66358, 44381, 25600, 2578, 4777, 70479, 5757, 64766, + 23229, 11688, 27998, 24560, 102127, 6006, 130766, 11689, 5848, 24290, 203474, 51926, 978, 76149, 170663, 68953, + 2921, 5461, 117041, 24360, 59666, 1098, 64926, 198078, 5371, 1164, 166512, 13456, 28212, 22987, 95713, 13302, + 90108, 31433, 120078, 63947, 42938, 68482, 38260, 42265, 39320, 109797, 110494, 79743, 2499, 2553, 58577, 180281, + 4271, 259624, 94417, 68375, 108792, 50431, 9717, 29255, 33510, 160264, 7272, 343301, 125072, 154624, 6168, 27338, + 71653, 51148, 140929, 51394, 65239, 109678, 179395, 7761, 38250, 81439, 23490, 79048, 66357, 53948, 107018, 28855, + 38577, 94122, 43589, 44430, 13964, 103761, 2708, 12411, 86251, 119198, 17302, 51623, 35708, 305, 95393, 8798, + 50755, 41461, 203637, 19736, 36010, 8599, 54546, 13603, 29448, 118755, 50260, 10357, 12209, 86678, 39594, 88467, + 3844, 173096, 17788, 39975, 38222, 14809, 54370, 53581, 206337, 67848, 23694, 2309, 100876, 41983, 276960, 18075, + 67827, 14170, 117970, 89349, 137088, 75893, 70548, 20757, 14167, 10804, 5959, 67463, 252225, 44451, 87528, 36335, + 84163, 175996, 66912, 69227, 195270, 25238, 167523, 96366, 1306, 7967, 
27706, 52700, 5703, 285, 51677, 60197, + 54198, 170697, 20548, 18244, 779, 4822, 39984, 71212, 46802, 72502, 31290, 74896, 22028, 154697, 58236, 131173, + 51124, 252252, 64234, 48608, 86759, 36236, 13170, 143379, 70560, 101041, 195793, 70671, 113164, 99377, 70248, 34118, + 35685, 116394, 50149, 302730, 162145, 121592, 530, 30881, 45471, 162432, 6235, 49645, 34561, 40287, 58509, 43757, + 422, 70918, 113036, 190344, 2611, 233661, 162936, 32114, 6464, 94933, 54217, 64327, 47486, 871, 90931, 33404, + 19223, 20183, 3928, 34508, 38246, 36359, 11459, 66339, 9191, 90968, 122115, 45027, 18331, 84569, 82055, 106565, + 89942, 52285, 40019, 20438, 243642, 100401, 166242, 127119, 212364, 42312, 34711, 1671, 15893, 23179, 5020, 74061, + 17518, 110465, 11940, 3873, 22617, 123195, 18144, 100726, 6409, 91356, 45936, 73471, 30046, 108852, 212969, 66765, + 126182, 98830, 107226, 23993, 59716, 48049, 45651, 82888, 36560, 16256, 52004, 17296, 104428, 12933, 38645, 135609, + 18846, 26099, 40801, 56830, 26592, 992, 156526, 79480, 19458, 91618, 39463, 7988, 50793, 54675, 156601, 19881, + 147333, 1159, 50024, 77736, 30826, 64647, 13710, 115978, 1388, 51510, 5276, 207487, 27647, 59310, 5123, 271841, + 10922, 2382, 11425, 17267, 14495, 244507, 2126, 492, 33545, 12138, 8818, 184454, 19269, 134769, 8528, 57017, + 135828, 73552, 22221, 65808, 39727, 367870, 203492, 24483, 41601, 196988, 198, 55446, 46931, 68675, 244761, 5411, + 233379, 19207, 36423, 316277, 49169, 745, 204311, 317017, 131130, 150130, 101903, 260111, 182112, 30434, 25375, 59274, + 16276, 109977, 54255, 20999, 82381, 135770, 2885, 31724, 118209, 21645, 119343, 36886, 142445, 81249, 42421, 43503, + 128310, 66260, 92555, 94890, 19672, 1769, 178045, 35419, 28740, 2136, 226543, 24030, 82907, 124857, 54353, 157870, + 33436, 38109, 85642, 96673, 3118, 112407, 1944, 31498, 102206, 135319, 205619, 160787, 28723, 91910, 50034, 79540, + 24819, 28372, 80113, 173951, 41937, 15370, 19059, 55603, 38854, 100638, 70561, 519, 5157, 19218, 16617, 91793, + 3881, 75012, 176191, 145596, 111491, 20452, 154738, 27981, 1142, 2054, 22256, 54130, 9776, 19737, 32399, 69945, + 421673, 103058, 91031, 7281, 152241, 74595, 46116, 86993, 29309, 22846, 33982, 54529, 14961, 41775, 23014, 131668, + 87854, 171036, 94711, 50319, 6054, 72531, 3482, 3581, 15424, 83151, 45387, 66155, 3796, 118067, 32026, 181774, + 82656, 49811, 12569, 44671, 54996, 83240, 157346, 143069, 2108, 19813, 11164, 42601, 55367, 1359, 101577, 27699, + 239450, 9023, 33206, 152235, 154525, 73472, 7296, 55929, 9643, 80206, 87554, 68722, 118103, 89632, 161537, 59640, + 106041, 77231, 63719, 12373, 64601, 98305, 1056, 46674, 68549, 18960, 17748, 19013, 48707, 296146, 134285, 64092, + 30266, 15379, 85084, 87899, 25772, 62788, 25525, 31250, 18740, 80665, 23101, 34025, 9462, 7075, 49746, 39284, + 229669, 57834, 2626, 248569, 91798, 873, 22206, 84442, 112152, 160148, 59240, 6711, 191327, 15256, 141511, 171566, + 14493, 68797, 15010, 17086, 72828, 164513, 36088, 32054, 8175, 11054, 81290, 64307, 66636, 51647, 21137, 68255, + 236474, 72999, 12123, 66901, 25817, 58290, 23813, 41818, 87351, 51685, 349139, 15386, 129027, 92193, 14750, 7028, + 76653, 56861, 59524, 43395, 20422, 123741, 40958, 19478, 22983, 87931, 5921, 15341, 71240, 18213, 18961, 25648, + 27846, 61261, 75568, 216919, 44661, 12442, 49311, 68342, 12399, 74324, 7455, 42754, 46158, 66251, 405, 72411, + 77704, 58295, 15625, 4552, 53101, 50537, 30941, 37141, 35032, 18292, 98289, 17870, 11072, 115848, 60108, 70972, + 17300, 13269, 63524, 140693, 109294, 93883, 56701, 
69184, 33638, 4485, 36667, 26721, 24408, 5954, 28290, 80247, + 1895, 82128, 40307, 96015, 11241, 5825, 45230, 255638, 760, 31698, 12512, 26145, 17584, 92444, 8948, 17954, + 82479, 9085, 5850, 120208, 125877, 9751, 11265, 22102, 63150, 153550, 69826, 75885, 141075, 131001, 14419, 128804, + 34259, 129918, 115229, 23808, 23274, 3580, 82265, 18942, 81698, 8545, 39913, 79933, 15732, 6741, 38339, 39271, + 43577, 31006, 30604, 53478, 48340, 102062, 39630, 12695, 91584, 222, 20589, 89230, 14688, 30824, 97582, 47266, + 16379, 99608, 42679, 70464, 24481, 4475, 80121, 49522, 150280, 121584, 178585, 20071, 96420, 5695, 31648, 64033, + 262050, 20662, 107571, 34749, 48635, 192388, 60052, 163993, 43727, 40545, 72642, 99324, 61819, 17935, 20846, 61496, + 56268, 69226, 133071, 52853, 72003, 57628, 110499, 29460, 88178, 40245, 24970, 58958, 17281, 21360, 121825, 31853, + 79912, 81792, 201844, 95444, 13218, 256154, 26236, 61260, 122519, 90685, 37984, 5119, 125295, 126359, 310134, 54407, + 166396, 6520, 28971, 31149, 11811, 266489, 27120, 1794, 2171, 23105, 744, 2814, 118930, 46693, 140092, 4993, + 67746, 27308, 66270, 97039, 17636, 6061, 69135, 4202, 178278, 7472, 32642, 40673, 174656, 26758, 204108, 44815, + 95661, 95589, 192828, 73663, 173039, 77882, 43232, 71654, 83845, 55846, 26313, 21216, 79689, 31469, 85659, 11793, + 17473, 17000, 64471, 78858, 98555, 104223, 20905, 121028, 127696, 15679, 22246, 93167, 203415, 40670, 1525, 47197, + 54730, 29955, 27650, 142614, 22925, 38365, 107626, 61283, 232239, 25514, 194946, 12768, 9309, 63949, 114873, 57567, + 12136, 30868, 3548, 537341, 175026, 133711, 27455, 27667, 20740, 32351, 1997, 26211, 180188, 35259, 10358, 54362, + 10747, 42370, 12304, 6425, 39816, 22704, 99010, 215128, 314017, 17879, 58536, 20732, 266131, 43327, 1650, 27592, + 10040, 89403, 28410, 125002, 175732, 21475, 13832, 98954, 112550, 155503, 53781, 62057, 220651, 63490, 218647, 26496, + 31974, 28320, 13557, 72935, 37393, 40244, 102949, 25746, 888, 15552, 12165, 23782, 23008, 37306, 182690, 178294, + 86799, 19876, 69717, 10583, 4303, 116880, 7218, 92683, 64905, 100026, 340736, 142052, 148467, 8925, 2702, 63925, + 75337, 81983, 220124, 89751, 251, 226035, 14097, 1808, 3284, 142418, 16036, 72819, 370102, 13289, 144922, 3996, + 50264, 199033, 45199, 139880, 9835, 4702, 60405, 74816, 5438, 7368, 27687, 162954, 23655, 159039, 21280, 61851, + 4481, 92865, 109762, 3285, 29851, 3021, 104939, 2905, 329, 63385, 22681, 52094, 12855, 38488, 18381, 19211, + 7162, 61266, 8835, 22825, 64931, 45593, 66502, 25309, 78141, 46199, 59413, 50610, 12804, 59952, 186517, 61018, + 42372, 46728, 18388, 90815, 296771, 59091, 46636, 192289, 83547, 3423, 29852, 2745, 18624, 16583, 357641, 32404, + 34874, 30511, 86377, 868, 86271, 59760, 81404, 39749, 3360, 74207, 15394, 156217, 48665, 41137, 72366, 52831, + 77735, 59042, 22515, 6142, 88767, 22116, 68286, 40920, 11463, 78197, 68958, 24062, 63527, 100286, 139882, 65777, + 28889, 12481, 28953, 8266, 22258, 3319, 99181, 17609, 29140, 179534, 30832, 42841, 194315, 120705, 27548, 161124, + 113924, 42548, 41864, 56260, 25499, 42783, 177062, 105955, 6406, 14311, 23992, 86657, 31334, 225197, 24185, 39921, + 1845, 104026, 301294, 95718, 4802, 8899, 157667, 77564, 49184, 6115, 80340, 47518, 43455, 6339, 54561, 39882, + 35469, 115497, 123233, 68548, 127594, 20262, 97680, 60841, 92970, 5781, 28954, 4558, 61038, 45382, 35089, 49876, + 115005, 15489, 27010, 91676, 38840, 12352, 20606, 19800, 87761, 12264, 9268, 146639, 106838, 47766, 91230, 8234, + 8811, 48534, 107720, 27259, 20572, 34400, 
108143, 52933, 55637, 28872, 61739, 77203, 11162, 21038, 66975, 30423, + 96721, 31993, 45541, 7376, 132425, 71889, 178420, 446221, 108925, 260438, 102283, 4056, 2948, 77259, 83943, 38199, + 125457, 36830, 123208, 391, 36356, 138390, 99456, 92051, 3502, 239674, 36201, 114068, 75270, 3160, 39536, 218269, + 27622, 12173, 56780, 8501, 127192, 66434, 47097, 13635, 2561, 98519, 73258, 96646, 123095, 5710, 42788, 66384, + 49394, 12035, 7389, 23253, 61155, 251141, 4195, 439, 16897, 56354, 25580, 66462, 110064, 188570, 17260, 12827, + 9699, 13844, 208611, 7653, 89448, 41275, 5078, 37917, 53356, 45195, 15877, 74097, 19628, 231041, 21225, 15175, + 220310, 3514, 79626, 97496, 21622, 20434, 48926, 95346, 83036, 47481, 10584, 14331, 9885, 4023, 29396, 21139, + 112214, 87100, 83793, 9796, 6087, 423, 60612, 11748, 26713, 29951, 132442, 40260, 17901, 55713, 5620, 88019, + 161912, 177970, 3729, 49808, 91492, 35869, 138357, 40508, 3440, 61216, 56765, 68562, 68594, 2747, 88777, 43463, + 9266, 44125, 1567, 2354, 92238, 29774, 47207, 47789, 8087, 20375, 191924, 3415, 6866, 22316, 82861, 233038, + 150194, 13698, 143688, 29411, 72175, 16465, 14358, 220015, 80701, 53366, 59020, 22661, 13459, 20745, 8739, 76074, + 31836, 46743, 45518, 51271, 43243, 19787, 114669, 18136, 239700, 15692, 105609, 60536, 95846, 27460, 7762, 225232, + 44749, 11206, 14819, 1690, 50647, 170657, 224611, 139596, 21945, 134017, 15972, 174955, 230538, 2804, 25876, 121127, + 120612, 18921, 14091, 435, 132371, 178953, 144326, 158152, 244604, 220898, 21478, 121856, 5193, 4031, 105823, 11008, + 105637, 134379, 253591, 97747, 34661, 247232, 20987, 6949, 41341, 106816, 110210, 45958, 68775, 150399, 11104, 93886, + 85393, 28015, 147749, 112829, 1874, 19994, 21402, 16367, 8771, 33037, 11041, 96701, 33718, 36354, 26705, 23369, + 49672, 29673, 72422, 32419, 77403, 36496, 28454, 23255, 595452, 242129, 61562, 58092, 99507, 41978, 40275, 32822, + 6490, 1688, 175006, 8864, 58895, 13716, 45499, 120546, 128742, 24764, 141091, 121483, 7704, 83412, 14149, 58968, + 39239, 165272, 32855, 72184, 73217, 52628, 13081, 73279, 43816, 9383, 216195, 56823, 62824, 48448, 191659, 3540, + 37804, 223316, 171995, 17606, 199976, 21733, 141024, 23939, 22361, 42786, 77686, 3523, 80005, 1542, 22284, 32365, + 87514, 43833, 4665, 93155, 94832, 32683, 134693, 9494, 14089, 54921, 16128, 131782, 4574, 168587, 76247, 7989, + 139975, 821, 8368, 108503, 59142, 158797, 137, 205170, 75523, 18074, 13682, 91077, 100268, 65492, 54879, 15629, + 43906, 38056, 45569, 40180, 53442, 24989, 20763, 24867, 15152, 30094, 129619, 140074, 2547, 23241, 27435, 7171, + 186002, 4003, 5665, 192737, 17011, 57494, 230276, 241405, 19513, 27773, 95035, 92634, 204282, 5213, 32107, 87507, + 3343, 10550, 3806, 71001, 60568, 10837, 23329, 144168, 128318, 1900, 47551, 4240, 119250, 50444, 64351, 85851, + 4298, 169567, 1401, 13814, 51871, 3524, 75657, 25885, 41336, 136110, 12759, 77034, 71759, 22871, 604, 13904, + 21921, 84968, 84920, 208954, 45074, 13960, 4204, 102255, 98169, 58850, 58448, 58879, 145889, 22357, 8919, 58428, + 99427, 13803, 157733, 68068, 11350, 61811, 360594, 118202, 1237, 824, 163104, 118356, 5520, 769, 31581, 20685, + 28799, 181670, 40637, 38360, 7803, 8532, 69133, 37235, 53702, 86519, 85294, 62552, 21026, 8827, 142049, 30386, + 136352, 11344, 158995, 19682, 38293, 242831, 103750, 55804, 128690, 108982, 27181, 18409, 12158, 167408, 120214, 132169, + 90132, 134213, 7909, 28749, 44600, 10115, 55121, 16581, 10184, 82321, 25270, 21542, 26957, 2707, 106897, 145041, + 39459, 145473, 48977, 26927, 
126025, 157588, 249490, 64382, 78904, 11519, 1284, 9871, 82999, 78364, 173378, 109477, + 59373, 50500, 2168, 30838, 39301, 154212, 66143, 91333, 150198, 28707, 45440, 20859, 120529, 33550, 21869, 80014, + 153042, 19905, 153475, 81658, 20177, 158807, 120156, 38566, 50089, 6373, 63762, 19510, 14764, 26971, 108976, 72526, + 271571, 84066, 18309, 66438, 30530, 98093, 65740, 53411, 123161, 23236, 24050, 64130, 38975, 177329, 37078, 133183, + 101562, 89382, 51844, 19732, 22941, 26188, 51520, 22735, 5648, 43118, 130081, 12788, 124654, 200339, 25097, 48211, + 109243, 196680, 216387, 69966, 69817, 55482, 6031, 5293, 71675, 18384, 137078, 73066, 49162, 68808, 11413, 25901, + 106884, 643, 4412, 18355, 21241, 36413, 7382, 16629, 107795, 6893, 5332, 242, 30258, 49533, 74544, 39490, + 16572, 4199, 12724, 122748, 188262, 108611, 126989, 88570, 141456, 72114, 87870, 20276, 7688, 37800, 22712, 59241, + 60718, 170557, 299711, 3515, 8271, 16537, 107094, 81327, 11044, 299399, 71715, 154123, 32440, 16413, 169052, 42581, + 104608, 33812, 5696, 16661, 103419, 161, 39832, 179084, 236109, 71375, 67676, 75508, 93156, 21777, 80970, 58192, + 43293, 31757, 51423, 41531, 128929, 182898, 12880, 113231, 42107, 61632, 45914, 4884, 67180, 4744, 128700, 2781, + 25201, 36266, 194380, 87971, 115254, 341, 41014, 57871, 185488, 92043, 17835, 89050, 130954, 19517, 84683, 21380, + 72813, 45915, 93851, 203411, 167547, 176973, 63085, 59916, 20537, 17002, 36711, 31276, 39969, 36726, 65357, 13243, + 38432, 15644, 94063, 10719, 22582, 47135, 16038, 5381, 184022, 23165, 76012, 35198, 1139, 18638, 45545, 84452, + 27199, 192134, 119684, 123811, 5655, 13706, 141932, 24822, 17767, 37181, 5142, 34476, 97412, 225589, 175180, 68777, + 122606, 11285, 10611, 55686, 209377, 100096, 22340, 26689, 27070, 51760, 149649, 30372, 35871, 50512, 21058, 17439, + 326617, 170142, 107982, 135181, 188954, 85308, 56136, 9593, 42680, 26872, 58659, 5746, 73512, 25617, 2549, 48114, + 80911, 1733, 156604, 26196, 22629, 16115, 47515, 69763, 3011, 81888, 4772, 72580, 95021, 23422, 61841, 69210, + 315242, 20699, 13055, 19951, 157737, 52563, 31431, 59838, 383, 35462, 55449, 68880, 41821, 63984, 213573, 50441, + 41808, 53480, 40494, 130778, 19335, 64598, 138641, 25152, 27950, 8191, 57199, 35528, 15674, 204275, 70906, 3181, + 25677, 26876, 2717, 132658, 110950, 49839, 49173, 20862, 35375, 20135, 50308, 213100, 76835, 103314, 64615, 7399, + 59108, 22329, 92119, 34649, 57370, 20920, 11016, 129444, 35262, 68761, 92220, 17938, 16569, 14039, 59057, 72434, + 160415, 16248, 7148, 40010, 37706, 58080, 149680, 137070, 78086, 105307, 67671, 478, 32041, 27870, 179796, 13035, + 49691, 26716, 81195, 147295, 137143, 13139, 168200, 45495, 9782, 24335, 30927, 557, 172080, 226060, 57625, 14169, + 50148, 53124, 40398, 22321, 77917, 74830, 6334, 70846, 6323, 77024, 9517, 93307, 10110, 13831, 4136, 54992, + 69172, 15584, 33047, 77148, 17711, 31085, 33621, 126215, 21795, 114268, 35065, 145060, 59511, 11859, 154026, 131303, + 76184, 102024, 58089, 66420, 135114, 32471, 26586, 9983, 31046, 232116, 194394, 99288, 132319, 610, 10459, 98229, + 59105, 34807, 29993, 22965, 157578, 4107, 28141, 140655, 20549, 7101, 7846, 55412, 80778, 17135, 7430, 73220, + 57649, 27939, 10941, 92844, 158421, 173174, 64726, 12726, 65143, 202755, 176021, 57189, 4575, 7195, 177904, 25156, + 72235, 146111, 11686, 22007, 21899, 135284, 138978, 752, 10797, 65724, 5168, 151662, 92745, 109290, 75372, 160210, + 34035, 17369, 97529, 60335, 106079, 2306, 2423, 4131, 80159, 158934, 136359, 59711, 4508, 40343, 250673, 65860, + 
78304, 17795, 104032, 148124, 25350, 58256, 33525, 20642, 75457, 81761, 183350, 24569, 46458, 63924, 58666, 8047, + 32937, 81997, 33987, 7245, 25623, 17931, 5112, 122123, 47, 80630, 79317, 15250, 8531, 7845, 42854, 87493, + 104751, 31479, 59823, 168974, 84953, 28434, 95840, 86398, 8138, 40995, 4860, 26024, 36508, 101200, 49636, 8174, + 187199, 50053, 89152, 20854, 66310, 61067, 8004, 30413, 115274, 278866, 106773, 120445, 13253, 40328, 1516, 70360, + 32461, 1703, 301530, 572, 38536, 75536, 423620, 18713, 1916, 3143, 70650, 60724, 42007, 14851, 262515, 136679, + 187160, 70985, 131034, 54573, 35055, 14435, 225137, 23005, 26325, 174156, 20786, 195824, 84394, 19162, 85376, 70194, + 35963, 49566, 21279, 91399, 94216, 64873, 68891, 55512, 45590, 3382, 26979, 72069, 97782, 126859, 187860, 246200, +} diff --git a/v2/internal/coord/cplutil/cpl_test.go b/v2/internal/coord/cplutil/cpl_test.go new file mode 100644 index 00000000..4ea4ad73 --- /dev/null +++ b/v2/internal/coord/cplutil/cpl_test.go @@ -0,0 +1,60 @@ +package cplutil + +import ( + "crypto/rand" + "encoding/binary" + "testing" + + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/plprobelab/go-kademlia/key" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPrefix(t *testing.T) { + testCases := []uint16{ + 0b1111111111111111, + 0b1111111111111101, + 0b1011111111111101, + 0b0000000000000000, + 0b0000000000000010, + } + + makeKeyWithPrefix := func(v uint16) key.Key256 { + data := [32]byte{} + binary.BigEndian.PutUint16(data[0:2], v) + return key.NewKey256(data[:]) + } + + for _, tc := range testCases { + k := makeKeyWithPrefix(tc) + + for cpl := 0; cpl < 15; cpl++ { + p := prefix(k, cpl) + k2 := makeKeyWithPrefix(p) + assert.Equal(t, cpl, k.CommonPrefixLength(k2), "cpl %d: generated prefix %016b for key starting %016b", cpl, p, tc) + } + } +} + +func TestGenRandPeerID(t *testing.T) { + randomKey := func() kadt.Key { + var buf [32]byte + _, _ = rand.Read(buf[:]) + return kadt.NewKey(buf[:]) + } + + keys := make([]kadt.Key, 20) + for i := range keys { + keys[i] = randomKey() + } + + for _, k := range keys { + for cpl := 0; cpl < 15; cpl++ { + id, err := GenRandPeerID(k, cpl) + require.NoError(t, err) + + assert.Equal(t, cpl, k.CommonPrefixLength(id.Key())) + } + } +} diff --git a/v2/internal/coord/cplutil/gen.go b/v2/internal/coord/cplutil/gen.go new file mode 100644 index 00000000..e4e3a676 --- /dev/null +++ b/v2/internal/coord/cplutil/gen.go @@ -0,0 +1,76 @@ +//go:build ignore + +package main + +import ( + "crypto/sha256" + "encoding/binary" + "fmt" + "os" + "strings" + + mh "github.com/multiformats/go-multihash" +) + +const ( + bits = 16 + target = 1 << bits + idLen = 32 + 2 +) + +func main() { + pkg := os.Getenv("GOPACKAGE") + file := os.Getenv("GOFILE") + targetFile := strings.TrimSuffix(file, ".go") + "_prefixmap.go" + + ids := new([target]uint32) + found := new([target]bool) + count := int32(0) + + out := make([]byte, 32) + inp := [idLen]byte{mh.SHA2_256, 32} + hasher := sha256.New() + + for i := uint32(0); count < target; i++ { + binary.BigEndian.PutUint32(inp[2:], i) + + hasher.Write(inp[:]) + out = hasher.Sum(out[:0]) + hasher.Reset() + + prefix := binary.BigEndian.Uint32(out) >> (32 - bits) + if !found[prefix] { + found[prefix] = true + ids[prefix] = i + count++ + } + } + + f, err := os.Create(targetFile) + if err != nil { + panic(err) + } + + printf := func(s string, args ...interface{}) { + _, err = fmt.Fprintf(f, s, args...) 
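+ // any write error is captured here and aborts code generation via the panic below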
+ if err != nil { + panic(err) + } + } + + printf("package %s\n\n", pkg) + printf("// Code generated by ./gen.go DO NOT EDIT\n") + printf("var keyPrefixMap = [...]uint32{") + for i, j := range ids[:] { + if i%16 == 0 { + printf("\n\t") + } else { + printf(" ") + } + printf("%d,", j) + } + printf("\n}\n") + if err = f.Close(); err != nil { + panic(err) + } +} diff --git a/v2/internal/coord/event.go b/v2/internal/coord/event.go index fddc40ef..69f86a33 100644 --- a/v2/internal/coord/event.go +++ b/v2/internal/coord/event.go @@ -232,3 +232,9 @@ type EventNotifyNonConnectivity struct { func (*EventNotifyNonConnectivity) behaviourEvent() {} func (*EventNotifyNonConnectivity) routingCommand() {} + +// EventRoutingPoll notifies a routing behaviour that it may proceed with any pending work. +type EventRoutingPoll struct{} + +func (*EventRoutingPoll) behaviourEvent() {} +func (*EventRoutingPoll) routingCommand() {} diff --git a/v2/internal/coord/internal/nettest/layouts.go b/v2/internal/coord/internal/nettest/layouts.go index 7fce42f0..b85d320d 100644 --- a/v2/internal/coord/internal/nettest/layouts.go +++ b/v2/internal/coord/internal/nettest/layouts.go @@ -44,11 +44,11 @@ func LinearTopology(n int, clk clock.Clock) (*Topology, []*Peer, error) { for i := 0; i < len(nodes); i++ { if i > 0 { nodes[i].Router.AddToPeerStore(context.Background(), nodes[i-1].NodeID) - nodes[i].RoutingTable.AddNode(kadt.PeerID(nodes[i-1].NodeID)) + nodes[i].RoutingTable.AddNode(nodes[i-1].NodeID) } if i < len(nodes)-1 { nodes[i].Router.AddToPeerStore(context.Background(), nodes[i+1].NodeID) - nodes[i].RoutingTable.AddNode(kadt.PeerID(nodes[i+1].NodeID)) + nodes[i].RoutingTable.AddNode(nodes[i+1].NodeID) } } diff --git a/v2/internal/coord/internal/nettest/routing.go b/v2/internal/coord/internal/nettest/routing.go index 880e27e4..05360c87 100644 --- a/v2/internal/coord/internal/nettest/routing.go +++ b/v2/internal/coord/internal/nettest/routing.go @@ -69,7 +69,7 @@ func NewRouter(self kadt.PeerID, top *Topology) *Router { } func (r *Router) NodeID() kad.NodeID[kadt.Key] { - return kadt.PeerID(r.self) + return r.self } func (r *Router) handleMessage(ctx context.Context, n kadt.PeerID, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error) { diff --git a/v2/internal/coord/internal/tiny/node.go b/v2/internal/coord/internal/tiny/node.go index 2ad224cc..065b5881 100644 --- a/v2/internal/coord/internal/tiny/node.go +++ b/v2/internal/coord/internal/tiny/node.go @@ -2,6 +2,8 @@ package tiny import ( + "fmt" + "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" ) @@ -33,3 +35,14 @@ func (n Node) Equal(other Node) bool { func (n Node) String() string { return key.HexString(n.key) } + +// NodeWithCpl returns a [Node] that has a common prefix length of cpl with the supplied [Key] +func NodeWithCpl(k Key, cpl int) (Node, error) { + if cpl > k.BitLen()-1 { + return Node{}, fmt.Errorf("cpl too large") + } + + // flip the bit after the cpl + mask := Key(1 << (k.BitLen() - cpl - 1)) + return Node{key: k.Xor(mask)}, nil +} diff --git a/v2/internal/coord/internal/tiny/node_test.go b/v2/internal/coord/internal/tiny/node_test.go index a6e175e5..32dd05f9 100644 --- a/v2/internal/coord/internal/tiny/node_test.go +++ b/v2/internal/coord/internal/tiny/node_test.go @@ -2,6 +2,9 @@ package tiny import ( "fmt" + "testing" + + "github.com/stretchr/testify/assert" ) func ExampleNode_String() { @@ -15,3 +18,15 @@ func ExampleNode_String() { // ff // 55 } + +func TestNodeWithCpl(t *testing.T) { + testCases := 
[]Key{Key(1), Key(2), Key(4), Key(8), Key(16), Key(32), Key(16), Key(128), Key(33), Key(159), Key(0), Key(255)}
+
+ for _, k := range testCases {
+ for cpl := 0; cpl < 8; cpl++ {
+ n, err := NodeWithCpl(k, cpl)
+ assert.NoError(t, err)
+ assert.Equal(t, cpl, k.CommonPrefixLength(n.Key()))
+ }
+ }
+}
diff --git a/v2/internal/coord/routing.go b/v2/internal/coord/routing.go
index 70e64868..955b63cb 100644
--- a/v2/internal/coord/routing.go
+++ b/v2/internal/coord/routing.go
@@ -5,16 +5,26 @@ import (
 "fmt"
 "sync"
 
- "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt"
 "go.opentelemetry.io/otel/attribute"
 "go.opentelemetry.io/otel/trace"
 "golang.org/x/exp/slog"
 
+ "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt"
 "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/routing"
 "github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
 )
 
-// A RoutingBehaviour provices the behaviours for bootstrapping and maintaining a DHT's routing table.
+const (
+ // IncludeQueryID is the id for connectivity checks performed by the include state machine.
+ // This identifier is used for routing network responses to the state machine.
+ IncludeQueryID = coordt.QueryID("include")
+
+ // ProbeQueryID is the id for connectivity checks performed by the probe state machine.
+ // This identifier is used for routing network responses to the state machine.
+ ProbeQueryID = coordt.QueryID("probe")
+)
+
+// A RoutingBehaviour provides the behaviours for bootstrapping and maintaining a DHT's routing table.
 type RoutingBehaviour struct {
 // self is the peer id of the system the dht is running on
 self kadt.PeerID
@@ -28,6 +38,9 @@ type RoutingBehaviour struct {
 // probe is the node probing state machine, responsible for periodically checking connectivity of nodes in the routing table
 probe coordt.StateMachine[routing.ProbeEvent, routing.ProbeState]
 
+ // explore is the routing table explore state machine, responsible for increasing the occupancy of the routing table
+ explore coordt.StateMachine[routing.ExploreEvent, routing.ExploreState]
+
 pendingMu sync.Mutex
 pending []BehaviourEvent
 ready chan struct{}
@@ -41,6 +54,7 @@ func NewRoutingBehaviour(
 bootstrap coordt.StateMachine[routing.BootstrapEvent, routing.BootstrapState],
 include coordt.StateMachine[routing.IncludeEvent, routing.IncludeState],
 probe coordt.StateMachine[routing.ProbeEvent, routing.ProbeState],
+ explore coordt.StateMachine[routing.ExploreEvent, routing.ExploreState],
 logger *slog.Logger,
 tracer trace.Tracer,
) *RoutingBehaviour {
@@ -49,6 +63,7 @@ func NewRoutingBehaviour(
 bootstrap: bootstrap,
 include: include,
 probe: probe,
+ explore: explore,
 ready: make(chan struct{}, 1),
 logger: logger.With("behaviour", "routing"),
 tracer: tracer,
@@ -112,7 +127,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) {
 case *EventGetCloserNodesSuccess:
 span.SetAttributes(attribute.String("event", "EventGetCloserNodesSuccess"), attribute.String("queryid", string(ev.QueryID)), attribute.String("nodeid", ev.To.String()))
 switch ev.QueryID {
- case "bootstrap":
+ case routing.BootstrapQueryID:
 for _, info := range ev.CloserNodes {
 // TODO: do this after advancing bootstrap
 r.pending = append(r.pending, &EventAddNode{
@@ -129,7 +144,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) {
 r.pending = append(r.pending, next)
 }
 
- case "include":
+ case IncludeQueryID:
 var cmd routing.IncludeEvent
 // require that the node responded with at least one closer node
 if len(ev.CloserNodes) > 0 {
@@ -148,7 +163,7 @@ func (r
*RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { r.pending = append(r.pending, next) } - case "probe": + case ProbeQueryID: var cmd routing.ProbeEvent // require that the node responded with at least one closer node if len(ev.CloserNodes) > 0 { @@ -167,6 +182,21 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { r.pending = append(r.pending, next) } + case routing.ExploreQueryID: + for _, info := range ev.CloserNodes { + r.pending = append(r.pending, &EventAddNode{ + NodeID: info, + }) + } + cmd := &routing.EventExploreFindCloserResponse[kadt.Key, kadt.PeerID]{ + NodeID: ev.To, + CloserNodes: ev.CloserNodes, + } + next, ok := r.advanceExplore(ctx, cmd) + if ok { + r.pending = append(r.pending, next) + } + default: panic(fmt.Sprintf("unexpected query id: %s", ev.QueryID)) } @@ -174,7 +204,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { span.SetAttributes(attribute.String("event", "EventGetCloserNodesFailure"), attribute.String("queryid", string(ev.QueryID)), attribute.String("nodeid", ev.To.String())) span.RecordError(ev.Err) switch ev.QueryID { - case "bootstrap": + case routing.BootstrapQueryID: cmd := &routing.EventBootstrapFindCloserFailure[kadt.Key, kadt.PeerID]{ NodeID: ev.To, Error: ev.Err, @@ -184,7 +214,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { if ok { r.pending = append(r.pending, next) } - case "include": + case IncludeQueryID: cmd := &routing.EventIncludeConnectivityCheckFailure[kadt.Key, kadt.PeerID]{ NodeID: ev.To, Error: ev.Err, @@ -194,7 +224,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { if ok { r.pending = append(r.pending, next) } - case "probe": + case ProbeQueryID: cmd := &routing.EventProbeConnectivityCheckFailure[kadt.Key, kadt.PeerID]{ NodeID: ev.To, Error: ev.Err, @@ -204,6 +234,16 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { if ok { r.pending = append(r.pending, next) } + case routing.ExploreQueryID: + cmd := &routing.EventExploreFindCloserFailure[kadt.Key, kadt.PeerID]{ + NodeID: ev.To, + Error: ev.Err, + } + // attempt to advance the explore + next, ok := r.advanceExplore(ctx, cmd) + if ok { + r.pending = append(r.pending, next) + } default: panic(fmt.Sprintf("unexpected query id: %s", ev.QueryID)) @@ -236,12 +276,14 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { // tell the probe state machine to remove the node from the routing table and probe list cmdProbe := &routing.EventProbeRemove[kadt.Key, kadt.PeerID]{ - NodeID: kadt.PeerID(ev.NodeID), + NodeID: ev.NodeID, } nextProbe, ok := r.advanceProbe(ctx, cmdProbe) if ok { r.pending = append(r.pending, nextProbe) } + case *EventRoutingPoll: + r.pollChildren(ctx) default: panic(fmt.Sprintf("unexpected dht event: %T", ev)) @@ -283,21 +325,7 @@ func (r *RoutingBehaviour) Perform(ctx context.Context) (BehaviourEvent, bool) { } // poll the child state machines in priority order to give each an opportunity to perform work - - ev, ok := r.advanceBootstrap(ctx, &routing.EventBootstrapPoll{}) - if ok { - return ev, true - } - - ev, ok = r.advanceInclude(ctx, &routing.EventIncludePoll{}) - if ok { - return ev, true - } - - ev, ok = r.advanceProbe(ctx, &routing.EventProbePoll{}) - if ok { - return ev, true - } + r.pollChildren(ctx) // finally check if any pending events were accumulated in the meantime if len(r.pending) == 0 { @@ -306,6 +334,29 @@ func (r *RoutingBehaviour) Perform(ctx context.Context) 
(BehaviourEvent, bool) {
 }
 }
 
+// pollChildren must only be called while r.pendingMu is locked
+func (r *RoutingBehaviour) pollChildren(ctx context.Context) {
+ ev, ok := r.advanceBootstrap(ctx, &routing.EventBootstrapPoll{})
+ if ok {
+ r.pending = append(r.pending, ev)
+ }
+
+ ev, ok = r.advanceInclude(ctx, &routing.EventIncludePoll{})
+ if ok {
+ r.pending = append(r.pending, ev)
+ }
+
+ ev, ok = r.advanceProbe(ctx, &routing.EventProbePoll{})
+ if ok {
+ r.pending = append(r.pending, ev)
+ }
+
+ ev, ok = r.advanceExplore(ctx, &routing.EventExplorePoll{})
+ if ok {
+ r.pending = append(r.pending, ev)
+ }
+}
+
 func (r *RoutingBehaviour) advanceBootstrap(ctx context.Context, ev routing.BootstrapEvent) (BehaviourEvent, bool) {
 ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.advanceBootstrap")
 defer span.End()
@@ -314,7 +365,7 @@ func (r *RoutingBehaviour) advanceBootstrap(ctx context.Context, ev routing.Boot
 
 case *routing.StateBootstrapFindCloser[kadt.Key, kadt.PeerID]:
 return &EventOutboundGetCloserNodes{
- QueryID: "bootstrap",
+ QueryID: routing.BootstrapQueryID,
 To: st.NodeID,
 Target: st.Target,
 Notify: r,
@@ -345,7 +396,7 @@ func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.Includ
 span.SetAttributes(attribute.String("out_event", "EventOutboundGetCloserNodes"))
 // include wants to send a find node message to a node
 return &EventOutboundGetCloserNodes{
- QueryID: "include",
+ QueryID: IncludeQueryID,
 To: st.NodeID,
 Target: st.NodeID.Key(),
 Notify: r,
@@ -387,7 +438,7 @@ func (r *RoutingBehaviour) advanceProbe(ctx context.Context, ev routing.ProbeEve
 case *routing.StateProbeConnectivityCheck[kadt.Key, kadt.PeerID]:
 // include wants to send a find node message to a node
 return &EventOutboundGetCloserNodes{
- QueryID: "probe",
+ QueryID: ProbeQueryID,
 To: st.NodeID,
 Target: st.NodeID.Key(),
 Notify: r,
@@ -419,3 +470,34 @@ func (r *RoutingBehaviour) advanceProbe(ctx context.Context, ev routing.ProbeEve
 
 return nil, false
 }
+
+func (r *RoutingBehaviour) advanceExplore(ctx context.Context, ev routing.ExploreEvent) (BehaviourEvent, bool) {
+ ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.advanceExplore")
+ defer span.End()
+ bstate := r.explore.Advance(ctx, ev)
+ switch st := bstate.(type) {
+
+ case *routing.StateExploreFindCloser[kadt.Key, kadt.PeerID]:
+ return &EventOutboundGetCloserNodes{
+ QueryID: routing.ExploreQueryID,
+ To: st.NodeID,
+ Target: st.Target,
+ Notify: r,
+ }, true
+
+ case *routing.StateExploreWaiting:
+ // explore waiting for a message response, nothing to do
+ case *routing.StateExploreQueryFinished:
+ // nothing to do except notify via telemetry
+ case *routing.StateExploreQueryTimeout:
+ // nothing to do except notify via telemetry
+ case *routing.StateExploreFailure:
+ r.logger.Warn("explore failure", "cpl", st.Cpl, "error", st.Error)
+ case *routing.StateExploreIdle:
+ // explore not running, nothing to do
+ default:
+ panic(fmt.Sprintf("unexpected explore state: %T", st))
+ }
+
+ return nil, false
+}
diff --git a/v2/internal/coord/routing/bootstrap.go b/v2/internal/coord/routing/bootstrap.go
index 8580fbc1..9b098f0b 100644
--- a/v2/internal/coord/routing/bootstrap.go
+++ b/v2/internal/coord/routing/bootstrap.go
@@ -16,6 +16,9 @@ import (
 "github.com/libp2p/go-libp2p-kad-dht/v2/tele"
 )
 
+// BootstrapQueryID is the id for the query operated by the bootstrap process.
+const BootstrapQueryID = coordt.QueryID("bootstrap")
+
 type Bootstrap[K kad.Key[K], N kad.NodeID[K]] struct {
 // self is the node id of the system the bootstrap is running
on
 self N
@@ -107,9 +110,7 @@ func (b *Bootstrap[K, N]) Advance(ctx context.Context, ev BootstrapEvent) Bootst
 qryCfg.Concurrency = b.cfg.RequestConcurrency
 qryCfg.RequestTimeout = b.cfg.RequestTimeout
 
- queryID := coordt.QueryID("bootstrap")
-
- qry, err := query.NewFindCloserQuery[K, N, any](b.self, queryID, b.self.Key(), iter, tev.KnownClosestNodes, qryCfg)
+ qry, err := query.NewFindCloserQuery[K, N, any](b.self, BootstrapQueryID, b.self.Key(), iter, tev.KnownClosestNodes, qryCfg)
 if err != nil {
 // TODO: don't panic
 panic(err)
diff --git a/v2/internal/coord/routing/explore.go b/v2/internal/coord/routing/explore.go
new file mode 100644
index 00000000..954dd6aa
--- /dev/null
+++ b/v2/internal/coord/routing/explore.go
@@ -0,0 +1,485 @@
+package routing
+
+import (
+ "container/heap"
+ "context"
+ "fmt"
+ "math/rand"
+ "time"
+
+ "github.com/benbjohnson/clock"
+ "github.com/plprobelab/go-kademlia/kad"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+
+ "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt"
+ "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query"
+ "github.com/libp2p/go-libp2p-kad-dht/v2/tele"
+)
+
+// ExploreQueryID is the id for the query operated by the explore process.
+const ExploreQueryID = coordt.QueryID("explore")
+
+// The Explore state machine is used to discover new nodes at various distances from the local node in order to improve
+// the occupancy of routing table buckets.
+//
+// For each bucket a random key is generated that would occupy the bucket and a query is initiated to find the nodes close
+// to it. Discovered nodes are added to the candidate queue for inclusion in the routing table. In the course of the
+// query, discovered nodes may also be included in buckets other than the one being processed.
+//
+// The explore operation processes buckets in order of distance from the local node and waits for query completion
+// before proceeding to the next bucket.
+//
+// The frequency of running an explore varies by bucket distance, such that closer buckets are processed more frequently.
+type Explore[K kad.Key[K], N kad.NodeID[K]] struct {
+ // self is the node id of the system the explore is running on
+ self N
+
+ // rt is the local routing table
+ rt RoutingTableCpl[K, N]
+
+ cplFn NodeIDForCplFunc[K, N]
+
+ // qry is the query used by the explore process
+ qry *query.Query[K, N, any]
+
+ // qryCpl is the cpl the current query is exploring for
+ qryCpl int
+
+ // cfg is a copy of the optional configuration supplied to the Explore
+ cfg ExploreConfig
+
+ schedule ExploreSchedule
+}
+
+// NodeIDForCplFunc is a function that given a cpl generates a [kad.NodeID] with a key that has
+// a common prefix length with k of length cpl.
+// Invariant: CommonPrefixLength(k, node.Key()) = cpl
+type NodeIDForCplFunc[K kad.Key[K], N kad.NodeID[K]] func(k K, cpl int) (N, error)
+
+// An ExploreSchedule provides an ordering for explorations of each cpl in a routing table.
+type ExploreSchedule interface {
+ // NextCpl returns the first cpl to be explored whose due time is before or equal to the given time.
+ // The due time of the cpl should be updated by its designated interval so that its next due time is increased.
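+ // Returning a cpl therefore also reschedules it, so callers may call NextCpl whenever they are
+ // ready to start a new explore query.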
+ // If no cpl is due at the given time, NextCpl should return -1, false.
+ NextCpl(ts time.Time) (int, bool)
+}
+
+// ExploreConfig specifies optional configuration for an [Explore]
+type ExploreConfig struct {
+ // Clock is a clock that may be replaced by a mock when testing
+ Clock clock.Clock
+
+ // Timeout is the maximum time to allow for performing an explore for a CPL.
+ Timeout time.Duration
+}
+
+// Validate checks the configuration options and returns an error if any have invalid values.
+func (cfg *ExploreConfig) Validate() error {
+ if cfg.Clock == nil {
+ return fmt.Errorf("clock must not be nil")
+ }
+
+ if cfg.Timeout < 1 {
+ return fmt.Errorf("timeout must be greater than zero")
+ }
+
+ return nil
+}
+
+// DefaultExploreConfig returns the default configuration options for an [Explore].
+// Options may be overridden before passing to [NewExplore].
+func DefaultExploreConfig() *ExploreConfig {
+ return &ExploreConfig{
+ Clock: clock.New(), // use standard time
+ Timeout: 10 * time.Minute, // MAGIC
+ }
+}
+
+func NewExplore[K kad.Key[K], N kad.NodeID[K]](self N, rt RoutingTableCpl[K, N], cplFn NodeIDForCplFunc[K, N], schedule ExploreSchedule, cfg *ExploreConfig) (*Explore[K, N], error) {
+ if cfg == nil {
+ cfg = DefaultExploreConfig()
+ } else if err := cfg.Validate(); err != nil {
+ return nil, err
+ }
+
+ e := &Explore[K, N]{
+ self: self,
+ cplFn: cplFn,
+ rt: rt,
+ cfg: *cfg,
+ qryCpl: -1,
+ schedule: schedule,
+ }
+
+ return e, nil
+}
+
+// Advance advances the state of the explore by attempting to advance its query if running.
+func (e *Explore[K, N]) Advance(ctx context.Context, ev ExploreEvent) ExploreState {
+ ctx, span := tele.StartSpan(ctx, "Explore.Advance", trace.WithAttributes(tele.AttrInEvent(ev)))
+ defer span.End()
+
+ switch tev := ev.(type) {
+ case *EventExplorePoll:
+ // ignore, nothing to do
+ case *EventExploreFindCloserResponse[K, N]:
+ return e.advanceQuery(ctx, &query.EventQueryNodeResponse[K, N]{
+ NodeID: tev.NodeID,
+ CloserNodes: tev.CloserNodes,
+ })
+ case *EventExploreFindCloserFailure[K, N]:
+ span.RecordError(tev.Error)
+ return e.advanceQuery(ctx, &query.EventQueryNodeFailure[K, N]{
+ NodeID: tev.NodeID,
+ Error: tev.Error,
+ })
+ default:
+ panic(fmt.Sprintf("unexpected event: %T", tev))
+ }
+
+ // if query is running, give it a chance to advance
+ if e.qry != nil {
+ return e.advanceQuery(ctx, &query.EventQueryPoll{})
+ }
+
+ // is an explore due yet?
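+ // (NextCpl reschedules any cpl it returns, so a cpl obtained here should be explored immediately)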
+ cpl, ok := e.schedule.NextCpl(e.cfg.Clock.Now())
+ if !ok {
+ return &StateExploreIdle{}
+ }
+
+ // start an explore query by synthesizing a node whose key has the appropriate cpl
+ node, err := e.cplFn(e.self.Key(), cpl)
+ if err != nil {
+ return &StateExploreFailure{
+ Cpl: cpl,
+ Error: fmt.Errorf("synthesize random node for cpl %d: %w", cpl, err),
+ }
+ }
+ seeds := e.rt.NearestNodes(node.Key(), 20)
+
+ iter := query.NewClosestNodesIter[K, N](e.self.Key())
+
+ qryCfg := query.DefaultQueryConfig()
+ qryCfg.Clock = e.cfg.Clock
+ // qryCfg.Concurrency = b.cfg.RequestConcurrency
+ // qryCfg.RequestTimeout = b.cfg.RequestTimeout
+
+ qry, err := query.NewFindCloserQuery[K, N, any](e.self, ExploreQueryID, node.Key(), iter, seeds, qryCfg)
+ if err != nil {
+ return &StateExploreFailure{
+ Cpl: cpl,
+ Error: fmt.Errorf("start explore query for cpl %d: %w", cpl, err),
+ }
+ }
+ e.qry = qry
+ e.qryCpl = cpl
+
+ return e.advanceQuery(ctx, &query.EventQueryPoll{})
+}
+
+func (e *Explore[K, N]) advanceQuery(ctx context.Context, qev query.QueryEvent) ExploreState {
+ ctx, span := tele.StartSpan(ctx, "Explore.advanceQuery")
+ defer span.End()
+ state := e.qry.Advance(ctx, qev)
+ switch st := state.(type) {
+ case *query.StateQueryFindCloser[K, N]:
+ return &StateExploreFindCloser[K, N]{
+ Cpl: e.qryCpl,
+ QueryID: st.QueryID,
+ Stats: st.Stats,
+ NodeID: st.NodeID,
+ Target: st.Target,
+ }
+ case *query.StateQueryFinished[K, N]:
+ span.SetAttributes(attribute.String("out_state", "StateExploreFinished"))
+ cpl := e.qryCpl // capture the explored cpl before resetting it
+ e.qry = nil
+ e.qryCpl = -1
+ return &StateExploreQueryFinished{
+ Cpl: cpl,
+ Stats: st.Stats,
+ }
+ case *query.StateQueryWaitingAtCapacity:
+ elapsed := e.cfg.Clock.Since(st.Stats.Start)
+ if elapsed > e.cfg.Timeout {
+ span.SetAttributes(attribute.String("out_state", "StateExploreTimeout"))
+ cpl := e.qryCpl // capture the explored cpl before resetting it
+ e.qry = nil
+ e.qryCpl = -1
+ return &StateExploreQueryTimeout{
+ Cpl: cpl,
+ Stats: st.Stats,
+ }
+ }
+ span.SetAttributes(attribute.String("out_state", "StateExploreWaiting"))
+ return &StateExploreWaiting{
+ Cpl: e.qryCpl,
+ Stats: st.Stats,
+ }
+ case *query.StateQueryWaitingWithCapacity:
+ elapsed := e.cfg.Clock.Since(st.Stats.Start)
+ if elapsed > e.cfg.Timeout {
+ span.SetAttributes(attribute.String("out_state", "StateExploreTimeout"))
+ cpl := e.qryCpl // capture the explored cpl before resetting it
+ e.qry = nil
+ e.qryCpl = -1
+ return &StateExploreQueryTimeout{
+ Cpl: cpl,
+ Stats: st.Stats,
+ }
+ }
+ span.SetAttributes(attribute.String("out_state", "StateExploreWaiting"))
+ return &StateExploreWaiting{
+ Cpl: e.qryCpl,
+ Stats: st.Stats,
+ }
+ default:
+ panic(fmt.Sprintf("unexpected state: %T", st))
+ }
+}
+
+// ExploreState is the state of an [Explore].
+type ExploreState interface {
+ exploreState()
+}
+
+// StateExploreIdle indicates that the explore is not running its query.
+type StateExploreIdle struct{}
+
+// StateExploreFindCloser indicates that the explore query wants to send a find closer nodes message to a node.
+type StateExploreFindCloser[K kad.Key[K], N kad.NodeID[K]] struct {
+ Cpl int // the cpl being explored
+ QueryID coordt.QueryID
+ Target K // the key that the query wants to find closer nodes for
+ NodeID N // the node to send the message to
+ Stats query.QueryStats
+}
+
+// StateExploreWaiting indicates that the explore query is waiting for a response.
+type StateExploreWaiting struct {
+ Cpl int // the cpl being explored
+ Stats query.QueryStats
+}
+
+// StateExploreQueryFinished indicates that an explore query has finished.
+type StateExploreQueryFinished struct {
+ Cpl int // the cpl being explored
+ Stats query.QueryStats
+}
+
+// StateExploreQueryTimeout indicates that an explore query has timed out.
+type StateExploreQueryTimeout struct {
+ Cpl int // the cpl being explored
+ Stats query.QueryStats
+}
+
+// StateExploreFailure indicates that the explore state machine encountered a failure condition when
+// attempting to explore a cpl.
+type StateExploreFailure struct {
+ Cpl int // the cpl being explored
+ Error error
+}
+
+// exploreState() ensures that only [Explore] states can be assigned to an [ExploreState].
+func (*StateExploreIdle) exploreState() {}
+func (*StateExploreFindCloser[K, N]) exploreState() {}
+func (*StateExploreWaiting) exploreState() {}
+func (*StateExploreQueryFinished) exploreState() {}
+func (*StateExploreQueryTimeout) exploreState() {}
+func (*StateExploreFailure) exploreState() {}
+
+// ExploreEvent is an event intended to advance the state of an [Explore].
+type ExploreEvent interface {
+ exploreEvent()
+}
+
+// EventExplorePoll is an event that signals the explore that it can perform housekeeping work such as time out queries.
+type EventExplorePoll struct{}
+
+// EventExploreFindCloserResponse notifies an explore that an attempt to find closer nodes has received a successful response.
+type EventExploreFindCloserResponse[K kad.Key[K], N kad.NodeID[K]] struct {
+ NodeID N // the node the message was sent to
+ CloserNodes []N // the closer nodes sent by the node
+}
+
+// EventExploreFindCloserFailure notifies an explore that an attempt to find closer nodes has failed.
+type EventExploreFindCloserFailure[K kad.Key[K], N kad.NodeID[K]] struct {
+ NodeID N // the node the message was sent to
+ Error error // the error that caused the failure, if any
+}
+
+// exploreEvent() ensures that only [Explore] events can be assigned to an [ExploreEvent].
+func (*EventExplorePoll) exploreEvent() {}
+func (*EventExploreFindCloserResponse[K, N]) exploreEvent() {}
+func (*EventExploreFindCloserFailure[K, N]) exploreEvent() {}
+
+type exploreEntry struct {
+ Cpl int // the longest common prefix length shared with the routing table's key
+ Due time.Time // the time at which the next explore operation for this cpl is due
+}
+
+// exploreList is a min-heap of exploreEntry ordered by Due
+type exploreList []*exploreEntry
+
+func (l exploreList) Len() int { return len(l) }
+
+func (l exploreList) Less(i, j int) bool {
+ // if due times are equal, then sort lower cpls first
+ if l[i].Due.Equal(l[j].Due) {
+ return l[i].Cpl < l[j].Cpl
+ }
+
+ return l[i].Due.Before(l[j].Due)
+}
+
+func (l exploreList) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+func (l *exploreList) Push(x any) {
+ v := x.(*exploreEntry)
+ *l = append(*l, v)
+}
+
+func (l *exploreList) Pop() any {
+ if len(*l) == 0 {
+ return nil
+ }
+ old := *l
+ n := len(old)
+ v := old[n-1]
+ old[n-1] = nil
+ *l = old[0 : n-1]
+ return v
+}
+
+// A DynamicExploreSchedule calculates an explore schedule dynamically
+type DynamicExploreSchedule struct {
+ // maxCpl is the maximum CPL (common prefix length) that will be scheduled.
+ maxCpl int
+
+ // interval is the minimum time interval to leave between explorations of the same CPL.
+ interval time.Duration
+
+ // multiplier is a factor that is applied to interval for CPLs lower than the maximum
+ multiplier float64
+
+ // jitter is a factor that is used to increase the calculated interval for the next explore
+ // operation by a small random amount.
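+ // For example, with a base interval of one hour, a jitter of 0.01 defers each due time by up to 36 seconds.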
+ jitter float64
+
+ // cpls is a list of cpls ordered by the time the next explore is due
+ cpls *exploreList
+}
+
+// NewDynamicExploreSchedule creates a new dynamic explore schedule.
+//
+// maxCpl is the maximum CPL (common prefix length) that will be scheduled.
+// interval is the base time interval to leave between explorations of the same CPL.
+// multiplier is a factor that is applied to interval for CPLs lower than the maximum to increase the interval between
+// explorations for lower CPLs (which contain nodes that are more distant).
+// jitter is a factor that is used to increase the calculated interval for the next explore
+// operation by a small random amount. It must be between 0 and 0.05. When zero, no jitter is applied.
+//
+// The interval to the next explore is calculated using the following formula:
+//
+// interval + (maxCpl - CPL) x interval x multiplier + interval x rand(jitter)
+//
+// For example, given a max CPL of 14, an interval of 1 hour, a multiplier of 1 and 0 jitter the following
+// schedule will be created:
+//
+// CPL 14 explored every hour
+// CPL 13 explored every two hours
+// CPL 12 explored every three hours
+// ...
+// CPL 0 explored every 15 hours
+//
+// For example, given a max CPL of 14, an interval of 1 hour, a multiplier of 1.5 and 0.01 jitter the following
+// schedule will be created:
+//
+// CPL 14 explored every 1 hour + up to 36 seconds random jitter
+// CPL 13 explored every 2.5 hours + up to 36 seconds random jitter
+// CPL 12 explored every 4 hours + up to 36 seconds random jitter
+// ...
+// CPL 0 explored every 22 hours + up to 36 seconds random jitter
+func NewDynamicExploreSchedule(maxCpl int, start time.Time, interval time.Duration, multiplier float64, jitter float64) (*DynamicExploreSchedule, error) {
+ if maxCpl < 1 {
+ return nil, fmt.Errorf("maximum cpl must be greater than zero")
+ }
+
+ if interval < 1 {
+ return nil, fmt.Errorf("interval must be greater than zero")
+ }
+
+ if multiplier < 1 {
+ return nil, fmt.Errorf("interval multiplier must be greater than or equal to one")
+ }
+
+ if jitter < 0 {
+ return nil, fmt.Errorf("interval jitter must not be negative")
+ }
+
+ if jitter > 0.05 {
+ return nil, fmt.Errorf("interval jitter must not be greater than 0.05")
+ }
+
+ s := &DynamicExploreSchedule{
+ maxCpl: maxCpl,
+ interval: interval,
+ multiplier: multiplier,
+ jitter: jitter,
+ cpls: new(exploreList),
+ }
+
+ // build the initial schedule
+ for cpl := maxCpl; cpl >= 0; cpl-- {
+ *s.cpls = append(*s.cpls, &exploreEntry{
+ Cpl: cpl,
+ Due: start.Add(s.cplInterval(cpl)),
+ })
+ }
+ heap.Init(s.cpls)
+
+ return s, nil
+}
+
+func (s *DynamicExploreSchedule) NextCpl(ts time.Time) (int, bool) {
+ // is an explore due yet?
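+ // peek at the root of the min-heap, which holds the entry with the earliest due time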
+ next := (*s.cpls)[0]
+ if next.Due.After(ts) {
+ return -1, false
+ }
+ // update its schedule
+ next.Due = ts.Add(s.cplInterval(next.Cpl))
+ heap.Fix(s.cpls, 0) // update the heap
+ return next.Cpl, true
+}
+
+// cplInterval calculates the explore interval for a given cpl
+func (s *DynamicExploreSchedule) cplInterval(cpl int) time.Duration {
+ interval := float64(s.interval)
+ interval += float64(s.interval) * float64(s.maxCpl-cpl) * s.multiplier
+ interval += float64(s.interval) * s.jitter * rand.Float64()
+ return time.Duration(interval)
+}
+
+// A NoWaitExploreSchedule implements an explore schedule that cycles through each cpl without delays
+type NoWaitExploreSchedule struct {
+ maxCpl int
+ nextCpl int
+}
+
+func NewNoWaitExploreSchedule(maxCpl int) *NoWaitExploreSchedule {
+ return &NoWaitExploreSchedule{
+ maxCpl: maxCpl,
+ nextCpl: maxCpl,
+ }
+}
+
+func (n *NoWaitExploreSchedule) NextCpl(ts time.Time) (int, bool) {
+ next := n.nextCpl
+ n.nextCpl--
+ if n.nextCpl < 0 {
+ n.nextCpl = n.maxCpl
+ }
+ return next, true
+}
diff --git a/v2/internal/coord/routing/explore_test.go b/v2/internal/coord/routing/explore_test.go
new file mode 100644
index 00000000..f2a6903d
--- /dev/null
+++ b/v2/internal/coord/routing/explore_test.go
@@ -0,0 +1,387 @@
+package routing
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/benbjohnson/clock"
+ "github.com/plprobelab/go-kademlia/routing/simplert"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/tiny"
+)
+
+func TestExploreConfigValidate(t *testing.T) {
+ t.Run("default is valid", func(t *testing.T) {
+ cfg := DefaultExploreConfig()
+ require.NoError(t, cfg.Validate())
+ })
+
+ t.Run("clock is not nil", func(t *testing.T) {
+ cfg := DefaultExploreConfig()
+ cfg.Clock = nil
+ require.Error(t, cfg.Validate())
+ })
+
+ t.Run("timeout positive", func(t *testing.T) {
+ cfg := DefaultExploreConfig()
+ cfg.Timeout = 0
+ require.Error(t, cfg.Validate())
+ cfg.Timeout = -1
+ require.Error(t, cfg.Validate())
+ })
+}
+
+// maxCpl is 7 since we are using tiny 8-bit keys
+const maxCplTinyKeys = 7
+
+func DefaultDynamicSchedule(t *testing.T, clk clock.Clock) *DynamicExploreSchedule {
+ t.Helper()
+ s, err := NewDynamicExploreSchedule(maxCplTinyKeys, clk.Now(), time.Hour, 1, 0)
+ require.NoError(t, err)
+ return s
+}
+
+func TestDynamicExploreSchedule(t *testing.T) {
+ testCases := []struct {
+ interval time.Duration
+ multiplier float64
+ }{
+ {
+ interval: time.Hour,
+ multiplier: 1,
+ },
+ {
+ interval: time.Hour,
+ multiplier: 1.1,
+ },
+ {
+ interval: time.Hour,
+ multiplier: 2,
+ },
+ }
+
+ // test invariants
+ for _, tc := range testCases {
+ clk := clock.NewMock()
+ maxCpl := 20
+
+ s, err := NewDynamicExploreSchedule(maxCpl, clk.Now(), tc.interval, tc.multiplier, 0)
+ require.NoError(t, err)
+
+ intervals := make([]time.Duration, 0, maxCpl+1)
+ cpl := maxCpl
+ for cpl >= 0 {
+ intervals = append(intervals, s.cplInterval(cpl))
+ cpl--
+ }
+
+ // higher cpls must have a shorter interval than lower cpls
+ assert.IsIncreasing(t, intervals)
+
+ // intervals increase by at least one base interval for each cpl:
+ // the interval for cpl[x-1] is at least twice the interval of cpl[x]
+ // and cpl[x-2] is at least three times larger than cpl[x]
+ for i := 1; i < len(intervals); i++ {
+ assert.GreaterOrEqual(t, intervals[i], intervals[0]*time.Duration(i+1))
+ }
+
+ }
+}
+
+func TestExploreStartsIdle(t *testing.T) {
+ ctx := context.Background()
+ clk := 
clock.NewMock() + cfg := DefaultExploreConfig() + cfg.Clock = clk + + self := tiny.NewNode(128) + rt := simplert.New[tiny.Key, tiny.Node](self, 5) + schedule := DefaultDynamicSchedule(t, clk) + ex, err := NewExplore[tiny.Key, tiny.Node](self, rt, tiny.NodeWithCpl, schedule, cfg) + require.NoError(t, err) + + state := ex.Advance(ctx, &EventExplorePoll{}) + require.IsType(t, &StateExploreIdle{}, state) +} + +func TestExploreFirstQueriesForMaximumCpl(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultExploreConfig() + cfg.Clock = clk + + self := tiny.NewNode(128) + rt := simplert.New[tiny.Key, tiny.Node](self, 5) + + // populate the routing table with at least one node + a := tiny.NewNode(4) + rt.AddNode(a) + + schedule := DefaultDynamicSchedule(t, clk) + ex, err := NewExplore[tiny.Key, tiny.Node](self, rt, tiny.NodeWithCpl, schedule, cfg) + require.NoError(t, err) + + state := ex.Advance(ctx, &EventExplorePoll{}) + require.IsType(t, &StateExploreIdle{}, state) + + // advance the clock to the due time of the first explore that should be started + clk.Add(schedule.cplInterval(schedule.maxCpl)) + + // explore should now start the explore query + state = ex.Advance(ctx, &EventExplorePoll{}) + require.IsType(t, &StateExploreFindCloser[tiny.Key, tiny.Node]{}, state) + + // the query should attempt to contact the node it was given + st := state.(*StateExploreFindCloser[tiny.Key, tiny.Node]) + + // the query should have the correct ID + require.Equal(t, ExploreQueryID, st.QueryID) + + // with the correct cpl + require.Equal(t, schedule.maxCpl, st.Cpl) + + // the query should attempt to look for nodes near a key with the maximum cpl + require.Equal(t, schedule.maxCpl, self.Key().CommonPrefixLength(st.Target)) + + // the query should be contacting the nearest known node + require.Equal(t, a, st.NodeID) + + // now the explore reports that it is waiting + state = ex.Advance(ctx, &EventExplorePoll{}) + require.IsType(t, &StateExploreWaiting{}, state) +} + +func TestExploreFindCloserResponse(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + + cfg := DefaultExploreConfig() + cfg.Clock = clk + + self := tiny.NewNode(128) + rt := simplert.New[tiny.Key, tiny.Node](self, 5) + + // populate the routing table with at least one node + a := tiny.NewNode(4) + rt.AddNode(a) + + start := clk.Now() + + schedule := DefaultDynamicSchedule(t, clk) + ex, err := NewExplore[tiny.Key, tiny.Node](self, rt, tiny.NodeWithCpl, schedule, cfg) + require.NoError(t, err) + + state := ex.Advance(ctx, &EventExplorePoll{}) + require.IsType(t, &StateExploreIdle{}, state) + + // advance the clock to the due time of the first explore that should be started + interval1 := schedule.cplInterval(schedule.maxCpl) + clk.Set(start.Add(interval1)) + + // explore should now start the explore query + state = ex.Advance(ctx, &EventExplorePoll{}) + require.IsType(t, &StateExploreFindCloser[tiny.Key, tiny.Node]{}, state) + + // now the explore reports that it is waiting + state = ex.Advance(ctx, &EventExplorePoll{}) + require.IsType(t, &StateExploreWaiting{}, state) + + // notify explore that node was contacted successfully, but no closer nodes + state = ex.Advance(ctx, &EventExploreFindCloserResponse[tiny.Key, tiny.Node]{ + NodeID: a, + }) + require.IsType(t, &StateExploreQueryFinished{}, state) +} + +func TestExploreFindCloserFailure(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + + cfg := DefaultExploreConfig() + cfg.Clock = clk + + self := tiny.NewNode(128) + rt := 
simplert.New[tiny.Key, tiny.Node](self, 5)
+
+ // populate the routing table with at least one node
+ a := tiny.NewNode(4)
+ rt.AddNode(a)
+
+ start := clk.Now()
+
+ schedule := DefaultDynamicSchedule(t, clk)
+ ex, err := NewExplore[tiny.Key, tiny.Node](self, rt, tiny.NodeWithCpl, schedule, cfg)
+ require.NoError(t, err)
+
+ state := ex.Advance(ctx, &EventExplorePoll{})
+ require.IsType(t, &StateExploreIdle{}, state)
+
+ // advance the clock to the due time of the first explore that should be started
+ interval1 := schedule.cplInterval(schedule.maxCpl)
+ clk.Set(start.Add(interval1))
+
+ // explore should now start the explore query
+ state = ex.Advance(ctx, &EventExplorePoll{})
+ require.IsType(t, &StateExploreFindCloser[tiny.Key, tiny.Node]{}, state)
+
+ // now the explore reports that it is waiting
+ state = ex.Advance(ctx, &EventExplorePoll{})
+ require.IsType(t, &StateExploreWaiting{}, state)
+
+ // notify explore that the node could not be contacted
+ state = ex.Advance(ctx, &EventExploreFindCloserFailure[tiny.Key, tiny.Node]{
+ NodeID: a,
+ })
+ require.IsType(t, &StateExploreQueryFinished{}, state)
+}
+
+func TestExploreProgress(t *testing.T) {
+ ctx := context.Background()
+ clk := clock.NewMock()
+
+ cfg := DefaultExploreConfig()
+ cfg.Clock = clk
+
+ self := tiny.NewNode(128)
+ rt := simplert.New[tiny.Key, tiny.Node](self, 5)
+
+ a := tiny.NewNode(4) // 4
+ b := tiny.NewNode(8) // 8
+ c := tiny.NewNode(16) // 16
+
+ // ensure the order of the known nodes
+ require.True(t, self.Key().Xor(a.Key()).Compare(self.Key().Xor(b.Key())) == -1)
+ require.True(t, self.Key().Xor(b.Key()).Compare(self.Key().Xor(c.Key())) == -1)
+
+ // populate the routing table with at least one node
+ rt.AddNode(a)
+
+ start := clk.Now()
+
+ schedule := DefaultDynamicSchedule(t, clk)
+ ex, err := NewExplore[tiny.Key, tiny.Node](self, rt, tiny.NodeWithCpl, schedule, cfg)
+ require.NoError(t, err)
+
+ state := ex.Advance(ctx, &EventExplorePoll{})
+ require.IsType(t, &StateExploreIdle{}, state)
+
+ // advance the clock to the due time of the first explore that should be started
+ interval1 := schedule.cplInterval(schedule.maxCpl)
+ clk.Set(start.Add(interval1))
+
+ // explore should now start the explore query
+ state = ex.Advance(ctx, &EventExplorePoll{})
+ require.IsType(t, &StateExploreFindCloser[tiny.Key, tiny.Node]{}, state)
+
+ // the query should attempt to contact the node it was given
+ st := state.(*StateExploreFindCloser[tiny.Key, tiny.Node])
+ require.Equal(t, a, st.NodeID)
+
+ // now the explore reports that it is waiting
+ state = ex.Advance(ctx, &EventExplorePoll{})
+ require.IsType(t, &StateExploreWaiting{}, state)
+
+ // notify explore that node was contacted successfully, with a closer node
+ state = ex.Advance(ctx, &EventExploreFindCloserResponse[tiny.Key, tiny.Node]{
+ NodeID: a,
+ CloserNodes: []tiny.Node{b},
+ })
+
+ // explore tries to contact nearer node
+ require.IsType(t, &StateExploreFindCloser[tiny.Key, tiny.Node]{}, state)
+
+ // notify explore that node was contacted successfully, with a closer node
+ state = ex.Advance(ctx, &EventExploreFindCloserResponse[tiny.Key, tiny.Node]{
+ NodeID: b,
+ CloserNodes: []tiny.Node{c},
+ })
+
+ // explore tries to contact nearer node
+ require.IsType(t, &StateExploreFindCloser[tiny.Key, tiny.Node]{}, state)
+
+ // the query should attempt to contact the node it was given
+ st = state.(*StateExploreFindCloser[tiny.Key, tiny.Node])
+ require.Equal(t, c, st.NodeID)
+
+ // notify explore that node was contacted successfully, but no closer nodes
+ state = 
ex.Advance(ctx, &EventExploreFindCloserResponse[tiny.Key, tiny.Node]{
+ NodeID: c,
+ })
+ require.IsType(t, &StateExploreQueryFinished{}, state)
+}
+
+func TestExploreQueriesNextHighestCpl(t *testing.T) {
+ ctx := context.Background()
+ clk := clock.NewMock()
+
+ cfg := DefaultExploreConfig()
+ cfg.Clock = clk
+
+ self := tiny.NewNode(128)
+ rt := simplert.New[tiny.Key, tiny.Node](self, 5)
+
+ // populate the routing table with at least one node
+ a := tiny.NewNode(4)
+ rt.AddNode(a)
+
+ start := clk.Now()
+
+ schedule := DefaultDynamicSchedule(t, clk)
+ ex, err := NewExplore[tiny.Key, tiny.Node](self, rt, tiny.NodeWithCpl, schedule, cfg)
+ require.NoError(t, err)
+
+ state := ex.Advance(ctx, &EventExplorePoll{})
+ require.IsType(t, &StateExploreIdle{}, state)
+
+ // advance the clock to the due time of the first explore that should be started
+ interval1 := schedule.cplInterval(schedule.maxCpl)
+ clk.Set(start.Add(interval1))
+
+ // explore should now start the explore query
+ state = ex.Advance(ctx, &EventExplorePoll{})
+ require.IsType(t, &StateExploreFindCloser[tiny.Key, tiny.Node]{}, state)
+ st := state.(*StateExploreFindCloser[tiny.Key, tiny.Node])
+
+ // the query should have the correct ID
+ require.Equal(t, ExploreQueryID, st.QueryID)
+
+ // with the correct cpl
+ require.Equal(t, schedule.maxCpl, st.Cpl)
+
+ // the query should attempt to look for nodes near a key with the maximum cpl
+ require.Equal(t, schedule.maxCpl, self.Key().CommonPrefixLength(st.Target))
+
+ // the query should be contacting the nearest known node
+ require.Equal(t, a, st.NodeID)
+
+ // now the explore reports that it is waiting
+ state = ex.Advance(ctx, &EventExplorePoll{})
+ require.IsType(t, &StateExploreWaiting{}, state)
+
+ // notify explore that node was contacted successfully, but no closer nodes
+ state = ex.Advance(ctx, &EventExploreFindCloserResponse[tiny.Key, tiny.Node]{
+ NodeID: a,
+ })
+ require.IsType(t, &StateExploreQueryFinished{}, state)
+
+ // advance the clock to the due time of the second cpl explore that should be started
+ interval2 := schedule.cplInterval(schedule.maxCpl - 1)
+ clk.Set(start.Add(interval2))
+
+ // explore should now start another explore query
+ state = ex.Advance(ctx, &EventExplorePoll{})
+ require.IsType(t, &StateExploreFindCloser[tiny.Key, tiny.Node]{}, state)
+ st = state.(*StateExploreFindCloser[tiny.Key, tiny.Node])
+
+ // with the correct cpl
+ require.Equal(t, schedule.maxCpl-1, st.Cpl)
+
+ // the query should attempt to look for nodes near a key with the next highest cpl
+ require.Equal(t, schedule.maxCpl-1, self.Key().CommonPrefixLength(st.Target))
+
+ // the query should be contacting the nearest known node
+ require.Equal(t, a, st.NodeID)
+}
diff --git a/v2/internal/coord/routing/probe.go b/v2/internal/coord/routing/probe.go
index 248d450b..ec686946 100644
--- a/v2/internal/coord/routing/probe.go
+++ b/v2/internal/coord/routing/probe.go
@@ -38,9 +38,9 @@ type RoutingTableCpl[K kad.Key[K], N kad.NodeID[K]] interface {
 // returns at least one node in the list of closer nodes. The state machine emits the [StateProbeConnectivityCheck]
 // state when it wants to check the status of a node.
 //
-// The state machine expects to be notified either with the [EventProbeMessageResponse] or the
-// [EventProbeMessageFailure] events to determine the outcome of the check. If neither are received within a
-// configurable timeout the node is marked as failed.
+// The state machine expects to be notified either with the [EventProbeConnectivityCheckSuccess] or the
+// [EventProbeConnectivityCheckFailure] events to determine the outcome of the check. If neither are received
+// within a configurable timeout the node is marked as failed.
 //
 // Nodes that receive a successful response have their next check time updated to the current time plus the configured
 // [ProbeConfig.CheckInterval].
diff --git a/v2/internal/coord/routing_test.go b/v2/internal/coord/routing_test.go
index 545680da..dbc7b456 100644
--- a/v2/internal/coord/routing_test.go
+++ b/v2/internal/coord/routing_test.go
@@ -6,6 +6,7 @@ import (
 "time"
 
 "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt"
+ "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/cplutil"
 
 "go.opentelemetry.io/otel"
 
@@ -20,6 +21,26 @@ import (
 "github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
 )
 
+// idleBootstrap returns a bootstrap state machine that is always idle
+func idleBootstrap() *RecordingSM[routing.BootstrapEvent, routing.BootstrapState] {
+ return NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{})
+}
+
+// idleInclude returns an include state machine that is always idle
+func idleInclude() *RecordingSM[routing.IncludeEvent, routing.IncludeState] {
+ return NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{})
+}
+
+// idleProbe returns a probe state machine that is always idle
+func idleProbe() *RecordingSM[routing.ProbeEvent, routing.ProbeState] {
+ return NewRecordingSM[routing.ProbeEvent, routing.ProbeState](&routing.StateProbeIdle{})
+}
+
+// idleExplore returns an explore state machine that is always idle
+func idleExplore() *RecordingSM[routing.ExploreEvent, routing.ExploreState] {
+ return NewRecordingSM[routing.ExploreEvent, routing.ExploreState](&routing.StateExploreIdle{})
+}
+
 func TestRoutingStartBootstrapSendsEvent(t *testing.T) {
 ctx := kadtest.CtxShort(t)
 
@@ -31,10 +52,8 @@ func TestRoutingStartBootstrapSendsEvent(t *testing.T) {
 
 // records the event passed to bootstrap
 bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{})
- include := new(NullSM[routing.IncludeEvent, routing.IncludeState])
- probe := new(NullSM[routing.ProbeEvent, routing.ProbeState])
 
- routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test"))
+ routingBehaviour := NewRoutingBehaviour(self, bootstrap, idleInclude(), idleProbe(), idleExplore(), slog.Default(), otel.Tracer("test"))
 
 ev := &EventStartBootstrap{
 SeedNodes: []kadt.PeerID{nodes[1].NodeID},
@@ -60,13 +79,11 @@ func TestRoutingBootstrapGetClosestNodesSuccess(t *testing.T) {
 
 // records the event passed to bootstrap
 bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{})
- include := new(NullSM[routing.IncludeEvent, routing.IncludeState])
- probe := new(NullSM[routing.ProbeEvent, routing.ProbeState])
 
- routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test"))
+ routingBehaviour := NewRoutingBehaviour(self, bootstrap, idleInclude(), idleProbe(), idleExplore(), slog.Default(), otel.Tracer("test"))
 
 ev := &EventGetCloserNodesSuccess{
- QueryID: coordt.QueryID("bootstrap"),
+ QueryID: routing.BootstrapQueryID,
 To: nodes[1].NodeID,
 Target: nodes[0].NodeID.Key(),
 CloserNodes: []kadt.PeerID{nodes[2].NodeID},
@@ -93,14 +110,12 @@ func TestRoutingBootstrapGetClosestNodesFailure(t
*testing.T) { // records the event passed to bootstrap bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) - include := new(NullSM[routing.IncludeEvent, routing.IncludeState]) - probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) - routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) + routingBehaviour := NewRoutingBehaviour(self, bootstrap, idleInclude(), idleProbe(), idleExplore(), slog.Default(), otel.Tracer("test")) failure := errors.New("failed") ev := &EventGetCloserNodesFailure{ - QueryID: coordt.QueryID("bootstrap"), + QueryID: routing.BootstrapQueryID, To: nodes[1].NodeID, Target: nodes[0].NodeID.Key(), Err: failure, @@ -128,10 +143,7 @@ func TestRoutingAddNodeInfoSendsEvent(t *testing.T) { // records the event passed to include include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) - bootstrap := new(NullSM[routing.BootstrapEvent, routing.BootstrapState]) - probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) - - routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) + routingBehaviour := NewRoutingBehaviour(self, idleBootstrap(), include, idleProbe(), idleExplore(), slog.Default(), otel.Tracer("test")) ev := &EventAddNode{ NodeID: nodes[2].NodeID, @@ -158,10 +170,7 @@ func TestRoutingIncludeGetClosestNodesSuccess(t *testing.T) { // records the event passed to include include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) - bootstrap := new(NullSM[routing.BootstrapEvent, routing.BootstrapState]) - probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) - - routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) + routingBehaviour := NewRoutingBehaviour(self, idleBootstrap(), include, idleProbe(), idleExplore(), slog.Default(), otel.Tracer("test")) ev := &EventGetCloserNodesSuccess{ QueryID: coordt.QueryID("include"), @@ -191,10 +200,7 @@ func TestRoutingIncludeGetClosestNodesFailure(t *testing.T) { // records the event passed to include include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) - bootstrap := new(NullSM[routing.BootstrapEvent, routing.BootstrapState]) - probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) - - routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) + routingBehaviour := NewRoutingBehaviour(self, idleBootstrap(), include, idleProbe(), idleExplore(), slog.Default(), otel.Tracer("test")) failure := errors.New("failed") ev := &EventGetCloserNodesFailure{ @@ -235,10 +241,7 @@ func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { probe, err := routing.NewProbe[kadt.Key](rt, probeCfg) require.NoError(t, err) - // ensure bootstrap is always idle - bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) - - routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) + routingBehaviour := NewRoutingBehaviour(self, idleBootstrap(), include, probe, idleExplore(), slog.Default(), otel.Tracer("test")) // a new node to be included candidate := nodes[len(nodes)-1].NodeID @@ -294,3 +297,102 @@ func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { require.Equal(t, coordt.QueryID("probe"), oev.QueryID) require.Equal(t, candidate, 
oev.To)
}
+
+func TestRoutingExploreSendsEvent(t *testing.T) {
+	ctx := kadtest.CtxShort(t)
+
+	clk := clock.NewMock()
+	_, nodes, err := nettest.LinearTopology(4, clk)
+	require.NoError(t, err)
+
+	self := nodes[0].NodeID
+	rt := nodes[0].RoutingTable
+
+	exploreCfg := routing.DefaultExploreConfig()
+	exploreCfg.Clock = clk
+
+	// make sure the explore starts as soon as the explore state machine is polled
+	schedule := routing.NewNoWaitExploreSchedule(14)
+
+	explore, err := routing.NewExplore[kadt.Key](self, rt, cplutil.GenRandPeerID, schedule, exploreCfg)
+	require.NoError(t, err)
+
+	routingBehaviour := NewRoutingBehaviour(self, idleBootstrap(), idleInclude(), idleProbe(), explore, slog.Default(), otel.Tracer("test"))
+
+	routingBehaviour.Notify(ctx, &EventRoutingPoll{})
+
+	// collect the result of the notify
+	dev, ok := routingBehaviour.Perform(ctx)
+	require.True(t, ok)
+
+	// explore should be asking to send a message to the node
+	require.IsType(t, &EventOutboundGetCloserNodes{}, dev)
+	gcl := dev.(*EventOutboundGetCloserNodes)
+
+	require.Equal(t, routing.ExploreQueryID, gcl.QueryID)
+
+	// the message should be looking for nodes closer to a key that occupies cpl 14
+	require.Equal(t, 14, self.Key().CommonPrefixLength(gcl.Target))
+}
+
+func TestRoutingExploreGetClosestNodesSuccess(t *testing.T) {
+	ctx := kadtest.CtxShort(t)
+
+	clk := clock.NewMock()
+	_, nodes, err := nettest.LinearTopology(4, clk)
+	require.NoError(t, err)
+
+	self := nodes[0].NodeID
+
+	// records the event passed to explore
+	explore := NewRecordingSM[routing.ExploreEvent, routing.ExploreState](&routing.StateExploreIdle{})
+
+	routingBehaviour := NewRoutingBehaviour(self, idleBootstrap(), idleInclude(), idleProbe(), explore, slog.Default(), otel.Tracer("test"))
+
+	ev := &EventGetCloserNodesSuccess{
+		QueryID:     routing.ExploreQueryID,
+		To:          nodes[1].NodeID,
+		Target:      nodes[0].NodeID.Key(),
+		CloserNodes: []kadt.PeerID{nodes[2].NodeID},
+	}
+	routingBehaviour.Notify(ctx, ev)
+
+	// explore should receive message response event
+	require.IsType(t, &routing.EventExploreFindCloserResponse[kadt.Key, kadt.PeerID]{}, explore.Received)
+
+	rev := explore.Received.(*routing.EventExploreFindCloserResponse[kadt.Key, kadt.PeerID])
+	require.True(t, nodes[1].NodeID.Equal(rev.NodeID))
+	require.Equal(t, ev.CloserNodes, rev.CloserNodes)
+}
+
+func TestRoutingExploreGetClosestNodesFailure(t *testing.T) {
+	ctx := kadtest.CtxShort(t)
+
+	clk := clock.NewMock()
+	_, nodes, err := nettest.LinearTopology(4, clk)
+	require.NoError(t, err)
+
+	self := nodes[0].NodeID
+
+	// records the event passed to explore
+	explore := NewRecordingSM[routing.ExploreEvent, routing.ExploreState](&routing.StateExploreIdle{})
+
+	routingBehaviour := NewRoutingBehaviour(self, idleBootstrap(), idleInclude(), idleProbe(), explore, slog.Default(), otel.Tracer("test"))
+
+	failure := errors.New("failed")
+	ev := &EventGetCloserNodesFailure{
+		QueryID: routing.ExploreQueryID,
+		To:      nodes[1].NodeID,
+		Target:  nodes[0].NodeID.Key(),
+		Err:     failure,
+	}
+
+	routingBehaviour.Notify(ctx, ev)
+
+	// explore should receive message response event
+	require.IsType(t, &routing.EventExploreFindCloserFailure[kadt.Key, kadt.PeerID]{}, explore.Received)
+
+	rev := explore.Received.(*routing.EventExploreFindCloserFailure[kadt.Key, kadt.PeerID])
+	require.Equal(t, peer.ID(nodes[1].NodeID), peer.ID(rev.NodeID))
+	require.Equal(t, failure, rev.Error)
+}

From 09dd7b0b2f507652c7429e3d92fb3e7eab23004e Mon Sep 17 00:00:00 2001
From: Ian Davis
<18375+iand@users.noreply.github.com> Date: Mon, 25 Sep 2023 14:46:14 +0100 Subject: [PATCH 54/64] Expose behaviour and state machine configs (#937) * Expose behaviour and state machine configs * Adjust test * Revert adjust test --- v2/dht.go | 8 +- v2/errs/errors.go | 22 ++ v2/internal/coord/coordinator.go | 155 +++------ v2/internal/coord/coordinator_test.go | 39 +-- v2/internal/coord/network.go | 3 +- v2/internal/coord/query.go | 127 ++++++- v2/internal/coord/query_test.go | 70 ++++ v2/internal/coord/routing.go | 345 +++++++++++++++++++- v2/internal/coord/routing/bootstrap.go | 32 +- v2/internal/coord/routing/bootstrap_test.go | 18 +- v2/internal/coord/routing/explore.go | 38 ++- v2/internal/coord/routing/explore_test.go | 16 + v2/internal/coord/routing/include.go | 10 +- v2/internal/coord/routing/probe.go | 10 +- v2/internal/coord/routing_test.go | 226 ++++++++++++- v2/tele/log.go | 11 + v2/tele/tele.go | 5 + 17 files changed, 893 insertions(+), 242 deletions(-) create mode 100644 v2/errs/errors.go create mode 100644 v2/internal/coord/query_test.go create mode 100644 v2/tele/log.go diff --git a/v2/dht.go b/v2/dht.go index 1dbcfecc..b9ec9993 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -106,10 +106,10 @@ func New(h host.Host, cfg *Config) (*DHT, error) { // instantiate a new Kademlia DHT coordinator. coordCfg := coord.DefaultCoordinatorConfig() - coordCfg.QueryConcurrency = cfg.Query.Concurrency - coordCfg.QueryTimeout = cfg.Query.Timeout - coordCfg.RequestConcurrency = cfg.Query.RequestConcurrency - coordCfg.RequestTimeout = cfg.Query.RequestTimeout + coordCfg.Query.Concurrency = cfg.Query.Concurrency + coordCfg.Query.Timeout = cfg.Query.Timeout + coordCfg.Query.RequestConcurrency = cfg.Query.RequestConcurrency + coordCfg.Query.RequestTimeout = cfg.Query.RequestTimeout coordCfg.Clock = cfg.Clock coordCfg.MeterProvider = cfg.MeterProvider coordCfg.TracerProvider = cfg.TracerProvider diff --git a/v2/errs/errors.go b/v2/errs/errors.go new file mode 100644 index 00000000..7ad9f474 --- /dev/null +++ b/v2/errs/errors.go @@ -0,0 +1,22 @@ +package errs + +import "fmt" + +// A ConfigurationError is returned when a component's configuration is found to be invalid or unusable. 
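+//
+// A rough usage sketch for callers (illustrative, assuming the standard
+// library errors package; it relies on the Unwrap method defined below):
+//
+//	var cerr *ConfigurationError
+//	if errors.As(err, &cerr) {
+//		// cerr.Component names the component whose configuration was invalid
+//	}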
+type ConfigurationError struct {
+	Component string
+	Err       error
+}
+
+var _ error = (*ConfigurationError)(nil)
+
+func (e *ConfigurationError) Error() string {
+	if e.Err == nil {
+		return fmt.Sprintf("configuration error: %s", e.Component)
+	}
+	return fmt.Sprintf("configuration error: %s: %s", e.Component, e.Err.Error())
+}
+
+func (e *ConfigurationError) Unwrap() error {
+	return e.Err
+}
diff --git a/v2/internal/coord/coordinator.go b/v2/internal/coord/coordinator.go
index d5838929..afe712d0 100644
--- a/v2/internal/coord/coordinator.go
+++ b/v2/internal/coord/coordinator.go
@@ -7,26 +7,22 @@ import (
	"reflect"
	"sync"
	"sync/atomic"
-	"time"
	"github.com/benbjohnson/clock"
-	logging "github.com/ipfs/go-log/v2"
	"github.com/plprobelab/go-kademlia/kad"
-	"github.com/plprobelab/go-kademlia/kaderr"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/trace"
-	"go.uber.org/zap/exp/zapslog"
	"golang.org/x/exp/slog"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/errs"
	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/brdcst"
	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt"
-	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/cplutil"
-	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query"
	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/routing"
	"github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
	"github.com/libp2p/go-libp2p-kad-dht/v2/pb"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/tele"
)
// A Coordinator coordinates the state machines that comprise a Kademlia DHT
@@ -81,72 +77,50 @@ type RoutingNotifier interface {
}
type CoordinatorConfig struct {
-	Clock clock.Clock // a clock that may replaced by a mock when testing
+	// Clock is a clock that may be replaced by a mock when testing
+	Clock clock.Clock
-	QueryConcurrency int           // the maximum number of queries that may be waiting for message responses at any one time
-	QueryTimeout     time.Duration // the time to wait before terminating a query that is not making progress
+	// Logger is a structured logger that will be used when logging.
+	Logger *slog.Logger
-	RequestConcurrency int           // the maximum number of concurrent requests that each query may have in flight
-	RequestTimeout     time.Duration // the timeout queries should use for contacting a single node
+	// MeterProvider is the meter provider to use when initialising metric instruments.
+	MeterProvider metric.MeterProvider
-	Logger *slog.Logger // a structured logger that should be used when logging.
+	// TracerProvider is the tracer provider to use when initialising tracing.
+	TracerProvider trace.TracerProvider
-	MeterProvider  metric.MeterProvider // the meter provider to use when initialising metric instruments
-	TracerProvider trace.TracerProvider // the tracer provider to use when initialising tracing
+	// Routing is the configuration used for the [RoutingBehaviour] which maintains the health of the routing table.
+	Routing RoutingConfig
+
+	// Query is the configuration used for the [PooledQueryBehaviour] which manages the execution of user queries.
+	Query PooledQueryConfig
}
// Validate checks the configuration options and returns an error if any have invalid values.
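// For example, a nil clock in an otherwise default configuration fails
// validation (an illustrative sketch):
//
//	cfg := DefaultCoordinatorConfig()
//	cfg.Clock = nil
//	err := cfg.Validate() // non-nil *errs.ConfigurationError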
func (cfg *CoordinatorConfig) Validate() error { if cfg.Clock == nil { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "CoordinatorConfig", Err: fmt.Errorf("clock must not be nil"), } } - if cfg.QueryConcurrency < 1 { - return &kaderr.ConfigurationError{ - Component: "CoordinatorConfig", - Err: fmt.Errorf("query concurrency must be greater than zero"), - } - } - if cfg.QueryTimeout < 1 { - return &kaderr.ConfigurationError{ - Component: "CoordinatorConfig", - Err: fmt.Errorf("query timeout must be greater than zero"), - } - } - - if cfg.RequestConcurrency < 1 { - return &kaderr.ConfigurationError{ - Component: "CoordinatorConfig", - Err: fmt.Errorf("request concurrency must be greater than zero"), - } - } - - if cfg.RequestTimeout < 1 { - return &kaderr.ConfigurationError{ - Component: "CoordinatorConfig", - Err: fmt.Errorf("request timeout must be greater than zero"), - } - } - if cfg.Logger == nil { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "CoordinatorConfig", Err: fmt.Errorf("logger must not be nil"), } } if cfg.MeterProvider == nil { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "CoordinatorConfig", Err: fmt.Errorf("meter provider must not be nil"), } } if cfg.TracerProvider == nil { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "CoordinatorConfig", Err: fmt.Errorf("tracer provider must not be nil"), } @@ -156,16 +130,25 @@ func (cfg *CoordinatorConfig) Validate() error { } func DefaultCoordinatorConfig() *CoordinatorConfig { - return &CoordinatorConfig{ - Clock: clock.New(), - QueryConcurrency: 3, - QueryTimeout: 5 * time.Minute, - RequestConcurrency: 3, - RequestTimeout: time.Minute, - Logger: slog.New(zapslog.NewHandler(logging.Logger("coord").Desugar().Core())), - MeterProvider: otel.GetMeterProvider(), - TracerProvider: otel.GetTracerProvider(), + cfg := &CoordinatorConfig{ + Clock: clock.New(), + + Logger: tele.DefaultLogger("coord"), + MeterProvider: otel.GetMeterProvider(), + TracerProvider: otel.GetTracerProvider(), } + + cfg.Query = *DefaultPooledQueryConfig() + cfg.Query.Clock = cfg.Clock + cfg.Query.Logger = cfg.Logger.With("behaviour", "pooledquery") + cfg.Query.Tracer = cfg.TracerProvider.Tracer(tele.TracerName) + + cfg.Routing = *DefaultRoutingConfig() + cfg.Routing.Clock = cfg.Clock + cfg.Routing.Logger = cfg.Logger.With("behaviour", "routing") + cfg.Routing.Tracer = cfg.TracerProvider.Tracer(tele.TracerName) + + return cfg } func NewCoordinator(self kadt.PeerID, rtr coordt.Router[kadt.Key, kadt.PeerID, *pb.Message], rt routing.RoutingTableCpl[kadt.Key, kadt.PeerID], cfg *CoordinatorConfig) (*Coordinator, error) { @@ -181,72 +164,16 @@ func NewCoordinator(self kadt.PeerID, rtr coordt.Router[kadt.Key, kadt.PeerID, * return nil, fmt.Errorf("init telemetry: %w", err) } - qpCfg := query.DefaultPoolConfig() - qpCfg.Clock = cfg.Clock - qpCfg.Concurrency = cfg.QueryConcurrency - qpCfg.Timeout = cfg.QueryTimeout - qpCfg.QueryConcurrency = cfg.RequestConcurrency - qpCfg.RequestTimeout = cfg.RequestTimeout - - qp, err := query.NewPool[kadt.Key, kadt.PeerID, *pb.Message](self, qpCfg) - if err != nil { - return nil, fmt.Errorf("query pool: %w", err) - } - queryBehaviour := NewPooledQueryBehaviour(qp, cfg.Logger, tele.Tracer) - - bootstrapCfg := routing.DefaultBootstrapConfig[kadt.Key]() - bootstrapCfg.Clock = cfg.Clock - bootstrapCfg.Timeout = cfg.QueryTimeout - bootstrapCfg.RequestConcurrency = cfg.RequestConcurrency - 
bootstrapCfg.RequestTimeout = cfg.RequestTimeout - - bootstrap, err := routing.NewBootstrap(self, bootstrapCfg) + queryBehaviour, err := NewPooledQueryBehaviour(self, &cfg.Query) if err != nil { - return nil, fmt.Errorf("bootstrap: %w", err) + return nil, fmt.Errorf("query behaviour: %w", err) } - includeCfg := routing.DefaultIncludeConfig() - includeCfg.Clock = cfg.Clock - includeCfg.Timeout = cfg.QueryTimeout - - // TODO: expose config - // includeCfg.QueueCapacity = cfg.IncludeQueueCapacity - // includeCfg.Concurrency = cfg.IncludeConcurrency - // includeCfg.Timeout = cfg.IncludeTimeout - - include, err := routing.NewInclude[kadt.Key, kadt.PeerID](rt, includeCfg) + routingBehaviour, err := NewRoutingBehaviour(self, rt, &cfg.Routing) if err != nil { - return nil, fmt.Errorf("include: %w", err) + return nil, fmt.Errorf("routing behaviour: %w", err) } - probeCfg := routing.DefaultProbeConfig() - probeCfg.Clock = cfg.Clock - probeCfg.Timeout = cfg.QueryTimeout - - // TODO: expose config - // probeCfg.Concurrency = cfg.ProbeConcurrency - probe, err := routing.NewProbe[kadt.Key](rt, probeCfg) - if err != nil { - return nil, fmt.Errorf("probe: %w", err) - } - - exploreCfg := routing.DefaultExploreConfig() - exploreCfg.Clock = cfg.Clock - exploreCfg.Timeout = cfg.QueryTimeout - - schedule, err := routing.NewDynamicExploreSchedule(14, cfg.Clock.Now(), time.Hour, 1, 0) - if err != nil { - return nil, fmt.Errorf("explore schedule: %w", err) - } - - // TODO: expose more config - explore, err := routing.NewExplore[kadt.Key](self, rt, cplutil.GenRandPeerID, schedule, exploreCfg) - if err != nil { - return nil, fmt.Errorf("explore: %w", err) - } - - routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, explore, cfg.Logger, tele.Tracer) - networkBehaviour := NewNetworkBehaviour(rtr, cfg.Logger, tele.Tracer) b, err := brdcst.NewPool[kadt.Key, kadt.PeerID, *pb.Message](self, nil) diff --git a/v2/internal/coord/coordinator_test.go b/v2/internal/coord/coordinator_test.go index 716fe1e6..917b60ea 100644 --- a/v2/internal/coord/coordinator_test.go +++ b/v2/internal/coord/coordinator_test.go @@ -5,11 +5,10 @@ import ( "log" "testing" - "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" - "github.com/benbjohnson/clock" "github.com/stretchr/testify/require" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/nettest" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" @@ -30,42 +29,6 @@ func TestConfigValidate(t *testing.T) { require.Error(t, cfg.Validate()) }) - t.Run("query concurrency positive", func(t *testing.T) { - cfg := DefaultCoordinatorConfig() - - cfg.QueryConcurrency = 0 - require.Error(t, cfg.Validate()) - cfg.QueryConcurrency = -1 - require.Error(t, cfg.Validate()) - }) - - t.Run("query timeout positive", func(t *testing.T) { - cfg := DefaultCoordinatorConfig() - - cfg.QueryTimeout = 0 - require.Error(t, cfg.Validate()) - cfg.QueryTimeout = -1 - require.Error(t, cfg.Validate()) - }) - - t.Run("request concurrency positive", func(t *testing.T) { - cfg := DefaultCoordinatorConfig() - - cfg.RequestConcurrency = 0 - require.Error(t, cfg.Validate()) - cfg.RequestConcurrency = -1 - require.Error(t, cfg.Validate()) - }) - - t.Run("request timeout positive", func(t *testing.T) { - cfg := DefaultCoordinatorConfig() - - cfg.RequestTimeout = 0 - require.Error(t, cfg.Validate()) - cfg.RequestTimeout = -1 - require.Error(t, cfg.Validate()) - }) - 
t.Run("logger not nil", func(t *testing.T) { cfg := DefaultCoordinatorConfig() diff --git a/v2/internal/coord/network.go b/v2/internal/coord/network.go index 487a2506..7d9d8374 100644 --- a/v2/internal/coord/network.go +++ b/v2/internal/coord/network.go @@ -5,12 +5,11 @@ import ( "fmt" "sync" - "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" - "github.com/plprobelab/go-kademlia/key" "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) diff --git a/v2/internal/coord/query.go b/v2/internal/coord/query.go index 5d1df302..68ca59d5 100644 --- a/v2/internal/coord/query.go +++ b/v2/internal/coord/query.go @@ -4,10 +4,13 @@ import ( "context" "fmt" "sync" + "time" + "github.com/benbjohnson/clock" "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" + "github.com/libp2p/go-libp2p-kad-dht/v2/errs" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" @@ -15,31 +18,135 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) +type PooledQueryConfig struct { + // Clock is a clock that may replaced by a mock when testing + Clock clock.Clock + + // Logger is a structured logger that will be used when logging. + Logger *slog.Logger + + // Tracer is the tracer that should be used to trace execution. + Tracer trace.Tracer + + // Concurrency is the maximum number of queries that may be waiting for message responses at any one time. + Concurrency int + + // Timeout the time to wait before terminating a query that is not making progress. + Timeout time.Duration + + // RequestConcurrency is the maximum number of concurrent requests that each query may have in flight. + RequestConcurrency int + + // RequestTimeout is the timeout queries should use for contacting a single node + RequestTimeout time.Duration +} + +// Validate checks the configuration options and returns an error if any have invalid values. 
+func (cfg *PooledQueryConfig) Validate() error { + if cfg.Clock == nil { + return &errs.ConfigurationError{ + Component: "PooledQueryConfig", + Err: fmt.Errorf("clock must not be nil"), + } + } + + if cfg.Logger == nil { + return &errs.ConfigurationError{ + Component: "PooledQueryConfig", + Err: fmt.Errorf("logger must not be nil"), + } + } + + if cfg.Tracer == nil { + return &errs.ConfigurationError{ + Component: "PooledQueryConfig", + Err: fmt.Errorf("tracer must not be nil"), + } + } + + if cfg.Concurrency < 1 { + return &errs.ConfigurationError{ + Component: "PooledQueryConfig", + Err: fmt.Errorf("query concurrency must be greater than zero"), + } + } + if cfg.Timeout < 1 { + return &errs.ConfigurationError{ + Component: "PooledQueryConfig", + Err: fmt.Errorf("query timeout must be greater than zero"), + } + } + + if cfg.RequestConcurrency < 1 { + return &errs.ConfigurationError{ + Component: "PooledQueryConfig", + Err: fmt.Errorf("request concurrency must be greater than zero"), + } + } + + if cfg.RequestTimeout < 1 { + return &errs.ConfigurationError{ + Component: "PooledQueryConfig", + Err: fmt.Errorf("request timeout must be greater than zero"), + } + } + + return nil +} + +func DefaultPooledQueryConfig() *PooledQueryConfig { + return &PooledQueryConfig{ + Clock: clock.New(), + Logger: tele.DefaultLogger("coord"), + Tracer: tele.NoopTracer(), + Concurrency: 3, // MAGIC + Timeout: 5 * time.Minute, // MAGIC + RequestConcurrency: 3, // MAGIC + RequestTimeout: time.Minute, // MAGIC + + } +} + type PooledQueryBehaviour struct { + cfg PooledQueryConfig pool *query.Pool[kadt.Key, kadt.PeerID, *pb.Message] waiters map[coordt.QueryID]NotifyCloser[BehaviourEvent] pendingMu sync.Mutex pending []BehaviourEvent ready chan struct{} - - logger *slog.Logger - tracer trace.Tracer } -func NewPooledQueryBehaviour(pool *query.Pool[kadt.Key, kadt.PeerID, *pb.Message], logger *slog.Logger, tracer trace.Tracer) *PooledQueryBehaviour { +func NewPooledQueryBehaviour(self kadt.PeerID, cfg *PooledQueryConfig) (*PooledQueryBehaviour, error) { + if cfg == nil { + cfg = DefaultPooledQueryConfig() + } else if err := cfg.Validate(); err != nil { + return nil, err + } + + qpCfg := query.DefaultPoolConfig() + qpCfg.Clock = cfg.Clock + qpCfg.Concurrency = cfg.Concurrency + qpCfg.Timeout = cfg.Timeout + qpCfg.QueryConcurrency = cfg.RequestConcurrency + qpCfg.RequestTimeout = cfg.RequestTimeout + + pool, err := query.NewPool[kadt.Key, kadt.PeerID, *pb.Message](self, qpCfg) + if err != nil { + return nil, fmt.Errorf("query pool: %w", err) + } + h := &PooledQueryBehaviour{ + cfg: *cfg, pool: pool, waiters: make(map[coordt.QueryID]NotifyCloser[BehaviourEvent]), ready: make(chan struct{}, 1), - logger: logger.With("behaviour", "query"), - tracer: tracer, } - return h + return h, err } func (p *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { - ctx, span := p.tracer.Start(ctx, "PooledQueryBehaviour.Notify") + ctx, span := p.cfg.Tracer.Start(ctx, "PooledQueryBehaviour.Notify") defer span.End() p.pendingMu.Lock() @@ -155,7 +262,7 @@ func (p *PooledQueryBehaviour) Ready() <-chan struct{} { } func (p *PooledQueryBehaviour) Perform(ctx context.Context) (BehaviourEvent, bool) { - ctx, span := p.tracer.Start(ctx, "PooledQueryBehaviour.Perform") + ctx, span := p.cfg.Tracer.Start(ctx, "PooledQueryBehaviour.Perform") defer span.End() // No inbound work can be done until Perform is complete @@ -190,7 +297,7 @@ func (p *PooledQueryBehaviour) Perform(ctx context.Context) (BehaviourEvent, boo } func (p 
*PooledQueryBehaviour) advancePool(ctx context.Context, ev query.PoolEvent) (out BehaviourEvent, term bool) {
-	ctx, span := p.tracer.Start(ctx, "PooledQueryBehaviour.advancePool", trace.WithAttributes(tele.AttrInEvent(ev)))
+	ctx, span := p.cfg.Tracer.Start(ctx, "PooledQueryBehaviour.advancePool", trace.WithAttributes(tele.AttrInEvent(ev)))
	defer func() {
		span.SetAttributes(tele.AttrOutEvent(out))
		span.End()
diff --git a/v2/internal/coord/query_test.go b/v2/internal/coord/query_test.go
new file mode 100644
index 00000000..74222a77
--- /dev/null
+++ b/v2/internal/coord/query_test.go
@@ -0,0 +1,70 @@
+package coord
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestPooledQueryConfigValidate(t *testing.T) {
+	t.Run("default is valid", func(t *testing.T) {
+		cfg := DefaultPooledQueryConfig()
+
+		require.NoError(t, cfg.Validate())
+	})
+
+	t.Run("clock is not nil", func(t *testing.T) {
+		cfg := DefaultPooledQueryConfig()
+
+		cfg.Clock = nil
+		require.Error(t, cfg.Validate())
+	})
+
+	t.Run("logger not nil", func(t *testing.T) {
+		cfg := DefaultPooledQueryConfig()
+		cfg.Logger = nil
+		require.Error(t, cfg.Validate())
+	})
+
+	t.Run("tracer not nil", func(t *testing.T) {
+		cfg := DefaultPooledQueryConfig()
+		cfg.Tracer = nil
+		require.Error(t, cfg.Validate())
+	})
+
+	t.Run("query concurrency positive", func(t *testing.T) {
+		cfg := DefaultPooledQueryConfig()
+
+		cfg.Concurrency = 0
+		require.Error(t, cfg.Validate())
+		cfg.Concurrency = -1
+		require.Error(t, cfg.Validate())
+	})
+
+	t.Run("query timeout positive", func(t *testing.T) {
+		cfg := DefaultPooledQueryConfig()
+
+		cfg.Timeout = 0
+		require.Error(t, cfg.Validate())
+		cfg.Timeout = -1
+		require.Error(t, cfg.Validate())
+	})
+
+	t.Run("request concurrency positive", func(t *testing.T) {
+		cfg := DefaultPooledQueryConfig()
+
+		cfg.RequestConcurrency = 0
+		require.Error(t, cfg.Validate())
+		cfg.RequestConcurrency = -1
+		require.Error(t, cfg.Validate())
+	})
+
+	t.Run("request timeout positive", func(t *testing.T) {
+		cfg := DefaultPooledQueryConfig()
+
+		cfg.RequestTimeout = 0
+		require.Error(t, cfg.Validate())
+		cfg.RequestTimeout = -1
+		require.Error(t, cfg.Validate())
+	})
+}
diff --git a/v2/internal/coord/routing.go b/v2/internal/coord/routing.go
index 955b63cb..5e33e316 100644
--- a/v2/internal/coord/routing.go
+++ b/v2/internal/coord/routing.go
@@ -4,14 +4,19 @@ import (
	"context"
	"fmt"
	"sync"
+	"time"
+
	"github.com/benbjohnson/clock"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
	"golang.org/x/exp/slog"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/errs"
	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/cplutil"
	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/routing"
	"github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/tele"
)
const (
@@ -24,11 +29,255 @@ const (
	ProbeQueryID = coordt.QueryID("probe")
)
+type RoutingConfig struct {
+	// Clock is a clock that may be replaced by a mock when testing
+	Clock clock.Clock
+
+	// Logger is a structured logger that will be used when logging.
+	Logger *slog.Logger
+
+	// Tracer is the tracer that should be used to trace execution.
+	Tracer trace.Tracer
+
+	// BootstrapTimeout is the time the behaviour should wait before terminating a bootstrap if it is not making progress.
+	BootstrapTimeout time.Duration
+
+	// BootstrapRequestConcurrency is the maximum number of concurrent requests that the behaviour may have in flight during bootstrap.
+	BootstrapRequestConcurrency int
+
+	// BootstrapRequestTimeout is the timeout the behaviour should use when attempting to contact a node during bootstrap.
+	BootstrapRequestTimeout time.Duration
+
+	// ConnectivityCheckTimeout is the timeout the behaviour should use when performing a connectivity check.
+	ConnectivityCheckTimeout time.Duration
+
+	// ProbeRequestConcurrency is the maximum number of concurrent requests that the behaviour may have in flight while performing
+	// connectivity checks for nodes in the routing table.
+	ProbeRequestConcurrency int
+
+	// ProbeCheckInterval is the time interval the behaviour should use between connectivity checks for the same node in the routing table.
+	ProbeCheckInterval time.Duration
+
+	// IncludeQueueCapacity is the maximum number of nodes the behaviour should keep queued as candidates for inclusion in the routing table.
+	IncludeQueueCapacity int
+
+	// IncludeRequestConcurrency is the maximum number of concurrent requests that the behaviour may have in flight while performing
+	// connectivity checks for nodes in the inclusion candidate queue.
+	IncludeRequestConcurrency int
+
+	// ExploreTimeout is the time the behaviour should wait before terminating an exploration of a routing table bucket if it is not making progress.
+	ExploreTimeout time.Duration
+
+	// ExploreRequestConcurrency is the maximum number of concurrent requests that the behaviour may have in flight while exploring the
+	// network to increase routing table occupancy.
+	ExploreRequestConcurrency int
+
+	// ExploreRequestTimeout is the timeout the behaviour should use when attempting to contact a node while exploring the
+	// network to increase routing table occupancy.
+	ExploreRequestTimeout time.Duration
+
+	// ExploreMaximumCpl is the maximum CPL (common prefix length) the behaviour should explore to increase routing table occupancy.
+	// All CPLs from this value to zero will be explored on a repeating schedule.
+	ExploreMaximumCpl int
+
+	// ExploreInterval is the base time interval the behaviour should leave between explorations of the same CPL.
+	// See the documentation for [routing.DynamicExploreSchedule] for the precise formula used to calculate explore intervals.
+	ExploreInterval time.Duration
+
+	// ExploreIntervalMultiplier is a factor that is applied to the base time interval for CPLs lower than the maximum to increase the delay between
+	// explorations for lower CPLs.
+	// See the documentation for [routing.DynamicExploreSchedule] for the precise formula used to calculate explore intervals.
+	ExploreIntervalMultiplier float64
+
+	// ExploreIntervalJitter is a factor that is used to increase the calculated interval for an exploration by a small random amount.
+	// It must be between 0 and 0.05. When zero, no jitter is applied.
+	// See the documentation for [routing.DynamicExploreSchedule] for the precise formula used to calculate explore intervals.
+	ExploreIntervalJitter float64
+}
+
+// Validate checks the configuration options and returns an error if any have invalid values.
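+// For example, a jitter value outside the range [0, 0.05] in an otherwise
+// default configuration fails validation (an illustrative sketch):
+//
+//	cfg := DefaultRoutingConfig()
+//	cfg.ExploreIntervalJitter = 0.1
+//	err := cfg.Validate() // non-nil *errs.ConfigurationError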
+func (cfg *RoutingConfig) Validate() error { + if cfg.Clock == nil { + return &errs.ConfigurationError{ + Component: "RoutingConfig", + Err: fmt.Errorf("clock must not be nil"), + } + } + + if cfg.Logger == nil { + return &errs.ConfigurationError{ + Component: "RoutingConfig", + Err: fmt.Errorf("logger must not be nil"), + } + } + + if cfg.Tracer == nil { + return &errs.ConfigurationError{ + Component: "RoutingConfig", + Err: fmt.Errorf("tracer must not be nil"), + } + } + + if cfg.BootstrapTimeout < 1 { + return &errs.ConfigurationError{ + Component: "RoutingConfig", + Err: fmt.Errorf("bootstrap timeout must be greater than zero"), + } + } + + if cfg.BootstrapRequestConcurrency < 1 { + return &errs.ConfigurationError{ + Component: "RoutingConfig", + Err: fmt.Errorf("bootstrap request concurrency must be greater than zero"), + } + } + + if cfg.BootstrapRequestTimeout < 1 { + return &errs.ConfigurationError{ + Component: "RoutingConfig", + Err: fmt.Errorf("bootstrap request timeout must be greater than zero"), + } + } + + if cfg.ConnectivityCheckTimeout < 1 { + return &errs.ConfigurationError{ + Component: "RoutingConfig", + Err: fmt.Errorf("connectivity check timeout must be greater than zero"), + } + } + + if cfg.ProbeRequestConcurrency < 1 { + return &errs.ConfigurationError{ + Component: "RoutingConfig", + Err: fmt.Errorf("probe request concurrency must be greater than zero"), + } + } + + if cfg.ProbeCheckInterval < 1 { + return &errs.ConfigurationError{ + Component: "RoutingConfig", + Err: fmt.Errorf("probe check interval must be greater than zero"), + } + } + + if cfg.IncludeQueueCapacity < 1 { + return &errs.ConfigurationError{ + Component: "RoutingConfig", + Err: fmt.Errorf("include queue capacity must be greater than zero"), + } + } + + if cfg.IncludeRequestConcurrency < 1 { + return &errs.ConfigurationError{ + Component: "RoutingConfig", + Err: fmt.Errorf("include request concurrency must be greater than zero"), + } + } + + if cfg.ExploreTimeout < 1 { + return &errs.ConfigurationError{ + Component: "RoutingConfig", + Err: fmt.Errorf("explore timeout must be greater than zero"), + } + } + + if cfg.ExploreRequestConcurrency < 1 { + return &errs.ConfigurationError{ + Component: "RoutingConfig", + Err: fmt.Errorf("explore request concurrency must be greater than zero"), + } + } + + if cfg.ExploreRequestTimeout < 1 { + return &errs.ConfigurationError{ + Component: "RoutingConfig", + Err: fmt.Errorf("explore request timeout must be greater than zero"), + } + } + + if cfg.ExploreMaximumCpl < 1 { + return &errs.ConfigurationError{ + Component: "RoutingConfig", + Err: fmt.Errorf("explore maximum cpl must be greater than zero"), + } + } + + // This limit exists because we can only generate 15 bit prefixes [cplutil.GenRandPeerID]. 
+	if cfg.ExploreMaximumCpl > 15 {
+		return &errs.ConfigurationError{
+			Component: "RoutingConfig",
+			Err:       fmt.Errorf("explore maximum cpl must be 15 or less"),
+		}
+	}
+
+	if cfg.ExploreInterval < 1 {
+		return &errs.ConfigurationError{
+			Component: "RoutingConfig",
+			Err:       fmt.Errorf("explore interval must be greater than zero"),
+		}
+	}
+
+	if cfg.ExploreIntervalMultiplier < 1 {
+		return &errs.ConfigurationError{
+			Component: "RoutingConfig",
+			Err:       fmt.Errorf("explore interval multiplier must be one or greater"),
+		}
+	}
+
+	if cfg.ExploreIntervalJitter < 0 {
+		return &errs.ConfigurationError{
+			Component: "RoutingConfig",
+			Err:       fmt.Errorf("explore interval jitter must be 0 or greater"),
+		}
+	}
+
+	if cfg.ExploreIntervalJitter > 0.05 {
+		return &errs.ConfigurationError{
+			Component: "RoutingConfig",
+			Err:       fmt.Errorf("explore interval jitter must be 0.05 or less"),
+		}
+	}
+
+	return nil
+}
+
+func DefaultRoutingConfig() *RoutingConfig {
+	return &RoutingConfig{
+		Clock:  clock.New(),
+		Logger: tele.DefaultLogger("coord"),
+		Tracer: tele.NoopTracer(),
+
+		BootstrapTimeout:            5 * time.Minute, // MAGIC
+		BootstrapRequestConcurrency: 3,               // MAGIC
+		BootstrapRequestTimeout:     time.Minute,     // MAGIC
+
+		ConnectivityCheckTimeout: time.Minute, // MAGIC
+
+		ProbeRequestConcurrency: 3,             // MAGIC
+		ProbeCheckInterval:      6 * time.Hour, // MAGIC
+
+		IncludeRequestConcurrency: 3,   // MAGIC
+		IncludeQueueCapacity:      128, // MAGIC
+
+		ExploreTimeout:            5 * time.Minute, // MAGIC
+		ExploreRequestConcurrency: 3,               // MAGIC
+		ExploreRequestTimeout:     time.Minute,     // MAGIC
+		ExploreMaximumCpl:         14,
+		ExploreInterval:           time.Hour, // MAGIC
+		ExploreIntervalMultiplier: 1,         // MAGIC
+		ExploreIntervalJitter:     0,         // MAGIC
+
+	}
+}
+
// A RoutingBehaviour provides the behaviours for bootstrapping and maintaining a DHT's routing table.
type RoutingBehaviour struct {
	// self is the peer id of the system the dht is running on
	self kadt.PeerID
+	// cfg is a copy of the optional configuration supplied to the behaviour
+	cfg RoutingConfig
+
	// bootstrap is the bootstrap state machine, responsible for bootstrapping the routing table
	bootstrap coordt.StateMachine[routing.BootstrapEvent, routing.BootstrapState]
@@ -44,35 +293,97 @@ type RoutingBehaviour struct {
	pendingMu sync.Mutex
	pending   []BehaviourEvent
	ready     chan struct{}
+}
+
+func NewRoutingBehaviour(self kadt.PeerID, rt routing.RoutingTableCpl[kadt.Key, kadt.PeerID], cfg *RoutingConfig) (*RoutingBehaviour, error) {
+	if cfg == nil {
+		cfg = DefaultRoutingConfig()
+	} else if err := cfg.Validate(); err != nil {
+		return nil, err
+	}
+
+	bootstrapCfg := routing.DefaultBootstrapConfig()
+	bootstrapCfg.Clock = cfg.Clock
+	bootstrapCfg.Timeout = cfg.BootstrapTimeout
+	bootstrapCfg.RequestConcurrency = cfg.BootstrapRequestConcurrency
+	bootstrapCfg.RequestTimeout = cfg.BootstrapRequestTimeout
+
+	bootstrap, err := routing.NewBootstrap[kadt.Key](self, bootstrapCfg)
+	if err != nil {
+		return nil, fmt.Errorf("bootstrap: %w", err)
+	}
-	logger *slog.Logger
-	tracer trace.Tracer
+	includeCfg := routing.DefaultIncludeConfig()
+	includeCfg.Clock = cfg.Clock
+	includeCfg.Timeout = cfg.ConnectivityCheckTimeout
+	includeCfg.QueueCapacity = cfg.IncludeQueueCapacity
+	includeCfg.Concurrency = cfg.IncludeRequestConcurrency
+
+	include, err := routing.NewInclude[kadt.Key, kadt.PeerID](rt, includeCfg)
+	if err != nil {
+		return nil, fmt.Errorf("include: %w", err)
+	}
+
+	probeCfg := routing.DefaultProbeConfig()
+	probeCfg.Clock = cfg.Clock
+	probeCfg.Timeout = cfg.ConnectivityCheckTimeout
+	probeCfg.Concurrency = cfg.ProbeRequestConcurrency
+	probeCfg.CheckInterval = cfg.ProbeCheckInterval
+
+	probe, err := routing.NewProbe[kadt.Key](rt, probeCfg)
+	if err != nil {
+		return nil, fmt.Errorf("probe: %w", err)
+	}
+
+	exploreCfg := routing.DefaultExploreConfig()
+	exploreCfg.Clock = cfg.Clock
+	exploreCfg.Timeout = cfg.ExploreTimeout
+	exploreCfg.RequestConcurrency = cfg.ExploreRequestConcurrency
+	exploreCfg.RequestTimeout = cfg.ExploreRequestTimeout
+
+	schedule, err := routing.NewDynamicExploreSchedule(cfg.ExploreMaximumCpl, cfg.Clock.Now(), cfg.ExploreInterval, cfg.ExploreIntervalMultiplier, cfg.ExploreIntervalJitter)
+	if err != nil {
+		return nil, fmt.Errorf("explore schedule: %w", err)
+	}
+
+	explore, err := routing.NewExplore[kadt.Key](self, rt, cplutil.GenRandPeerID, schedule, exploreCfg)
+	if err != nil {
+		return nil, fmt.Errorf("explore: %w", err)
+	}
+
+	return ComposeRoutingBehaviour(self, bootstrap, include, probe, explore, cfg)
}
-func NewRoutingBehaviour(
+// ComposeRoutingBehaviour creates a [RoutingBehaviour] composed of the supplied state machines.
+// The state machines are assumed to be pre-configured, so any [RoutingConfig] values relating to the state machines will not be applied.
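+// For example, the tests in this package compose recording or idle state machines
+// with a default configuration (an illustrative sketch):
+//
+//	cfg := DefaultRoutingConfig()
+//	cfg.Clock = clk
+//	rb, err := ComposeRoutingBehaviour(self, idleBootstrap(), idleInclude(), idleProbe(), idleExplore(), cfg)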
+func ComposeRoutingBehaviour( self kadt.PeerID, bootstrap coordt.StateMachine[routing.BootstrapEvent, routing.BootstrapState], include coordt.StateMachine[routing.IncludeEvent, routing.IncludeState], probe coordt.StateMachine[routing.ProbeEvent, routing.ProbeState], explore coordt.StateMachine[routing.ExploreEvent, routing.ExploreState], - logger *slog.Logger, - tracer trace.Tracer, -) *RoutingBehaviour { + cfg *RoutingConfig, +) (*RoutingBehaviour, error) { + if cfg == nil { + cfg = DefaultRoutingConfig() + } else if err := cfg.Validate(); err != nil { + return nil, err + } + r := &RoutingBehaviour{ self: self, + cfg: *cfg, bootstrap: bootstrap, include: include, probe: probe, explore: explore, ready: make(chan struct{}, 1), - logger: logger.With("behaviour", "routing"), - tracer: tracer, } - return r + return r, nil } func (r *RoutingBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { - ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.Notify") + ctx, span := r.cfg.Tracer.Start(ctx, "RoutingBehaviour.Notify") defer span.End() r.pendingMu.Lock() @@ -82,7 +393,7 @@ func (r *RoutingBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { // notify must only be called while r.pendingMu is held func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { - ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.notify", trace.WithAttributes(attribute.String("event", fmt.Sprintf("%T", ev)))) + ctx, span := r.cfg.Tracer.Start(ctx, "RoutingBehaviour.notify", trace.WithAttributes(attribute.String("event", fmt.Sprintf("%T", ev)))) defer span.End() switch ev := ev.(type) { @@ -302,7 +613,7 @@ func (r *RoutingBehaviour) Ready() <-chan struct{} { } func (r *RoutingBehaviour) Perform(ctx context.Context) (BehaviourEvent, bool) { - ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.Perform") + ctx, span := r.cfg.Tracer.Start(ctx, "RoutingBehaviour.Perform") defer span.End() // No inbound work can be done until Perform is complete @@ -358,7 +669,7 @@ func (r *RoutingBehaviour) pollChildren(ctx context.Context) { } func (r *RoutingBehaviour) advanceBootstrap(ctx context.Context, ev routing.BootstrapEvent) (BehaviourEvent, bool) { - ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.advanceBootstrap") + ctx, span := r.cfg.Tracer.Start(ctx, "RoutingBehaviour.advanceBootstrap") defer span.End() bstate := r.bootstrap.Advance(ctx, ev) switch st := bstate.(type) { @@ -387,7 +698,7 @@ func (r *RoutingBehaviour) advanceBootstrap(ctx context.Context, ev routing.Boot } func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.IncludeEvent) (BehaviourEvent, bool) { - ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.advanceInclude") + ctx, span := r.cfg.Tracer.Start(ctx, "RoutingBehaviour.advanceInclude") defer span.End() istate := r.include.Advance(ctx, ev) @@ -431,7 +742,7 @@ func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.Includ } func (r *RoutingBehaviour) advanceProbe(ctx context.Context, ev routing.ProbeEvent) (BehaviourEvent, bool) { - ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.advanceProbe") + ctx, span := r.cfg.Tracer.Start(ctx, "RoutingBehaviour.advanceProbe") defer span.End() st := r.probe.Advance(ctx, ev) switch st := st.(type) { @@ -472,7 +783,7 @@ func (r *RoutingBehaviour) advanceProbe(ctx context.Context, ev routing.ProbeEve } func (r *RoutingBehaviour) advanceExplore(ctx context.Context, ev routing.ExploreEvent) (BehaviourEvent, bool) { - ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.advanceExplore") + ctx, span := 
r.cfg.Tracer.Start(ctx, "RoutingBehaviour.advanceExplore")
	defer span.End()
	bstate := r.explore.Advance(ctx, ev)
	switch st := bstate.(type) {
@@ -492,7 +803,7 @@ func (r *RoutingBehaviour) advanceExplore(ctx context.Context, ev routing.Explor
	case *routing.StateExploreQueryTimeout:
		// nothing to do except notify via telemetry
	case *routing.StateExploreFailure:
-		r.logger.Warn("explore failure", "cpl", st.Cpl, "error", st.Error)
+		r.cfg.Logger.Warn("explore failure", "cpl", st.Cpl, "error", st.Error)
	case *routing.StateExploreIdle:
		// explore not running, nothing to do
	default:
diff --git a/v2/internal/coord/routing/bootstrap.go b/v2/internal/coord/routing/bootstrap.go
index 9b098f0b..ab234593 100644
--- a/v2/internal/coord/routing/bootstrap.go
+++ b/v2/internal/coord/routing/bootstrap.go
@@ -7,10 +7,10 @@ import (
	"github.com/benbjohnson/clock"
	"github.com/plprobelab/go-kademlia/kad"
-	"github.com/plprobelab/go-kademlia/kaderr"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/errs"
	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt"
	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query"
	"github.com/libp2p/go-libp2p-kad-dht/v2/tele"
@@ -27,11 +27,11 @@ type Bootstrap[K kad.Key[K], N kad.NodeID[K]] struct {
	qry *query.Query[K, N, any]
	// cfg is a copy of the optional configuration supplied to the Bootstrap
-	cfg BootstrapConfig[K]
+	cfg BootstrapConfig
}
// BootstrapConfig specifies optional configuration for a Bootstrap
-type BootstrapConfig[K kad.Key[K]] struct {
+type BootstrapConfig struct {
	Timeout            time.Duration // the time to wait before terminating a query that is not making progress
	RequestConcurrency int           // the maximum number of concurrent requests that each query may have in flight
	RequestTimeout     time.Duration // the timeout queries should use for contacting a single node
@@ -39,30 +39,30 @@ type BootstrapConfig[K kad.Key[K]] struct {
}
// Validate checks the configuration options and returns an error if any have invalid values.
-func (cfg *BootstrapConfig[K]) Validate() error {
+func (cfg *BootstrapConfig) Validate() error {
	if cfg.Clock == nil {
-		return &kaderr.ConfigurationError{
+		return &errs.ConfigurationError{
			Component: "BootstrapConfig",
			Err:       fmt.Errorf("clock must not be nil"),
		}
	}
	if cfg.Timeout < 1 {
-		return &kaderr.ConfigurationError{
+		return &errs.ConfigurationError{
			Component: "BootstrapConfig",
			Err:       fmt.Errorf("timeout must be greater than zero"),
		}
	}
	if cfg.RequestConcurrency < 1 {
-		return &kaderr.ConfigurationError{
+		return &errs.ConfigurationError{
			Component: "BootstrapConfig",
			Err:       fmt.Errorf("request concurrency must be greater than zero"),
		}
	}
	if cfg.RequestTimeout < 1 {
-		return &kaderr.ConfigurationError{
+		return &errs.ConfigurationError{
			Component: "BootstrapConfig",
			Err:       fmt.Errorf("request timeout must be greater than zero"),
		}
@@ -73,18 +73,18 @@ func (cfg *BootstrapConfig[K]) Validate() error {
// DefaultBootstrapConfig returns the default configuration options for a Bootstrap.
// Options may be overridden before passing to NewBootstrap -func DefaultBootstrapConfig[K kad.Key[K]]() *BootstrapConfig[K] { - return &BootstrapConfig[K]{ - Clock: clock.New(), // use standard time - Timeout: 5 * time.Minute, - RequestConcurrency: 3, - RequestTimeout: time.Minute, +func DefaultBootstrapConfig() *BootstrapConfig { + return &BootstrapConfig{ + Clock: clock.New(), // use standard time + Timeout: 5 * time.Minute, // MAGIC + RequestConcurrency: 3, // MAGIC + RequestTimeout: time.Minute, // MAGIC } } -func NewBootstrap[K kad.Key[K], N kad.NodeID[K]](self N, cfg *BootstrapConfig[K]) (*Bootstrap[K, N], error) { +func NewBootstrap[K kad.Key[K], N kad.NodeID[K]](self N, cfg *BootstrapConfig) (*Bootstrap[K, N], error) { if cfg == nil { - cfg = DefaultBootstrapConfig[K]() + cfg = DefaultBootstrapConfig() } else if err := cfg.Validate(); err != nil { return nil, err } diff --git a/v2/internal/coord/routing/bootstrap_test.go b/v2/internal/coord/routing/bootstrap_test.go index 29123980..1dd6611e 100644 --- a/v2/internal/coord/routing/bootstrap_test.go +++ b/v2/internal/coord/routing/bootstrap_test.go @@ -14,18 +14,18 @@ import ( func TestBootstrapConfigValidate(t *testing.T) { t.Run("default is valid", func(t *testing.T) { - cfg := DefaultBootstrapConfig[tiny.Key]() + cfg := DefaultBootstrapConfig() require.NoError(t, cfg.Validate()) }) t.Run("clock is not nil", func(t *testing.T) { - cfg := DefaultBootstrapConfig[tiny.Key]() + cfg := DefaultBootstrapConfig() cfg.Clock = nil require.Error(t, cfg.Validate()) }) t.Run("timeout positive", func(t *testing.T) { - cfg := DefaultBootstrapConfig[tiny.Key]() + cfg := DefaultBootstrapConfig() cfg.Timeout = 0 require.Error(t, cfg.Validate()) cfg.Timeout = -1 @@ -33,7 +33,7 @@ func TestBootstrapConfigValidate(t *testing.T) { }) t.Run("request concurrency positive", func(t *testing.T) { - cfg := DefaultBootstrapConfig[tiny.Key]() + cfg := DefaultBootstrapConfig() cfg.RequestConcurrency = 0 require.Error(t, cfg.Validate()) cfg.RequestConcurrency = -1 @@ -41,7 +41,7 @@ func TestBootstrapConfigValidate(t *testing.T) { }) t.Run("request timeout positive", func(t *testing.T) { - cfg := DefaultBootstrapConfig[tiny.Key]() + cfg := DefaultBootstrapConfig() cfg.RequestTimeout = 0 require.Error(t, cfg.Validate()) cfg.RequestTimeout = -1 @@ -52,7 +52,7 @@ func TestBootstrapConfigValidate(t *testing.T) { func TestBootstrapStartsIdle(t *testing.T) { ctx := context.Background() clk := clock.NewMock() - cfg := DefaultBootstrapConfig[tiny.Key]() + cfg := DefaultBootstrapConfig() cfg.Clock = clk self := tiny.NewNode(0) @@ -66,7 +66,7 @@ func TestBootstrapStartsIdle(t *testing.T) { func TestBootstrapStart(t *testing.T) { ctx := context.Background() clk := clock.NewMock() - cfg := DefaultBootstrapConfig[tiny.Key]() + cfg := DefaultBootstrapConfig() cfg.Clock = clk self := tiny.NewNode(0) @@ -101,7 +101,7 @@ func TestBootstrapStart(t *testing.T) { func TestBootstrapMessageResponse(t *testing.T) { ctx := context.Background() clk := clock.NewMock() - cfg := DefaultBootstrapConfig[tiny.Key]() + cfg := DefaultBootstrapConfig() cfg.Clock = clk self := tiny.NewNode(0) @@ -137,7 +137,7 @@ func TestBootstrapMessageResponse(t *testing.T) { func TestBootstrapProgress(t *testing.T) { ctx := context.Background() clk := clock.NewMock() - cfg := DefaultBootstrapConfig[tiny.Key]() + cfg := DefaultBootstrapConfig() cfg.Clock = clk cfg.RequestConcurrency = 3 // 1 less than the 4 nodes to be visited diff --git a/v2/internal/coord/routing/explore.go 
b/v2/internal/coord/routing/explore.go
index 954dd6aa..41a42ae1 100644
--- a/v2/internal/coord/routing/explore.go
+++ b/v2/internal/coord/routing/explore.go
@@ -12,6 +12,7 @@ import (
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/errs"
	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt"
	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/query"
	"github.com/libp2p/go-libp2p-kad-dht/v2/tele"
@@ -72,16 +73,39 @@ type ExploreConfig struct {
	// Timeout is the maximum time to allow for performing an explore for a CPL.
	Timeout time.Duration
+
+	RequestConcurrency int           // the maximum number of concurrent requests that each query may have in flight
+	RequestTimeout     time.Duration // the timeout queries should use for contacting a single node
}
// Validate checks the configuration options and returns an error if any have invalid values.
func (cfg *ExploreConfig) Validate() error {
	if cfg.Clock == nil {
-		return fmt.Errorf("clock must not be nil")
+		return &errs.ConfigurationError{
+			Component: "ExploreConfig",
+			Err:       fmt.Errorf("clock must not be nil"),
+		}
	}
	if cfg.Timeout < 1 {
-		return fmt.Errorf("timeout must be greater than zero")
+		return &errs.ConfigurationError{
+			Component: "ExploreConfig",
+			Err:       fmt.Errorf("timeout must be greater than zero"),
+		}
+	}
+
+	if cfg.RequestConcurrency < 1 {
+		return &errs.ConfigurationError{
+			Component: "ExploreConfig",
+			Err:       fmt.Errorf("request concurrency must be greater than zero"),
+		}
+	}
+
+	if cfg.RequestTimeout < 1 {
+		return &errs.ConfigurationError{
+			Component: "ExploreConfig",
+			Err:       fmt.Errorf("request timeout must be greater than zero"),
+		}
	}
	return nil
@@ -91,8 +115,10 @@ func (cfg *ExploreConfig) Validate() error {
// DefaultExploreConfig returns the default configuration options for an Explore.
// Options may be overridden before passing to [NewExplore].
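// For example (an illustrative sketch):
//
//	cfg := DefaultExploreConfig()
//	cfg.RequestConcurrency = 5
//	cfg.RequestTimeout = 30 * time.Second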
func DefaultExploreConfig() *ExploreConfig { return &ExploreConfig{ - Clock: clock.New(), // use standard time - Timeout: 10 * time.Minute, // MAGIC + Clock: clock.New(), // use standard time + Timeout: 10 * time.Minute, // MAGIC + RequestConcurrency: 3, // MAGIC + RequestTimeout: time.Minute, // MAGIC } } @@ -163,8 +189,8 @@ func (e *Explore[K, N]) Advance(ctx context.Context, ev ExploreEvent) ExploreSta qryCfg := query.DefaultQueryConfig() qryCfg.Clock = e.cfg.Clock - // qryCfg.Concurrency = b.cfg.RequestConcurrency - // qryCfg.RequestTimeout = b.cfg.RequestTimeout + qryCfg.Concurrency = e.cfg.RequestConcurrency + qryCfg.RequestTimeout = e.cfg.RequestTimeout qry, err := query.NewFindCloserQuery[K, N, any](e.self, ExploreQueryID, node.Key(), iter, seeds, qryCfg) if err != nil { diff --git a/v2/internal/coord/routing/explore_test.go b/v2/internal/coord/routing/explore_test.go index f2a6903d..6be7a4fd 100644 --- a/v2/internal/coord/routing/explore_test.go +++ b/v2/internal/coord/routing/explore_test.go @@ -32,6 +32,22 @@ func TestExploreConfigValidate(t *testing.T) { cfg.Timeout = -1 require.Error(t, cfg.Validate()) }) + + t.Run("request concurrency positive", func(t *testing.T) { + cfg := DefaultExploreConfig() + cfg.RequestConcurrency = 0 + require.Error(t, cfg.Validate()) + cfg.RequestConcurrency = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("request timeout positive", func(t *testing.T) { + cfg := DefaultExploreConfig() + cfg.RequestTimeout = 0 + require.Error(t, cfg.Validate()) + cfg.RequestTimeout = -1 + require.Error(t, cfg.Validate()) + }) } // maxCpl is 7 since we are using tiny 8-bit keys diff --git a/v2/internal/coord/routing/include.go b/v2/internal/coord/routing/include.go index 2ec3c5bd..4cc53f9e 100644 --- a/v2/internal/coord/routing/include.go +++ b/v2/internal/coord/routing/include.go @@ -7,10 +7,10 @@ import ( "github.com/benbjohnson/clock" "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/kaderr" "github.com/plprobelab/go-kademlia/key" "go.opentelemetry.io/otel/trace" + "github.com/libp2p/go-libp2p-kad-dht/v2/errs" "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) @@ -42,28 +42,28 @@ type IncludeConfig struct { // Validate checks the configuration options and returns an error if any have invalid values. 
func (cfg *IncludeConfig) Validate() error { if cfg.Clock == nil { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "IncludeConfig", Err: fmt.Errorf("clock must not be nil"), } } if cfg.Concurrency < 1 { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "IncludeConfig", Err: fmt.Errorf("concurrency must be greater than zero"), } } if cfg.Timeout < 1 { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "IncludeConfig", Err: fmt.Errorf("timeout must be greater than zero"), } } if cfg.QueueCapacity < 1 { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "IncludeConfig", Err: fmt.Errorf("queue size must be greater than zero"), } diff --git a/v2/internal/coord/routing/probe.go b/v2/internal/coord/routing/probe.go index ec686946..37856ee2 100644 --- a/v2/internal/coord/routing/probe.go +++ b/v2/internal/coord/routing/probe.go @@ -9,11 +9,11 @@ import ( "github.com/benbjohnson/clock" "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/kaderr" "github.com/plprobelab/go-kademlia/key" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" + "github.com/libp2p/go-libp2p-kad-dht/v2/errs" "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) @@ -78,28 +78,28 @@ type ProbeConfig struct { // Validate checks the configuration options and returns an error if any have invalid values. func (cfg *ProbeConfig) Validate() error { if cfg.Clock == nil { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "ProbeConfig", Err: fmt.Errorf("clock must not be nil"), } } if cfg.Concurrency < 1 { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "ProbeConfig", Err: fmt.Errorf("concurrency must be greater than zero"), } } if cfg.Timeout < 1 { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "ProbeConfig", Err: fmt.Errorf("timeout must be greater than zero"), } } if cfg.CheckInterval < 1 { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "ProbeConfig", Err: fmt.Errorf("revisit interval must be greater than zero"), } diff --git a/v2/internal/coord/routing_test.go b/v2/internal/coord/routing_test.go index dbc7b456..a13bdd4b 100644 --- a/v2/internal/coord/routing_test.go +++ b/v2/internal/coord/routing_test.go @@ -5,16 +5,12 @@ import ( "testing" "time" - "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" - "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/cplutil" - - "go.opentelemetry.io/otel" - "github.com/benbjohnson/clock" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" - "golang.org/x/exp/slog" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/cplutil" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/nettest" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/routing" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" @@ -41,6 +37,174 @@ func idleExplore() *RecordingSM[routing.ExploreEvent, routing.ExploreState] { return NewRecordingSM[routing.ExploreEvent, routing.ExploreState](&routing.StateExploreIdle{}) } +func TestRoutingConfigValidate(t *testing.T) { + t.Run("default is valid", func(t *testing.T) { + cfg := DefaultRoutingConfig() + + require.NoError(t, cfg.Validate()) + }) + + t.Run("clock is not nil", func(t *testing.T) { + cfg := DefaultRoutingConfig() + + cfg.Clock = nil + 
require.Error(t, cfg.Validate()) + }) + + t.Run("logger not nil", func(t *testing.T) { + cfg := DefaultRoutingConfig() + cfg.Logger = nil + require.Error(t, cfg.Validate()) + }) + + t.Run("tracer not nil", func(t *testing.T) { + cfg := DefaultRoutingConfig() + cfg.Tracer = nil + require.Error(t, cfg.Validate()) + }) + + t.Run("bootstrap timeout positive", func(t *testing.T) { + cfg := DefaultRoutingConfig() + cfg.BootstrapTimeout = 0 + require.Error(t, cfg.Validate()) + cfg.BootstrapTimeout = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("bootstrap request concurrency positive", func(t *testing.T) { + cfg := DefaultRoutingConfig() + cfg.BootstrapRequestConcurrency = 0 + require.Error(t, cfg.Validate()) + cfg.BootstrapRequestConcurrency = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("bootstrap request timeout positive", func(t *testing.T) { + cfg := DefaultRoutingConfig() + cfg.BootstrapRequestTimeout = 0 + require.Error(t, cfg.Validate()) + cfg.BootstrapRequestTimeout = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("connectivity check timeout positive", func(t *testing.T) { + cfg := DefaultRoutingConfig() + cfg.ConnectivityCheckTimeout = 0 + require.Error(t, cfg.Validate()) + cfg.ConnectivityCheckTimeout = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("probe request concurrency positive", func(t *testing.T) { + cfg := DefaultRoutingConfig() + + cfg.ProbeRequestConcurrency = 0 + require.Error(t, cfg.Validate()) + cfg.ProbeRequestConcurrency = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("probe check interval positive", func(t *testing.T) { + cfg := DefaultRoutingConfig() + cfg.ProbeCheckInterval = 0 + require.Error(t, cfg.Validate()) + cfg.ProbeCheckInterval = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("include request concurrency positive", func(t *testing.T) { + cfg := DefaultRoutingConfig() + + cfg.IncludeRequestConcurrency = 0 + require.Error(t, cfg.Validate()) + cfg.IncludeRequestConcurrency = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("include queue capacity positive", func(t *testing.T) { + cfg := DefaultRoutingConfig() + + cfg.IncludeQueueCapacity = 0 + require.Error(t, cfg.Validate()) + cfg.IncludeQueueCapacity = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("explore timeout positive", func(t *testing.T) { + cfg := DefaultRoutingConfig() + + cfg.ExploreTimeout = 0 + require.Error(t, cfg.Validate()) + cfg.ExploreTimeout = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("explore request concurrency positive", func(t *testing.T) { + cfg := DefaultRoutingConfig() + + cfg.ExploreRequestConcurrency = 0 + require.Error(t, cfg.Validate()) + cfg.ExploreRequestConcurrency = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("explore request timeout positive", func(t *testing.T) { + cfg := DefaultRoutingConfig() + + cfg.ExploreRequestTimeout = 0 + require.Error(t, cfg.Validate()) + cfg.ExploreRequestTimeout = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("explore maximum cpl positive", func(t *testing.T) { + cfg := DefaultRoutingConfig() + + cfg.ExploreMaximumCpl = 0 + require.Error(t, cfg.Validate()) + cfg.ExploreMaximumCpl = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("explore maximum 15 or less", func(t *testing.T) { + cfg := DefaultRoutingConfig() + + cfg.ExploreMaximumCpl = 16 + require.Error(t, cfg.Validate()) + }) + + t.Run("explore interval positive", func(t *testing.T) { + cfg := DefaultRoutingConfig() + + cfg.ExploreInterval = 0 + require.Error(t, cfg.Validate()) + cfg.ExploreInterval = -1 + 
require.Error(t, cfg.Validate()) + }) + + t.Run("explore interval multiplier at least 1", func(t *testing.T) { + cfg := DefaultRoutingConfig() + + cfg.ExploreIntervalMultiplier = 0 + require.Error(t, cfg.Validate()) + cfg.ExploreIntervalMultiplier = 0.9 + require.Error(t, cfg.Validate()) + cfg.ExploreIntervalMultiplier = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("explore interval between 0 and 0.05", func(t *testing.T) { + cfg := DefaultRoutingConfig() + + cfg.ExploreIntervalJitter = 0.1 + require.Error(t, cfg.Validate()) + cfg.ExploreIntervalJitter = 0.05001 + require.Error(t, cfg.Validate()) + cfg.ExploreIntervalJitter = -0.1 + require.Error(t, cfg.Validate()) + }) +} + func TestRoutingStartBootstrapSendsEvent(t *testing.T) { ctx := kadtest.CtxShort(t) @@ -53,7 +217,10 @@ func TestRoutingStartBootstrapSendsEvent(t *testing.T) { // records the event passed to bootstrap bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) - routingBehaviour := NewRoutingBehaviour(self, bootstrap, idleInclude(), idleProbe(), idleExplore(), slog.Default(), otel.Tracer("test")) + cfg := DefaultRoutingConfig() + cfg.Clock = clk + routingBehaviour, err := ComposeRoutingBehaviour(self, bootstrap, idleInclude(), idleProbe(), idleExplore(), cfg) + require.NoError(t, err) ev := &EventStartBootstrap{ SeedNodes: []kadt.PeerID{nodes[1].NodeID}, @@ -80,7 +247,10 @@ func TestRoutingBootstrapGetClosestNodesSuccess(t *testing.T) { // records the event passed to bootstrap bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) - routingBehaviour := NewRoutingBehaviour(self, bootstrap, idleInclude(), idleProbe(), idleExplore(), slog.Default(), otel.Tracer("test")) + cfg := DefaultRoutingConfig() + cfg.Clock = clk + routingBehaviour, err := ComposeRoutingBehaviour(self, bootstrap, idleInclude(), idleProbe(), idleExplore(), cfg) + require.NoError(t, err) ev := &EventGetCloserNodesSuccess{ QueryID: routing.BootstrapQueryID, @@ -111,7 +281,10 @@ func TestRoutingBootstrapGetClosestNodesFailure(t *testing.T) { // records the event passed to bootstrap bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) - routingBehaviour := NewRoutingBehaviour(self, bootstrap, idleInclude(), idleProbe(), idleExplore(), slog.Default(), otel.Tracer("test")) + cfg := DefaultRoutingConfig() + cfg.Clock = clk + routingBehaviour, err := ComposeRoutingBehaviour(self, bootstrap, idleInclude(), idleProbe(), idleExplore(), cfg) + require.NoError(t, err) failure := errors.New("failed") ev := &EventGetCloserNodesFailure{ @@ -143,7 +316,10 @@ func TestRoutingAddNodeInfoSendsEvent(t *testing.T) { // records the event passed to include include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) - routingBehaviour := NewRoutingBehaviour(self, idleBootstrap(), include, idleProbe(), idleExplore(), slog.Default(), otel.Tracer("test")) + cfg := DefaultRoutingConfig() + cfg.Clock = clk + routingBehaviour, err := ComposeRoutingBehaviour(self, idleBootstrap(), include, idleProbe(), idleExplore(), cfg) + require.NoError(t, err) ev := &EventAddNode{ NodeID: nodes[2].NodeID, @@ -170,7 +346,10 @@ func TestRoutingIncludeGetClosestNodesSuccess(t *testing.T) { // records the event passed to include include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) - routingBehaviour := NewRoutingBehaviour(self, idleBootstrap(), include, 
idleProbe(), idleExplore(), slog.Default(), otel.Tracer("test")) + cfg := DefaultRoutingConfig() + cfg.Clock = clk + routingBehaviour, err := ComposeRoutingBehaviour(self, idleBootstrap(), include, idleProbe(), idleExplore(), cfg) + require.NoError(t, err) ev := &EventGetCloserNodesSuccess{ QueryID: coordt.QueryID("include"), @@ -200,7 +379,10 @@ func TestRoutingIncludeGetClosestNodesFailure(t *testing.T) { // records the event passed to include include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) - routingBehaviour := NewRoutingBehaviour(self, idleBootstrap(), include, idleProbe(), idleExplore(), slog.Default(), otel.Tracer("test")) + cfg := DefaultRoutingConfig() + cfg.Clock = clk + routingBehaviour, err := ComposeRoutingBehaviour(self, idleBootstrap(), include, idleProbe(), idleExplore(), cfg) + require.NoError(t, err) failure := errors.New("failed") ev := &EventGetCloserNodesFailure{ @@ -241,7 +423,10 @@ func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { probe, err := routing.NewProbe[kadt.Key](rt, probeCfg) require.NoError(t, err) - routingBehaviour := NewRoutingBehaviour(self, idleBootstrap(), include, probe, idleExplore(), slog.Default(), otel.Tracer("test")) + cfg := DefaultRoutingConfig() + cfg.Clock = clk + routingBehaviour, err := ComposeRoutingBehaviour(self, idleBootstrap(), include, probe, idleExplore(), cfg) + require.NoError(t, err) // a new node to be included candidate := nodes[len(nodes)-1].NodeID @@ -317,7 +502,10 @@ func TestRoutingExploreSendsEvent(t *testing.T) { explore, err := routing.NewExplore[kadt.Key](self, rt, cplutil.GenRandPeerID, schedule, exploreCfg) require.NoError(t, err) - routingBehaviour := NewRoutingBehaviour(self, idleBootstrap(), idleInclude(), idleProbe(), explore, slog.Default(), otel.Tracer("test")) + cfg := DefaultRoutingConfig() + cfg.Clock = clk + routingBehaviour, err := ComposeRoutingBehaviour(self, idleBootstrap(), idleInclude(), idleProbe(), explore, cfg) + require.NoError(t, err) routingBehaviour.Notify(ctx, &EventRoutingPoll{}) @@ -347,7 +535,10 @@ func TestRoutingExploreGetClosestNodesSuccess(t *testing.T) { // records the event passed to explore explore := NewRecordingSM[routing.ExploreEvent, routing.ExploreState](&routing.StateExploreIdle{}) - routingBehaviour := NewRoutingBehaviour(self, idleBootstrap(), idleInclude(), idleProbe(), explore, slog.Default(), otel.Tracer("test")) + cfg := DefaultRoutingConfig() + cfg.Clock = clk + routingBehaviour, err := ComposeRoutingBehaviour(self, idleBootstrap(), idleInclude(), idleProbe(), explore, cfg) + require.NoError(t, err) ev := &EventGetCloserNodesSuccess{ QueryID: routing.ExploreQueryID, @@ -377,7 +568,10 @@ func TestRoutingExploreGetClosestNodesFailure(t *testing.T) { // records the event passed to explore explore := NewRecordingSM[routing.ExploreEvent, routing.ExploreState](&routing.StateExploreIdle{}) - routingBehaviour := NewRoutingBehaviour(self, idleBootstrap(), idleInclude(), idleProbe(), explore, slog.Default(), otel.Tracer("test")) + cfg := DefaultRoutingConfig() + cfg.Clock = clk + routingBehaviour, err := ComposeRoutingBehaviour(self, idleBootstrap(), idleInclude(), idleProbe(), explore, cfg) + require.NoError(t, err) failure := errors.New("failed") ev := &EventGetCloserNodesFailure{ diff --git a/v2/tele/log.go b/v2/tele/log.go new file mode 100644 index 00000000..cc65f7e4 --- /dev/null +++ b/v2/tele/log.go @@ -0,0 +1,11 @@ +package tele + +import ( + logging "github.com/ipfs/go-log/v2" + "go.uber.org/zap/exp/zapslog" + 
"golang.org/x/exp/slog" +) + +func DefaultLogger(system string) *slog.Logger { + return slog.New(zapslog.NewHandler(logging.Logger(system).Desugar().Core())) +} diff --git a/v2/tele/tele.go b/v2/tele/tele.go index 29b0e4ab..b905da6b 100644 --- a/v2/tele/tele.go +++ b/v2/tele/tele.go @@ -20,6 +20,11 @@ const ( TracerName = "go-libp2p-kad-dht" ) +// NoopTracer returns a tracer that does not emit traces. +func NoopTracer() trace.Tracer { + return trace.NewNoopTracerProvider().Tracer("") +} + // attrsCtxKey is the actual context key value that's used as a key for // metric values that are attached to a context. var attrsCtxKey = ctxKey{} From dd5e5373746c410a241d54a203861e19ddb647b4 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Tue, 26 Sep 2023 17:48:04 +0200 Subject: [PATCH 55/64] feat: findProvidersAsync (#938) * feat: findProvidersAsync * add: find providers tests * add config tests --- v2/backend.go | 3 +- v2/config_test.go | 27 +++ v2/handlers_test.go | 1 + v2/internal/coord/coordinator.go | 8 +- v2/internal/coord/query/query.go | 2 +- v2/routing.go | 99 ++++++++++- v2/routing_test.go | 278 +++++++++++++++++++++++++++++++ 7 files changed, 402 insertions(+), 16 deletions(-) diff --git a/v2/backend.go b/v2/backend.go index 4e5d313f..cccb2c36 100644 --- a/v2/backend.go +++ b/v2/backend.go @@ -52,7 +52,8 @@ type Backend interface { Store(ctx context.Context, key string, value any) (any, error) // Fetch returns the record for the given path or a [ds.ErrNotFound] if it - // wasn't found or another error if any occurred. + // wasn't found or another error if any occurred. key won't contain the + // namespace prefix. Fetch(ctx context.Context, key string) (any, error) } diff --git a/v2/config_test.go b/v2/config_test.go index 739216ab..ad84b8d4 100644 --- a/v2/config_test.go +++ b/v2/config_test.go @@ -78,6 +78,33 @@ func TestConfig_Validate(t *testing.T) { assert.Error(t, cfg.Validate()) }) + t.Run("backends for ipfs protocol (public key missing)", func(t *testing.T) { + cfg := DefaultConfig() + cfg.ProtocolID = ProtocolIPFS + cfg.Backends[namespaceProviders] = &RecordBackend{} + cfg.Backends[namespaceIPNS] = &RecordBackend{} + cfg.Backends["another"] = &RecordBackend{} + assert.Error(t, cfg.Validate()) + }) + + t.Run("backends for ipfs protocol (ipns missing)", func(t *testing.T) { + cfg := DefaultConfig() + cfg.ProtocolID = ProtocolIPFS + cfg.Backends[namespaceProviders] = &RecordBackend{} + cfg.Backends["another"] = &RecordBackend{} + cfg.Backends[namespacePublicKey] = &RecordBackend{} + assert.Error(t, cfg.Validate()) + }) + + t.Run("backends for ipfs protocol (providers missing)", func(t *testing.T) { + cfg := DefaultConfig() + cfg.ProtocolID = ProtocolIPFS + cfg.Backends["another"] = &RecordBackend{} + cfg.Backends[namespaceIPNS] = &RecordBackend{} + cfg.Backends[namespacePublicKey] = &RecordBackend{} + assert.Error(t, cfg.Validate()) + }) + t.Run("nil address filter", func(t *testing.T) { cfg := DefaultConfig() cfg.AddressFilter = nil diff --git a/v2/handlers_test.go b/v2/handlers_test.go index a94816e1..6910c347 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -34,6 +34,7 @@ var rng = rand.New(rand.NewSource(1337)) func newTestDHT(t testing.TB) *DHT { cfg := DefaultConfig() + cfg.Logger = devnull return newTestDHTWithConfig(t, cfg) } diff --git a/v2/internal/coord/coordinator.go b/v2/internal/coord/coordinator.go index afe712d0..b167c840 100644 --- a/v2/internal/coord/coordinator.go +++ b/v2/internal/coord/coordinator.go @@ -385,7 +385,7 @@ func (c *Coordinator) 
QueryMessage(ctx context.Context, msg *pb.Message, fn coor defer cancel() if numResults < 1 { - numResults = 20 + numResults = 20 // TODO: parameterize } seeds, err := c.GetClosestNodes(ctx, msg.Target(), numResults) @@ -424,7 +424,7 @@ func (c *Coordinator) BroadcastRecord(ctx context.Context, msg *pb.Message) erro ctx, cancel := context.WithCancel(ctx) defer cancel() - seeds, err := c.GetClosestNodes(ctx, msg.Target(), 20) + seeds, err := c.GetClosestNodes(ctx, msg.Target(), 20) // TODO: parameterize if err != nil { return err } @@ -449,9 +449,7 @@ func (c *Coordinator) BroadcastRecord(ctx context.Context, msg *pb.Message) erro // queue the start of the query c.brdcstBehaviour.Notify(ctx, cmd) - contacted, errs, err := c.waitForBroadcast(ctx, waiter) - fmt.Println(contacted) - fmt.Println(errs) + _, _, err = c.waitForBroadcast(ctx, waiter) return err } diff --git a/v2/internal/coord/query/query.go b/v2/internal/coord/query/query.go index 00168082..77a4dae8 100644 --- a/v2/internal/coord/query/query.go +++ b/v2/internal/coord/query/query.go @@ -84,7 +84,7 @@ type Query[K kad.Key[K], N kad.NodeID[K], M coordt.Message] struct { findCloser bool stats QueryStats - // finished indicates that that the query has completed its work or has been stopped. + // finished indicates that the query has completed its work or has been stopped. finished bool // targetNodes is the set of responsive nodes thought to be closest to the target. diff --git a/v2/routing.go b/v2/routing.go index eec85c30..756a3d48 100644 --- a/v2/routing.go +++ b/v2/routing.go @@ -16,6 +16,7 @@ import ( "github.com/libp2p/go-libp2p/core/routing" "go.opentelemetry.io/otel/attribute" otel "go.opentelemetry.io/otel/trace" + "golang.org/x/exp/slog" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" @@ -110,20 +111,100 @@ func (d *DHT) Provide(ctx context.Context, c cid.Cid, brdcst bool) error { } func (d *DHT) FindProvidersAsync(ctx context.Context, c cid.Cid, count int) <-chan peer.AddrInfo { - _, span := d.tele.Tracer.Start(ctx, "DHT.FindProvidersAsync", otel.WithAttributes(attribute.String("cid", c.String()), attribute.Int("count", count))) + peerOut := make(chan peer.AddrInfo) + go d.findProvidersAsyncRoutine(ctx, c, count, peerOut) + return peerOut +} + +func (d *DHT) findProvidersAsyncRoutine(ctx context.Context, c cid.Cid, count int, out chan peer.AddrInfo) { + _, span := d.tele.Tracer.Start(ctx, "DHT.findProvidersAsyncRoutine", otel.WithAttributes(attribute.String("cid", c.String()), attribute.Int("count", count))) defer span.End() - // verify if this DHT supports provider records by checking if a "providers" - // backend is registered. - _, found := d.backends[namespaceProviders] + defer close(out) + + // verify if this DHT supports provider records by checking + // if a "providers" backend is registered. 
+	b, found := d.backends[namespaceProviders]
 	if !found || !c.Defined() {
-		peerOut := make(chan peer.AddrInfo)
-		close(peerOut)
-		return peerOut
+		span.RecordError(fmt.Errorf("no providers backend registered or CID undefined"))
+		return
 	}
 
-	// TODO reach out to Zikade
-	panic("implement me")
+	// first fetch the record locally
+	stored, err := b.Fetch(ctx, string(c.Hash()))
+	if err != nil {
+		span.RecordError(err)
+		d.log.Warn("Fetching value from provider store", slog.String("cid", c.String()), slog.String("err", err.Error()))
+		return
+	}
+
+	ps, ok := stored.(*providerSet)
+	if !ok {
+		span.RecordError(fmt.Errorf("expected *providerSet value type, got: %T", stored))
+		d.log.Warn("Stored value is not a provider set", slog.String("cid", c.String()), slog.String("type", fmt.Sprintf("%T", stored)))
+		return
+	}
+
+	// send all providers onto the out channel until the desired count
+	// is reached. If no count was specified, continue with network lookup.
+	providers := map[peer.ID]struct{}{}
+	for _, provider := range ps.providers {
+		providers[provider.ID] = struct{}{}
+
+		select {
+		case <-ctx.Done():
+			return
+		case out <- provider:
+		}
+
+		if count != 0 && len(providers) == count {
+			return
+		}
+	}
+
+	// Craft message to send to other peers
+	msg := &pb.Message{
+		Type: pb.Message_GET_PROVIDERS,
+		Key:  c.Hash(),
+	}
+
+	// handle node response
+	fn := func(ctx context.Context, id kadt.PeerID, resp *pb.Message, stats coordt.QueryStats) error {
+		// loop through all providers that the remote peer returned
+		for _, provider := range resp.ProviderAddrInfos() {
+
+			// if we had already sent that peer on the channel -> do nothing
+			if _, found := providers[provider.ID]; found {
+				continue
+			}
+
+			// keep track that we will have sent this peer on the channel
+			providers[provider.ID] = struct{}{}
+
+			// actually send the provider information to the user
+			select {
+			case <-ctx.Done():
+				return coordt.ErrSkipRemaining
+			case out <- provider:
+			}
+
+			// if count is 0, we will wait until the query has exhausted the keyspace.
+			// if count isn't 0, we will stop if the number of providers we have sent
+			// equals the number that the user has requested.
+			if count != 0 && len(providers) == count {
+				return coordt.ErrSkipRemaining
+			}
+		}
+
+		return nil
+	}
+
+	_, err = d.kad.QueryMessage(ctx, msg, fn, 20) // TODO: parameterize
+	if err != nil {
+		span.RecordError(err)
+		d.log.Warn("Failed querying", slog.String("cid", c.String()), slog.String("err", err.Error()))
+		return
+	}
 }
 
 // PutValue satisfies the [routing.Routing] interface and will add the given
diff --git a/v2/routing_test.go b/v2/routing_test.go
index 8647b56e..d8aa1e90 100644
--- a/v2/routing_test.go
+++ b/v2/routing_test.go
@@ -1,11 +1,18 @@
 package dht
 
 import (
+	"context"
+	"crypto/rand"
+	"crypto/sha256"
+	"fmt"
 	"testing"
 
+	"github.com/ipfs/go-cid"
+	"github.com/ipfs/go-datastore/failstore"
 	"github.com/libp2p/go-libp2p/core/crypto"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/libp2p/go-libp2p/core/routing"
+	mh "github.com/multiformats/go-multihash"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
@@ -93,3 +100,274 @@ func TestGetValueOnePeer(t *testing.T) {
 
 	require.Equal(t, v, val)
 }
+
+// newRandomContent reads 1024 bytes from crypto/rand and returns the CIDv0 of their SHA2-256 digest.
+func newRandomContent(t testing.TB) cid.Cid { + raw := make([]byte, 1024) + _, err := rand.Read(raw) + require.NoError(t, err) + + hash := sha256.New() + hash.Write(raw) + + mhash, err := mh.Encode(hash.Sum(nil), mh.SHA2_256) + require.NoError(t, err) + + return cid.NewCidV0(mhash) +} + +func TestDHT_FindProvidersAsync_empty_routing_table(t *testing.T) { + ctx := kadtest.CtxShort(t) + d := newTestDHT(t) + c := newRandomContent(t) + + out := d.FindProvidersAsync(ctx, c, 1) + select { + case _, more := <-out: + require.False(t, more) + case <-ctx.Done(): + t.Fatal("timeout") + } +} + +func TestDHT_FindProvidersAsync_dht_does_not_support_providers(t *testing.T) { + ctx := kadtest.CtxShort(t) + d := newTestDHT(t) + fillRoutingTable(t, d, 250) + + delete(d.backends, namespaceProviders) + + out := d.FindProvidersAsync(ctx, newRandomContent(t), 1) + select { + case _, more := <-out: + require.False(t, more) + case <-ctx.Done(): + t.Fatal("timeout") + } +} + +func TestDHT_FindProvidersAsync_providers_stored_locally(t *testing.T) { + ctx := kadtest.CtxShort(t) + d := newTestDHT(t) + fillRoutingTable(t, d, 250) + + c := newRandomContent(t) + provider := peer.AddrInfo{ID: newPeerID(t)} + _, err := d.backends[namespaceProviders].Store(ctx, string(c.Hash()), provider) + require.NoError(t, err) + + out := d.FindProvidersAsync(ctx, c, 1) + for { + select { + case p, more := <-out: + if !more { + return + } + assert.Equal(t, provider.ID, p.ID) + case <-ctx.Done(): + t.Fatal("timeout") + } + } +} + +func TestDHT_FindProvidersAsync_returns_only_count_from_local_store(t *testing.T) { + ctx := kadtest.CtxShort(t) + d := newTestDHT(t) + fillRoutingTable(t, d, 250) + + c := newRandomContent(t) + + storedCount := 5 + requestedCount := 3 + + // invariant for this test + assert.Less(t, requestedCount, storedCount) + + for i := 0; i < storedCount; i++ { + provider := peer.AddrInfo{ID: newPeerID(t)} + _, err := d.backends[namespaceProviders].Store(ctx, string(c.Hash()), provider) + require.NoError(t, err) + } + + out := d.FindProvidersAsync(ctx, c, requestedCount) + + returnedCount := 0 +LOOP: + for { + select { + case _, more := <-out: + if !more { + break LOOP + } + returnedCount += 1 + case <-ctx.Done(): + t.Fatal("timeout") + } + } + assert.Equal(t, requestedCount, returnedCount) +} + +func TestDHT_FindProvidersAsync_queries_other_peers(t *testing.T) { + ctx := kadtest.CtxShort(t) + + c := newRandomContent(t) + + top := NewTopology(t) + d1 := top.AddServer(nil) + d2 := top.AddServer(nil) + d3 := top.AddServer(nil) + + top.ConnectChain(ctx, d1, d2, d3) + + provider := peer.AddrInfo{ID: newPeerID(t)} + _, err := d3.backends[namespaceProviders].Store(ctx, string(c.Hash()), provider) + require.NoError(t, err) + + out := d1.FindProvidersAsync(ctx, c, 1) + select { + case p, more := <-out: + require.True(t, more) + assert.Equal(t, provider.ID, p.ID) + case <-ctx.Done(): + t.Fatal("timeout") + } + + select { + case _, more := <-out: + assert.False(t, more) + case <-ctx.Done(): + t.Fatal("timeout") + } +} + +func TestDHT_FindProvidersAsync_respects_cancelled_context_for_local_query(t *testing.T) { + // Test strategy: + // We let d know about providersCount providers for the CID c + // Then we ask it to find providers but pass it a cancelled context. + // We assert that we are sending on the channel while also respecting a + // cancelled context by checking if the number of returned providers is + // less than the number of providers d knows about. 
Since it's random
+	// which channel gets selected on, providersCount must be significantly
+	// large. This is a statistical test, and we should watch whether it
+	// turns out to be flaky.
+	ctx := kadtest.CtxShort(t)
+	d := newTestDHT(t)
+
+	c := newRandomContent(t)
+
+	providersCount := 50
+	for i := 0; i < providersCount; i++ {
+		provider := peer.AddrInfo{ID: newPeerID(t)}
+		_, err := d.backends[namespaceProviders].Store(ctx, string(c.Hash()), provider)
+		require.NoError(t, err)
+	}
+
+	cancelledCtx, cancel := context.WithCancel(ctx)
+	cancel()
+
+	out := d.FindProvidersAsync(cancelledCtx, c, 0)
+
+	returnedCount := 0
+LOOP:
+	for {
+		select {
+		case _, more := <-out:
+			if !more {
+				break LOOP
+			}
+			returnedCount += 1
+		case <-ctx.Done():
+			t.Fatal("timeout")
+		}
+	}
+	assert.Less(t, returnedCount, providersCount)
+}
+
+func TestDHT_FindProvidersAsync_does_not_return_same_record_twice(t *testing.T) {
+	// Test setup:
+	// There are two providers in the network for CID c.
+	// d1 has information about one provider locally.
+	// d2 has information about two providers, one of which is the one d1 knew about.
+	// We assert that the locally known provider is only returned once.
+	// The query should run until exhaustion.
+	ctx := kadtest.CtxShort(t)
+
+	c := newRandomContent(t)
+
+	top := NewTopology(t)
+	d1 := top.AddServer(nil)
+	d2 := top.AddServer(nil)
+
+	top.Connect(ctx, d1, d2)
+
+	provider1 := peer.AddrInfo{ID: newPeerID(t)}
+	provider2 := peer.AddrInfo{ID: newPeerID(t)}
+
+	// store provider1 with d1
+	_, err := d1.backends[namespaceProviders].Store(ctx, string(c.Hash()), provider1)
+	require.NoError(t, err)
+
+	// store provider1 with d2
+	_, err = d2.backends[namespaceProviders].Store(ctx, string(c.Hash()), provider1)
+	require.NoError(t, err)
+
+	// store provider2 with d2
+	_, err = d2.backends[namespaceProviders].Store(ctx, string(c.Hash()), provider2)
+	require.NoError(t, err)
+
+	out := d1.FindProvidersAsync(ctx, c, 0)
+	count := 0
+LOOP:
+	for {
+		select {
+		case p, more := <-out:
+			if !more {
+				break LOOP
+			}
+			count += 1
+			assert.True(t, p.ID == provider1.ID || p.ID == provider2.ID)
+		case <-ctx.Done():
+			t.Fatal("timeout")
+		}
+	}
+	assert.Equal(t, 2, count)
+}
+
+func TestDHT_FindProvidersAsync_datastore_error(t *testing.T) {
+	ctx := kadtest.CtxShort(t)
+	d := newTestDHT(t)
+
+	// construct a datastore that fails for any operation
+	memStore, err := InMemoryDatastore()
+	require.NoError(t, err)
+
+	dstore := failstore.NewFailstore(memStore, func(s string) error {
+		return fmt.Errorf("some error")
+	})
+
+	be, err := typedBackend[*ProvidersBackend](d, namespaceProviders)
+	require.NoError(t, err)
+
+	be.datastore = dstore
+
+	out := d.FindProvidersAsync(ctx, newRandomContent(t), 0)
+	select {
+	case _, more := <-out:
+		assert.False(t, more)
+	case <-ctx.Done():
+		t.Fatal("timeout")
+	}
+}
+
+func TestDHT_FindProvidersAsync_invalid_key(t *testing.T) {
+	ctx := kadtest.CtxShort(t)
+	d := newTestDHT(t)
+
+	out := d.FindProvidersAsync(ctx, cid.Cid{}, 0)
+	select {
+	case _, more := <-out:
+		assert.False(t, more)
+	case <-ctx.Done():
+		t.Fatal("timeout")
+	}
+}

From dedca8652d3db5c3c6029e00e13fd097d70ef104 Mon Sep 17 00:00:00 2001
From: Ian Davis <18375+iand@users.noreply.github.com>
Date: Wed, 27 Sep 2023 12:11:42 +0100
Subject: [PATCH 56/64] Add metrics to routing state machines (#939)

* Add metrics to routing state machines

* Simplify use of gauges with atomics
---
 v2/internal/coord/coordinator.go            |   1 +
 v2/internal/coord/routing.go                |  20 +++
 v2/internal/coord/routing/bootstrap.go
| 115 +++++++++++++-- v2/internal/coord/routing/bootstrap_test.go | 12 ++ v2/internal/coord/routing/explore.go | 125 +++++++++++++++-- v2/internal/coord/routing/explore_test.go | 12 ++ v2/internal/coord/routing/include.go | 148 +++++++++++++++++--- v2/internal/coord/routing/include_test.go | 12 ++ v2/internal/coord/routing/probe.go | 111 +++++++++++++-- v2/internal/coord/routing/probe_test.go | 62 ++++---- v2/internal/coord/routing_test.go | 6 + v2/tele/tele.go | 7 + 12 files changed, 558 insertions(+), 73 deletions(-) diff --git a/v2/internal/coord/coordinator.go b/v2/internal/coord/coordinator.go index b167c840..4d568471 100644 --- a/v2/internal/coord/coordinator.go +++ b/v2/internal/coord/coordinator.go @@ -147,6 +147,7 @@ func DefaultCoordinatorConfig() *CoordinatorConfig { cfg.Routing.Clock = cfg.Clock cfg.Routing.Logger = cfg.Logger.With("behaviour", "routing") cfg.Routing.Tracer = cfg.TracerProvider.Tracer(tele.TracerName) + cfg.Routing.Meter = cfg.MeterProvider.Meter(tele.MeterName) return cfg } diff --git a/v2/internal/coord/routing.go b/v2/internal/coord/routing.go index 5e33e316..ab6918d5 100644 --- a/v2/internal/coord/routing.go +++ b/v2/internal/coord/routing.go @@ -8,6 +8,7 @@ import ( "github.com/benbjohnson/clock" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" @@ -39,6 +40,9 @@ type RoutingConfig struct { // Tracer is the tracer that should be used to trace execution. Tracer trace.Tracer + // Meter is the meter that should be used to record metrics. + Meter metric.Meter + // BootstrapTimeout is the time the behaviour should wait before terminating a bootstrap if it is not making progress. BootstrapTimeout time.Duration @@ -118,6 +122,13 @@ func (cfg *RoutingConfig) Validate() error { } } + if cfg.Meter == nil { + return &errs.ConfigurationError{ + Component: "RoutingConfig", + Err: fmt.Errorf("meter must not be nil"), + } + } + if cfg.BootstrapTimeout < 1 { return &errs.ConfigurationError{ Component: "RoutingConfig", @@ -246,6 +257,7 @@ func DefaultRoutingConfig() *RoutingConfig { Clock: clock.New(), Logger: tele.DefaultLogger("coord"), Tracer: tele.NoopTracer(), + Meter: tele.NoopMeter(), BootstrapTimeout: 5 * time.Minute, // MAGIC BootstrapRequestConcurrency: 3, // MAGIC @@ -304,6 +316,8 @@ func NewRoutingBehaviour(self kadt.PeerID, rt routing.RoutingTableCpl[kadt.Key, bootstrapCfg := routing.DefaultBootstrapConfig() bootstrapCfg.Clock = cfg.Clock + bootstrapCfg.Tracer = cfg.Tracer + bootstrapCfg.Meter = cfg.Meter bootstrapCfg.Timeout = cfg.BootstrapTimeout bootstrapCfg.RequestConcurrency = cfg.BootstrapRequestConcurrency bootstrapCfg.RequestTimeout = cfg.BootstrapRequestTimeout @@ -315,6 +329,8 @@ func NewRoutingBehaviour(self kadt.PeerID, rt routing.RoutingTableCpl[kadt.Key, includeCfg := routing.DefaultIncludeConfig() includeCfg.Clock = cfg.Clock + includeCfg.Tracer = cfg.Tracer + includeCfg.Meter = cfg.Meter includeCfg.Timeout = cfg.ConnectivityCheckTimeout includeCfg.QueueCapacity = cfg.IncludeQueueCapacity includeCfg.Concurrency = cfg.IncludeRequestConcurrency @@ -326,6 +342,8 @@ func NewRoutingBehaviour(self kadt.PeerID, rt routing.RoutingTableCpl[kadt.Key, probeCfg := routing.DefaultProbeConfig() probeCfg.Clock = cfg.Clock + probeCfg.Tracer = cfg.Tracer + probeCfg.Meter = cfg.Meter probeCfg.Timeout = cfg.ConnectivityCheckTimeout probeCfg.Concurrency = cfg.ProbeRequestConcurrency probeCfg.CheckInterval = cfg.ProbeCheckInterval @@ -337,6 +355,8 @@ func NewRoutingBehaviour(self 
kadt.PeerID, rt routing.RoutingTableCpl[kadt.Key, exploreCfg := routing.DefaultExploreConfig() exploreCfg.Clock = cfg.Clock + exploreCfg.Tracer = cfg.Tracer + exploreCfg.Meter = cfg.Meter exploreCfg.Timeout = cfg.ExploreTimeout exploreCfg.RequestConcurrency = cfg.ExploreRequestConcurrency exploreCfg.RequestTimeout = cfg.ExploreRequestTimeout diff --git a/v2/internal/coord/routing/bootstrap.go b/v2/internal/coord/routing/bootstrap.go index ab234593..5b77a440 100644 --- a/v2/internal/coord/routing/bootstrap.go +++ b/v2/internal/coord/routing/bootstrap.go @@ -3,11 +3,13 @@ package routing import ( "context" "fmt" + "sync/atomic" "time" "github.com/benbjohnson/clock" "github.com/plprobelab/go-kademlia/kad" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" "github.com/libp2p/go-libp2p-kad-dht/v2/errs" @@ -28,6 +30,21 @@ type Bootstrap[K kad.Key[K], N kad.NodeID[K]] struct { // cfg is a copy of the optional configuration supplied to the Bootstrap cfg BootstrapConfig + + // counterFindSent is a counter that tracks the number of requests to find closer nodes sent. + counterFindSent metric.Int64Counter + + // counterFindSucceeded is a counter that tracks the number of requests to find closer nodes that succeeded. + counterFindSucceeded metric.Int64Counter + + // counterFindFailed is a counter that tracks the number of requests to find closer nodes that failed. + counterFindFailed metric.Int64Counter + + // gaugeRunning is a gauge that tracks whether the bootstrap is running. + gaugeRunning metric.Int64ObservableGauge + + // running records whether the bootstrap is running after the last state change so that it can be read asynchronously by gaugeRunning + running atomic.Bool } // BootstrapConfig specifies optional configuration for a Bootstrap @@ -36,6 +53,12 @@ type BootstrapConfig struct { RequestConcurrency int // the maximum number of concurrent requests that each query may have in flight RequestTimeout time.Duration // the timeout queries should use for contacting a single node Clock clock.Clock // a clock that may replaced by a mock when testing + + // Tracer is the tracer that should be used to trace execution. + Tracer trace.Tracer + + // Meter is the meter that should be used to record metrics. + Meter metric.Meter } // Validate checks the configuration options and returns an error if any have invalid values. 
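
A note on the gauge wiring above: Advance runs on the behaviour's event loop, while the meter provider collects measurements on its own schedule, which is why each state machine publishes its status through an atomic that the observable gauge's callback reads lock-free (the "Simplify use of gauges with atomics" part of this change). Below is a minimal, self-contained sketch of that pattern; the stateMachine type, the metric name example_running, and the function names are illustrative, not part of the patch.

package sketch

import (
	"context"
	"fmt"
	"sync/atomic"

	"go.opentelemetry.io/otel/metric"
)

// stateMachine stands in for Bootstrap/Explore/Probe: it flips an atomic
// after every state change instead of observing the gauge synchronously.
type stateMachine struct {
	running atomic.Bool
}

// registerRunningGauge registers an observable gauge whose callback reads
// the atomic whenever the meter provider collects measurements.
func registerRunningGauge(meter metric.Meter, sm *stateMachine) error {
	_, err := meter.Int64ObservableGauge(
		"example_running", // illustrative name
		metric.WithDescription("Whether or not the state machine is running"),
		metric.WithUnit("1"),
		metric.WithInt64Callback(func(ctx context.Context, o metric.Int64Observer) error {
			if sm.running.Load() {
				o.Observe(1)
			} else {
				o.Observe(0)
			}
			return nil
		}),
	)
	if err != nil {
		return fmt.Errorf("create example_running gauge: %w", err)
	}
	return nil
}

// advance mimics the deferred bookkeeping in the real Advance methods:
// the atomic is refreshed on every exit path.
func (sm *stateMachine) advance(active bool) {
	defer sm.running.Store(active)
	// ... state transition logic would run here ...
}

The same shape recurs in Bootstrap, Explore (which additionally attaches a cpl attribute), Include, and Probe below.
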
@@ -47,6 +70,20 @@ func (cfg *BootstrapConfig) Validate() error { } } + if cfg.Tracer == nil { + return &errs.ConfigurationError{ + Component: "BootstrapConfig", + Err: fmt.Errorf("tracer must not be nil"), + } + } + + if cfg.Meter == nil { + return &errs.ConfigurationError{ + Component: "BootstrapConfig", + Err: fmt.Errorf("meter must not be nil"), + } + } + if cfg.Timeout < 1 { return &errs.ConfigurationError{ Component: "BootstrapConfig", @@ -75,7 +112,10 @@ func (cfg *BootstrapConfig) Validate() error { // Options may be overridden before passing to NewBootstrap func DefaultBootstrapConfig() *BootstrapConfig { return &BootstrapConfig{ - Clock: clock.New(), // use standard time + Clock: clock.New(), // use standard time + Tracer: tele.NoopTracer(), + Meter: tele.NoopMeter(), + Timeout: 5 * time.Minute, // MAGIC RequestConcurrency: 3, // MAGIC RequestTimeout: time.Minute, // MAGIC @@ -89,20 +129,74 @@ func NewBootstrap[K kad.Key[K], N kad.NodeID[K]](self N, cfg *BootstrapConfig) ( return nil, err } - return &Bootstrap[K, N]{ + b := &Bootstrap[K, N]{ self: self, cfg: *cfg, - }, nil + } + + var err error + b.counterFindSent, err = cfg.Meter.Int64Counter( + "bootstrap_find_sent", + metric.WithDescription("Total number of find closer nodes requests sent by the bootstrap state machine"), + metric.WithUnit("1"), + ) + if err != nil { + return nil, fmt.Errorf("create bootstrap_find_sent counter: %w", err) + } + + b.counterFindSucceeded, err = cfg.Meter.Int64Counter( + "bootstrap_find_succeeded", + metric.WithDescription("Total number of find closer nodes requests sent by the bootstrap state machine that were successful"), + metric.WithUnit("1"), + ) + if err != nil { + return nil, fmt.Errorf("create bootstrap_find_succeeded counter: %w", err) + } + + b.counterFindFailed, err = cfg.Meter.Int64Counter( + "bootstrap_find_failed", + metric.WithDescription("Total number of find closer nodes requests sent by the bootstrap state machine that failed"), + metric.WithUnit("1"), + ) + if err != nil { + return nil, fmt.Errorf("create bootstrap_find_failed counter: %w", err) + } + + b.gaugeRunning, err = cfg.Meter.Int64ObservableGauge( + "bootstrap_running", + metric.WithDescription("Whether or not the bootstrap is running"), + metric.WithUnit("1"), + metric.WithInt64Callback(func(ctx context.Context, o metric.Int64Observer) error { + if b.running.Load() { + o.Observe(1) + } else { + o.Observe(0) + } + return nil + }), + ) + if err != nil { + return nil, fmt.Errorf("create bootstrap_running gauge: %w", err) + } + + return b, nil } // Advance advances the state of the bootstrap by attempting to advance its query if running. 
-func (b *Bootstrap[K, N]) Advance(ctx context.Context, ev BootstrapEvent) BootstrapState { - ctx, span := tele.StartSpan(ctx, "Bootstrap.Advance", trace.WithAttributes(tele.AttrInEvent(ev))) - defer span.End() +func (b *Bootstrap[K, N]) Advance(ctx context.Context, ev BootstrapEvent) (out BootstrapState) { + ctx, span := b.cfg.Tracer.Start(ctx, "Bootstrap.Advance", trace.WithAttributes(tele.AttrInEvent(ev))) + defer func() { + b.running.Store(b.qry != nil) // record whether the bootstrap is still running for metrics + span.SetAttributes(tele.AttrOutEvent(out)) + span.End() + }() switch tev := ev.(type) { case *EventBootstrapStart[K, N]: - // TODO: ignore start event if query is already in progress + if b.qry != nil { + return b.advanceQuery(ctx, &query.EventQueryPoll{}) + } + iter := query.NewClosestNodesIter[K, N](b.self.Key()) qryCfg := query.DefaultQueryConfig() @@ -119,11 +213,13 @@ func (b *Bootstrap[K, N]) Advance(ctx context.Context, ev BootstrapEvent) Bootst return b.advanceQuery(ctx, &query.EventQueryPoll{}) case *EventBootstrapFindCloserResponse[K, N]: + b.counterFindSucceeded.Add(ctx, 1) return b.advanceQuery(ctx, &query.EventQueryNodeResponse[K, N]{ NodeID: tev.NodeID, CloserNodes: tev.CloserNodes, }) case *EventBootstrapFindCloserFailure[K, N]: + b.counterFindFailed.Add(ctx, 1) span.RecordError(tev.Error) return b.advanceQuery(ctx, &query.EventQueryNodeFailure[K, N]{ NodeID: tev.NodeID, @@ -144,11 +240,12 @@ func (b *Bootstrap[K, N]) Advance(ctx context.Context, ev BootstrapEvent) Bootst } func (b *Bootstrap[K, N]) advanceQuery(ctx context.Context, qev query.QueryEvent) BootstrapState { - ctx, span := tele.StartSpan(ctx, "Bootstrap.advanceQuery") + ctx, span := b.cfg.Tracer.Start(ctx, "Bootstrap.advanceQuery") defer span.End() state := b.qry.Advance(ctx, qev) switch st := state.(type) { case *query.StateQueryFindCloser[K, N]: + b.counterFindSent.Add(ctx, 1) span.SetAttributes(attribute.String("out_state", "StateQueryFindCloser")) return &StateBootstrapFindCloser[K, N]{ QueryID: st.QueryID, @@ -164,6 +261,7 @@ func (b *Bootstrap[K, N]) advanceQuery(ctx context.Context, qev query.QueryEvent case *query.StateQueryWaitingAtCapacity: elapsed := b.cfg.Clock.Since(st.Stats.Start) if elapsed > b.cfg.Timeout { + b.counterFindFailed.Add(ctx, 1) span.SetAttributes(attribute.String("out_state", "StateBootstrapTimeout")) return &StateBootstrapTimeout{ Stats: st.Stats, @@ -176,6 +274,7 @@ func (b *Bootstrap[K, N]) advanceQuery(ctx context.Context, qev query.QueryEvent case *query.StateQueryWaitingWithCapacity: elapsed := b.cfg.Clock.Since(st.Stats.Start) if elapsed > b.cfg.Timeout { + b.counterFindFailed.Add(ctx, 1) span.SetAttributes(attribute.String("out_state", "StateBootstrapTimeout")) return &StateBootstrapTimeout{ Stats: st.Stats, diff --git a/v2/internal/coord/routing/bootstrap_test.go b/v2/internal/coord/routing/bootstrap_test.go index 1dd6611e..bbb25f6d 100644 --- a/v2/internal/coord/routing/bootstrap_test.go +++ b/v2/internal/coord/routing/bootstrap_test.go @@ -24,6 +24,18 @@ func TestBootstrapConfigValidate(t *testing.T) { require.Error(t, cfg.Validate()) }) + t.Run("tracer is not nil", func(t *testing.T) { + cfg := DefaultBootstrapConfig() + cfg.Tracer = nil + require.Error(t, cfg.Validate()) + }) + + t.Run("meter is not nil", func(t *testing.T) { + cfg := DefaultBootstrapConfig() + cfg.Meter = nil + require.Error(t, cfg.Validate()) + }) + t.Run("timeout positive", func(t *testing.T) { cfg := DefaultBootstrapConfig() cfg.Timeout = 0 diff --git 
a/v2/internal/coord/routing/explore.go b/v2/internal/coord/routing/explore.go index 41a42ae1..cf731aa5 100644 --- a/v2/internal/coord/routing/explore.go +++ b/v2/internal/coord/routing/explore.go @@ -5,11 +5,13 @@ import ( "context" "fmt" "math/rand" + "sync/atomic" "time" "github.com/benbjohnson/clock" "github.com/plprobelab/go-kademlia/kad" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" "github.com/libp2p/go-libp2p-kad-dht/v2/errs" @@ -51,6 +53,24 @@ type Explore[K kad.Key[K], N kad.NodeID[K]] struct { cfg ExploreConfig schedule ExploreSchedule + + // counterFindSent is a counter that tracks the number of requests to find closer nodes sent. + counterFindSent metric.Int64Counter + + // counterFindSucceeded is a counter that tracks the number of requests to find closer nodes that succeeded. + counterFindSucceeded metric.Int64Counter + + // counterFindFailed is a counter that tracks the number of requests to find closer nodes that failed. + counterFindFailed metric.Int64Counter + + // gaugeRunning is a gauge that tracks whether an explore is running. + gaugeRunning metric.Int64ObservableGauge + + // running records whether an explore is running after the last state change so that it can be read asynchronously by gaugeRunning + running atomic.Bool + + // cplAttributeSet holds the current cpl being explored in an attribute that may be used with metrics + cplAttributeSet atomic.Value // holds a [attribute.Set] } // NodeIDForCplFunc is a function that given a cpl generates a [kad.NodeID] with a key that has @@ -71,6 +91,12 @@ type ExploreConfig struct { // Clock is a clock that may replaced by a mock when testing Clock clock.Clock + // Tracer is the tracer that should be used to trace execution. + Tracer trace.Tracer + + // Meter is the meter that should be used to record metrics. + Meter metric.Meter + // Timeout is maximum time to allow for performing an explore for a CPL. Timeout time.Duration @@ -87,6 +113,20 @@ func (cfg *ExploreConfig) Validate() error { } } + if cfg.Tracer == nil { + return &errs.ConfigurationError{ + Component: "ExploreConfig", + Err: fmt.Errorf("tracer must not be nil"), + } + } + + if cfg.Meter == nil { + return &errs.ConfigurationError{ + Component: "ExploreConfig", + Err: fmt.Errorf("meter must not be nil"), + } + } + if cfg.Timeout < 1 { return &errs.ConfigurationError{ Component: "ExploreConfig", @@ -115,7 +155,10 @@ func (cfg *ExploreConfig) Validate() error { // Options may be overridden before passing to [NewExplore]. 
 func DefaultExploreConfig() *ExploreConfig {
 	return &ExploreConfig{
-		Clock: clock.New(), // use standard time
+		Clock:  clock.New(), // use standard time
+		Tracer: tele.NoopTracer(),
+		Meter:  tele.NoopMeter(),
+
 		Timeout:            10 * time.Minute, // MAGIC
 		RequestConcurrency: 3,                // MAGIC
 		RequestTimeout:     time.Minute,      // MAGIC
@@ -137,24 +180,76 @@ func NewExplore[K kad.Key[K], N kad.NodeID[K]](self N, rt RoutingTableCpl[K, N],
 		qryCpl:   -1,
 		schedule: schedule,
 	}
+	e.cplAttributeSet.Store(attribute.NewSet())
+
+	var err error
+	e.counterFindSent, err = cfg.Meter.Int64Counter(
+		"explore_find_sent",
+		metric.WithDescription("Total number of find closer nodes requests sent by the explore state machine"),
+		metric.WithUnit("1"),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("create explore_find_sent counter: %w", err)
+	}
+
+	e.counterFindSucceeded, err = cfg.Meter.Int64Counter(
+		"explore_find_succeeded",
+		metric.WithDescription("Total number of find closer nodes requests sent by the explore state machine that were successful"),
+		metric.WithUnit("1"),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("create explore_find_succeeded counter: %w", err)
+	}
+
+	e.counterFindFailed, err = cfg.Meter.Int64Counter(
+		"explore_find_failed",
+		metric.WithDescription("Total number of find closer nodes requests sent by the explore state machine that failed"),
+		metric.WithUnit("1"),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("create explore_find_failed counter: %w", err)
+	}
+
+	e.gaugeRunning, err = cfg.Meter.Int64ObservableGauge(
+		"explore_running",
+		metric.WithDescription("Whether or not an explore is running for a cpl"),
+		metric.WithUnit("1"),
+		metric.WithInt64Callback(func(ctx context.Context, o metric.Int64Observer) error {
+			if e.running.Load() {
+				o.Observe(1, metric.WithAttributeSet(e.cplAttributeSet.Load().(attribute.Set)))
+			} else {
+				o.Observe(0)
+			}
+			return nil
+		}),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("create explore_running gauge: %w", err)
+	}
 
 	return e, nil
 }
 
 // Advance advances the state of the explore by attempting to advance its query if running.
-func (e *Explore[K, N]) Advance(ctx context.Context, ev ExploreEvent) ExploreState { - ctx, span := tele.StartSpan(ctx, "Explore.Advance", trace.WithAttributes(tele.AttrInEvent(ev))) - defer span.End() +func (e *Explore[K, N]) Advance(ctx context.Context, ev ExploreEvent) (out ExploreState) { + ctx, span := e.cfg.Tracer.Start(ctx, "Explore.Advance", trace.WithAttributes(tele.AttrInEvent(ev))) + defer func() { + e.running.Store(e.qry != nil) + span.SetAttributes(tele.AttrOutEvent(out)) + span.End() + }() switch tev := ev.(type) { case *EventExplorePoll: // ignore, nothing to do case *EventExploreFindCloserResponse[K, N]: + e.counterFindSucceeded.Add(ctx, 1, metric.WithAttributeSet(e.cplAttributeSet.Load().(attribute.Set))) return e.advanceQuery(ctx, &query.EventQueryNodeResponse[K, N]{ NodeID: tev.NodeID, CloserNodes: tev.CloserNodes, }) case *EventExploreFindCloserFailure[K, N]: + e.counterFindFailed.Add(ctx, 1, metric.WithAttributeSet(e.cplAttributeSet.Load().(attribute.Set))) span.RecordError(tev.Error) return e.advanceQuery(ctx, &query.EventQueryNodeFailure[K, N]{ NodeID: tev.NodeID, @@ -201,16 +296,19 @@ func (e *Explore[K, N]) Advance(ctx context.Context, ev ExploreEvent) ExploreSta } e.qry = qry e.qryCpl = cpl + e.cplAttributeSet.Store(attribute.NewSet(attribute.Int("cpl", cpl))) return e.advanceQuery(ctx, &query.EventQueryPoll{}) } func (e *Explore[K, N]) advanceQuery(ctx context.Context, qev query.QueryEvent) ExploreState { - ctx, span := tele.StartSpan(ctx, "Explore.advanceQuery") + ctx, span := e.cfg.Tracer.Start(ctx, "Explore.advanceQuery") defer span.End() + state := e.qry.Advance(ctx, qev) switch st := state.(type) { case *query.StateQueryFindCloser[K, N]: + e.counterFindSent.Add(ctx, 1, metric.WithAttributeSet(e.cplAttributeSet.Load().(attribute.Set))) return &StateExploreFindCloser[K, N]{ Cpl: e.qryCpl, QueryID: st.QueryID, @@ -220,8 +318,7 @@ func (e *Explore[K, N]) advanceQuery(ctx context.Context, qev query.QueryEvent) } case *query.StateQueryFinished[K, N]: span.SetAttributes(attribute.String("out_state", "StateExploreFinished")) - e.qry = nil - e.qryCpl = -1 + e.clearQuery() return &StateExploreQueryFinished{ Cpl: e.qryCpl, Stats: st.Stats, @@ -229,9 +326,9 @@ func (e *Explore[K, N]) advanceQuery(ctx context.Context, qev query.QueryEvent) case *query.StateQueryWaitingAtCapacity: elapsed := e.cfg.Clock.Since(st.Stats.Start) if elapsed > e.cfg.Timeout { + e.counterFindFailed.Add(ctx, 1, metric.WithAttributeSet(e.cplAttributeSet.Load().(attribute.Set))) span.SetAttributes(attribute.String("out_state", "StateExploreTimeout")) - e.qry = nil - e.qryCpl = -1 + e.clearQuery() return &StateExploreQueryTimeout{ Cpl: e.qryCpl, Stats: st.Stats, @@ -245,9 +342,9 @@ func (e *Explore[K, N]) advanceQuery(ctx context.Context, qev query.QueryEvent) case *query.StateQueryWaitingWithCapacity: elapsed := e.cfg.Clock.Since(st.Stats.Start) if elapsed > e.cfg.Timeout { + e.counterFindFailed.Add(ctx, 1, metric.WithAttributeSet(e.cplAttributeSet.Load().(attribute.Set))) span.SetAttributes(attribute.String("out_state", "StateExploreTimeout")) - e.qry = nil - e.qryCpl = -1 + e.clearQuery() return &StateExploreQueryTimeout{ Cpl: e.qryCpl, Stats: st.Stats, @@ -263,6 +360,12 @@ func (e *Explore[K, N]) advanceQuery(ctx context.Context, qev query.QueryEvent) } } +func (e *Explore[K, N]) clearQuery() { + e.qry = nil + e.qryCpl = -1 + e.cplAttributeSet.Store(attribute.NewSet()) +} + // ExploreState is the state of an [Explore]. 
 type ExploreState interface {
 	exploreState()
diff --git a/v2/internal/coord/routing/explore_test.go b/v2/internal/coord/routing/explore_test.go
index 6be7a4fd..29d8653b 100644
--- a/v2/internal/coord/routing/explore_test.go
+++ b/v2/internal/coord/routing/explore_test.go
@@ -25,6 +25,18 @@ func TestExploreConfigValidate(t *testing.T) {
 		require.Error(t, cfg.Validate())
 	})
 
+	t.Run("tracer is not nil", func(t *testing.T) {
+		cfg := DefaultExploreConfig()
+		cfg.Tracer = nil
+		require.Error(t, cfg.Validate())
+	})
+
+	t.Run("meter is not nil", func(t *testing.T) {
+		cfg := DefaultExploreConfig()
+		cfg.Meter = nil
+		require.Error(t, cfg.Validate())
+	})
+
 	t.Run("timeout positive", func(t *testing.T) {
 		cfg := DefaultExploreConfig()
 		cfg.Timeout = 0
diff --git a/v2/internal/coord/routing/include.go b/v2/internal/coord/routing/include.go
index 4cc53f9e..4aad5383 100644
--- a/v2/internal/coord/routing/include.go
+++ b/v2/internal/coord/routing/include.go
@@ -3,11 +3,13 @@ package routing
 import (
 	"context"
 	"fmt"
+	"sync/atomic"
 	"time"
 
 	"github.com/benbjohnson/clock"
 	"github.com/plprobelab/go-kademlia/kad"
 	"github.com/plprobelab/go-kademlia/key"
+	"go.opentelemetry.io/otel/metric"
 	"go.opentelemetry.io/otel/trace"
 
 	"github.com/libp2p/go-libp2p-kad-dht/v2/errs"
@@ -25,10 +27,32 @@ type Include[K kad.Key[K], N kad.NodeID[K]] struct {
 	// checks is an index of checks in progress
 	checks map[string]check[K, N]
 
+	// candidates is a list of nodes that are candidates for adding to the routing table
 	candidates *nodeQueue[K, N]
 
 	// cfg is a copy of the optional configuration supplied to the Include
 	cfg IncludeConfig
+
+	// counterChecksSent is a counter that tracks the number of connectivity checks sent.
+	counterChecksSent metric.Int64Counter
+
+	// counterChecksPassed is a counter that tracks the number of connectivity checks that have passed.
+	counterChecksPassed metric.Int64Counter
+
+	// counterChecksFailed is a counter that tracks the number of connectivity checks that have failed.
+	counterChecksFailed metric.Int64Counter
+
+	// counterCandidatesDroppedCapacity is a counter that tracks the number of nodes that were not added to the candidate
+	// queue because it was already at maximum capacity. If this rises or remains high then it could indicate that
+	// the include state machine cannot keep up with the rate of new nodes being added. This could be affected by
+	// the configured maximum number of concurrent checks and the timeout used for terminating slow checks.
+	counterCandidatesDroppedCapacity metric.Int64Counter
+
+	// gaugeCandidateCount is a gauge that tracks the number of nodes in the include state machine's queue of candidate nodes.
+	gaugeCandidateCount metric.Int64ObservableGauge
+
+	// candidateCount holds the number of candidate nodes after the last state change so that it can be read asynchronously by gaugeCandidateCount
+	candidateCount atomic.Int64
 }
 
 // IncludeConfig specifies optional configuration for an Include
@@ -37,6 +61,12 @@ type IncludeConfig struct {
 	Concurrency   int           // the maximum number of include checks that may be in progress at any one time
 	Timeout       time.Duration // the time to wait before terminating a check that is not making progress
 	Clock         clock.Clock   // a clock that may replaced by a mock when testing
+
+	// Tracer is the tracer that should be used to trace execution.
+	Tracer trace.Tracer
+
+	// Meter is the meter that should be used to record metrics.
+	Meter metric.Meter
 }
 
 // Validate checks the configuration options and returns an error if any have invalid values.
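
All of these Validate methods report failures through errs.ConfigurationError, so a caller can tell which component rejected its configuration. Here is a small sketch of how that composes with the standard errors package, assuming the real type in v2/errs implements Error and Unwrap in the conventional way; the local mirror below is illustrative, not the package's actual source.

package sketch

import (
	"errors"
	"fmt"
)

// ConfigurationError mirrors the shape used by the Validate methods in this
// patch: Component names the config struct and Err carries the detail.
type ConfigurationError struct {
	Component string
	Err       error
}

func (e *ConfigurationError) Error() string {
	return fmt.Sprintf("invalid %s: %s", e.Component, e.Err)
}

func (e *ConfigurationError) Unwrap() error { return e.Err }

// reportComponent shows how a caller can recover the failing component
// from a wrapped validation error.
func reportComponent(err error) string {
	var cfgErr *ConfigurationError
	if errors.As(err, &cfgErr) {
		return cfgErr.Component // e.g. "IncludeConfig"
	}
	return "unknown"
}
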
@@ -69,6 +99,20 @@ func (cfg *IncludeConfig) Validate() error { } } + if cfg.Tracer == nil { + return &errs.ConfigurationError{ + Component: "IncludeConfig", + Err: fmt.Errorf("tracer must not be nil"), + } + } + + if cfg.Meter == nil { + return &errs.ConfigurationError{ + Component: "IncludeConfig", + Err: fmt.Errorf("meter must not be nil"), + } + } + return nil } @@ -76,7 +120,10 @@ func (cfg *IncludeConfig) Validate() error { // Options may be overridden before passing to NewInclude func DefaultIncludeConfig() *IncludeConfig { return &IncludeConfig{ - Clock: clock.New(), // use standard time + Clock: clock.New(), // use standard time + Tracer: tele.NoopTracer(), + Meter: tele.NoopMeter(), + Concurrency: 3, Timeout: time.Minute, QueueCapacity: 128, @@ -90,18 +137,72 @@ func NewInclude[K kad.Key[K], N kad.NodeID[K]](rt kad.RoutingTable[K, N], cfg *I return nil, err } - return &Include[K, N]{ + in := &Include[K, N]{ candidates: newNodeQueue[K, N](cfg.QueueCapacity), cfg: *cfg, rt: rt, checks: make(map[string]check[K, N], cfg.Concurrency), - }, nil + } + + // initialise metrics + var err error + in.counterChecksSent, err = cfg.Meter.Int64Counter( + "include_checks_sent", + metric.WithDescription("Total number of connectivity checks sent by the include state machine"), + metric.WithUnit("1"), + ) + if err != nil { + return nil, fmt.Errorf("create include_checks_sent counter: %w", err) + } + + in.counterChecksPassed, err = cfg.Meter.Int64Counter( + "include_checks_passed", + metric.WithDescription("Total number of connectivity checks sent by the include state machine that were successful"), + metric.WithUnit("1"), + ) + if err != nil { + return nil, fmt.Errorf("create include_checks_passed counter: %w", err) + } + + in.counterChecksFailed, err = cfg.Meter.Int64Counter( + "include_checks_failed", + metric.WithDescription("Total number of connectivity checks sent by the include state machine that failed"), + metric.WithUnit("1"), + ) + if err != nil { + return nil, fmt.Errorf("create include_checks_failed counter: %w", err) + } + + in.counterCandidatesDroppedCapacity, err = cfg.Meter.Int64Counter( + "include_candidates_dropped_capacity", + metric.WithDescription("Total number of nodes that were not added to the candidate queue because it was already at maximum capacity"), + metric.WithUnit("1"), + ) + if err != nil { + return nil, fmt.Errorf("create include_candidates_dropped_capacity counter: %w", err) + } + + in.gaugeCandidateCount, err = cfg.Meter.Int64ObservableGauge( + "include_candidate_count", + metric.WithDescription("Total number of nodes in the include state machine's candidate queue"), + metric.WithUnit("1"), + metric.WithInt64Callback(func(ctx context.Context, o metric.Int64Observer) error { + o.Observe(in.candidateCount.Load()) + return nil + }), + ) + if err != nil { + return nil, fmt.Errorf("create include_candidate_count counter: %w", err) + } + + return in, nil } // Advance advances the state of the include state machine by attempting to advance its query if running. 
-func (b *Include[K, N]) Advance(ctx context.Context, ev IncludeEvent) (out IncludeState) { - ctx, span := tele.StartSpan(ctx, "Include.Advance", trace.WithAttributes(tele.AttrInEvent(ev))) +func (in *Include[K, N]) Advance(ctx context.Context, ev IncludeEvent) (out IncludeState) { + ctx, span := in.cfg.Tracer.Start(ctx, "Include.Advance", trace.WithAttributes(tele.AttrInEvent(ev))) defer func() { + in.candidateCount.Store(int64(in.candidates.Len())) span.SetAttributes(tele.AttrOutEvent(out)) span.End() }() @@ -109,35 +210,38 @@ func (b *Include[K, N]) Advance(ctx context.Context, ev IncludeEvent) (out Inclu switch tev := ev.(type) { case *EventIncludeAddCandidate[K, N]: // Ignore if already running a check - _, checking := b.checks[key.HexString(tev.NodeID.Key())] + _, checking := in.checks[key.HexString(tev.NodeID.Key())] if checking { break } // Ignore if node already in routing table - if _, exists := b.rt.GetNode(tev.NodeID.Key()); exists { + if _, exists := in.rt.GetNode(tev.NodeID.Key()); exists { break } // TODO: potentially time out a check and make room in the queue - if !b.candidates.HasCapacity() { + if !in.candidates.HasCapacity() { + in.counterCandidatesDroppedCapacity.Add(ctx, 1) return &StateIncludeWaitingFull{} } - b.candidates.Enqueue(ctx, tev.NodeID) + in.candidates.Enqueue(ctx, tev.NodeID) case *EventIncludeConnectivityCheckSuccess[K, N]: - ch, ok := b.checks[key.HexString(tev.NodeID.Key())] + in.counterChecksPassed.Add(ctx, 1) + ch, ok := in.checks[key.HexString(tev.NodeID.Key())] if ok { - delete(b.checks, key.HexString(tev.NodeID.Key())) - if b.rt.AddNode(tev.NodeID) { + delete(in.checks, key.HexString(tev.NodeID.Key())) + if in.rt.AddNode(tev.NodeID) { return &StateIncludeRoutingUpdated[K, N]{ NodeID: ch.NodeID, } } } case *EventIncludeConnectivityCheckFailure[K, N]: + in.counterChecksFailed.Add(ctx, 1) span.RecordError(tev.Error) - delete(b.checks, key.HexString(tev.NodeID.Key())) + delete(in.checks, key.HexString(tev.NodeID.Key())) case *EventIncludePoll: // ignore, nothing to do @@ -145,28 +249,30 @@ func (b *Include[K, N]) Advance(ctx context.Context, ev IncludeEvent) (out Inclu panic(fmt.Sprintf("unexpected event: %T", tev)) } - if len(b.checks) == b.cfg.Concurrency { - if !b.candidates.HasCapacity() { + if len(in.checks) == in.cfg.Concurrency { + if !in.candidates.HasCapacity() { + in.counterCandidatesDroppedCapacity.Add(ctx, 1) return &StateIncludeWaitingFull{} } return &StateIncludeWaitingAtCapacity{} } - candidate, ok := b.candidates.Dequeue(ctx) + candidate, ok := in.candidates.Dequeue(ctx) if !ok { // No candidate in queue - if len(b.checks) > 0 { + if len(in.checks) > 0 { return &StateIncludeWaitingWithCapacity{} } return &StateIncludeIdle{} } - b.checks[key.HexString(candidate.Key())] = check[K, N]{ + in.checks[key.HexString(candidate.Key())] = check[K, N]{ NodeID: candidate, - Started: b.cfg.Clock.Now(), + Started: in.cfg.Clock.Now(), } // Ask the node to find itself + in.counterChecksSent.Add(ctx, 1) return &StateIncludeConnectivityCheck[K, N]{ NodeID: candidate, } @@ -222,6 +328,10 @@ func (q *nodeQueue[K, N]) HasCapacity() bool { return len(q.nodes) < q.capacity } +func (q *nodeQueue[K, N]) Len() int { + return len(q.nodes) +} + // IncludeState is the state of a include. 
 type IncludeState interface {
 	includeState()
diff --git a/v2/internal/coord/routing/include_test.go b/v2/internal/coord/routing/include_test.go
index a02def62..b727a88c 100644
--- a/v2/internal/coord/routing/include_test.go
+++ b/v2/internal/coord/routing/include_test.go
@@ -24,6 +24,18 @@ func TestIncludeConfigValidate(t *testing.T) {
 		require.Error(t, cfg.Validate())
 	})
 
+	t.Run("tracer is not nil", func(t *testing.T) {
+		cfg := DefaultIncludeConfig()
+		cfg.Tracer = nil
+		require.Error(t, cfg.Validate())
+	})
+
+	t.Run("meter is not nil", func(t *testing.T) {
+		cfg := DefaultIncludeConfig()
+		cfg.Meter = nil
+		require.Error(t, cfg.Validate())
+	})
+
 	t.Run("timeout positive", func(t *testing.T) {
 		cfg := DefaultIncludeConfig()
 		cfg.Timeout = 0
diff --git a/v2/internal/coord/routing/probe.go b/v2/internal/coord/routing/probe.go
index 37856ee2..56f31146 100644
--- a/v2/internal/coord/routing/probe.go
+++ b/v2/internal/coord/routing/probe.go
@@ -5,12 +5,14 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"sync/atomic"
 	"time"
 
 	"github.com/benbjohnson/clock"
 	"github.com/plprobelab/go-kademlia/kad"
 	"github.com/plprobelab/go-kademlia/key"
 	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
 	"go.opentelemetry.io/otel/trace"
 
 	"github.com/libp2p/go-libp2p-kad-dht/v2/errs"
@@ -65,6 +67,21 @@ type Probe[K kad.Key[K], N kad.NodeID[K]] struct {
 
 	// cfg is a copy of the optional configuration supplied to the Probe
 	cfg ProbeConfig
+
+	// counterChecksSent is a counter that tracks the number of connectivity checks sent.
+	counterChecksSent metric.Int64Counter
+
+	// counterChecksPassed is a counter that tracks the number of connectivity checks that have passed.
+	counterChecksPassed metric.Int64Counter
+
+	// counterChecksFailed is a counter that tracks the number of connectivity checks that have failed.
+	counterChecksFailed metric.Int64Counter
+
+	// gaugePendingCount is a gauge that tracks the number of nodes in the probe's pending queue of scheduled checks.
+	gaugePendingCount metric.Int64ObservableGauge
+
+	// pendingCount holds the number of pending nodes after the last state change so that it can be read asynchronously by gaugePendingCount
+	pendingCount atomic.Int64
 }
 
 // ProbeConfig specifies optional configuration for a Probe
@@ -73,6 +90,12 @@ type ProbeConfig struct {
 	Concurrency int           // the maximum number of probe checks that may be in progress at any one time
 	Timeout     time.Duration // the time to wait before terminating a check that is not making progress
 	Clock       clock.Clock   // a clock that may be replaced by a mock when testing
+
+	// Tracer is the tracer that should be used to trace execution.
+	Tracer trace.Tracer
+
+	// Meter is the meter that should be used to record metrics.
+	Meter metric.Meter
 }
 
 // Validate checks the configuration options and returns an error if any have invalid values.
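
The Advance methods changed in this patch share one idiom: a named return value paired with a deferred closure, so the outgoing state can be attached to the trace span and the gauge-backing atomics refreshed exactly once on every exit path. A toy, runnable sketch of the idiom follows; the toyMachine type and its states are invented for illustration and only mirror how Probe pairs its node value list with pendingCount.

package sketch

import (
	"fmt"
	"sync/atomic"
)

// toyMachine keeps a pending queue and an atomic mirror of its length.
type toyMachine struct {
	pending      []string
	pendingCount atomic.Int64
}

// advance returns the next state; the deferred closure runs after the
// named return value is assigned, so it sees the final out state and the
// final queue length regardless of which branch returned.
func (m *toyMachine) advance(ev string) (out string) {
	defer func() {
		m.pendingCount.Store(int64(len(m.pending))) // refresh gauge-backing atomic
		fmt.Printf("event=%s out=%s\n", ev, out)    // stands in for span.SetAttributes
	}()

	switch ev {
	case "add":
		m.pending = append(m.pending, ev)
		return "waiting"
	default:
		return "idle"
	}
}
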
@@ -84,6 +107,20 @@ func (cfg *ProbeConfig) Validate() error { } } + if cfg.Tracer == nil { + return &errs.ConfigurationError{ + Component: "ProbeConfig", + Err: fmt.Errorf("tracer must not be nil"), + } + } + + if cfg.Meter == nil { + return &errs.ConfigurationError{ + Component: "ProbeConfig", + Err: fmt.Errorf("meter must not be nil"), + } + } + if cfg.Concurrency < 1 { return &errs.ConfigurationError{ Component: "ProbeConfig", @@ -112,7 +149,10 @@ func (cfg *ProbeConfig) Validate() error { // Options may be overridden before passing to NewProbe func DefaultProbeConfig() *ProbeConfig { return &ProbeConfig{ - Clock: clock.New(), // use standard time + Clock: clock.New(), // use standard time + Tracer: tele.NoopTracer(), + Meter: tele.NoopMeter(), + Concurrency: 3, // MAGIC Timeout: time.Minute, // MAGIC CheckInterval: 6 * time.Hour, // MAGIC @@ -126,17 +166,63 @@ func NewProbe[K kad.Key[K], N kad.NodeID[K]](rt RoutingTableCpl[K, N], cfg *Prob return nil, err } - return &Probe[K, N]{ + p := &Probe[K, N]{ cfg: *cfg, rt: rt, nvl: NewNodeValueList[K, N](), - }, nil + } + + // initialise metrics + var err error + p.counterChecksSent, err = cfg.Meter.Int64Counter( + "probe_checks_sent", + metric.WithDescription("Total number of connectivity checks sent by the probe state machine"), + metric.WithUnit("1"), + ) + if err != nil { + return nil, fmt.Errorf("create probe_checks_sent counter: %w", err) + } + + p.counterChecksPassed, err = cfg.Meter.Int64Counter( + "probe_checks_passed", + metric.WithDescription("Total number of connectivity checks sent by the probe state machine that were successful"), + metric.WithUnit("1"), + ) + if err != nil { + return nil, fmt.Errorf("create probe_checks_passed counter: %w", err) + } + + p.counterChecksFailed, err = cfg.Meter.Int64Counter( + "probe_checks_failed", + metric.WithDescription("Total number of connectivity checks sent by the probe state machine that failed"), + metric.WithUnit("1"), + ) + if err != nil { + return nil, fmt.Errorf("create probe_checks_failed counter: %w", err) + } + + p.gaugePendingCount, err = cfg.Meter.Int64ObservableGauge( + "probe_pending_count", + metric.WithDescription("Total number of nodes being monitored by the probe state machine"), + metric.WithUnit("1"), + metric.WithInt64Callback(func(ctx context.Context, o metric.Int64Observer) error { + o.Observe(p.pendingCount.Load()) + return nil + }), + ) + if err != nil { + return nil, fmt.Errorf("create probe_pending_count gauge: %w", err) + } + + return p, nil } // Advance advances the state of the probe state machine by attempting to advance its query if running. 
func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) (out ProbeState) { - _, span := tele.StartSpan(ctx, "Probe.Advance", trace.WithAttributes(tele.AttrInEvent(ev))) + _, span := p.cfg.Tracer.Start(ctx, "Probe.Advance", trace.WithAttributes(tele.AttrInEvent(ev))) defer func() { + // update the pending count so gauge can read it asynchronously + p.pendingCount.Store(int64(p.nvl.pendingCount())) span.SetAttributes(tele.AttrOutEvent(out)) span.End() }() @@ -171,6 +257,7 @@ func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) (out ProbeStat } case *EventProbeConnectivityCheckSuccess[K, N]: + p.counterChecksPassed.Add(ctx, 1) span.SetAttributes(attribute.String("nodeid", tev.NodeID.String())) nv, found := p.nvl.Get(tev.NodeID) if !found { @@ -186,6 +273,7 @@ func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) (out ProbeStat case *EventProbeConnectivityCheckFailure[K, N]: // probe failed, so remove from routing table and from list + p.counterChecksFailed.Add(ctx, 1) span.SetAttributes(attribute.String("nodeid", tev.NodeID.String())) span.RecordError(tev.Error) @@ -212,7 +300,7 @@ func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) (out ProbeStat } // Check if there is capacity - if p.cfg.Concurrency <= p.nvl.OngoingCount() { + if p.cfg.Concurrency <= p.nvl.ongoingCount() { // see if a check can be timed out to free capacity candidate, found := p.nvl.FindCheckPastDeadline(p.cfg.Clock.Now()) if !found { @@ -232,7 +320,7 @@ func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) (out ProbeStat // there is capacity to start a new check next, ok := p.nvl.PeekNext(p.cfg.Clock.Now()) if !ok { - if p.nvl.OngoingCount() > 0 { + if p.nvl.ongoingCount() > 0 { // waiting for a check but nothing else to do return &StateProbeWaitingWithCapacity{} } @@ -243,6 +331,7 @@ func (p *Probe[K, N]) Advance(ctx context.Context, ev ProbeEvent) (out ProbeStat p.nvl.MarkOngoing(next.NodeID, p.cfg.Clock.Now().Add(p.cfg.Timeout)) // Ask the node to find itself + p.counterChecksSent.Add(ctx, 1) return &StateProbeConnectivityCheck[K, N]{ NodeID: next.NodeID, } @@ -339,8 +428,10 @@ type nodeValueEntry[K kad.Key[K], N kad.NodeID[K]] struct { type nodeValueList[K kad.Key[K], N kad.NodeID[K]] struct { nodes map[string]*nodeValueEntry[K, N] + // pending is a list of nodes ordered by the time of the next check pending *nodeValuePendingList[K, N] + // ongoing is a list of nodes with ongoing/in-progress probes, loosely ordered earliest to most recent ongoing []N } @@ -373,8 +464,8 @@ func (l *nodeValueList[K, N]) Put(nv *nodeValue[K, N]) { if nve.index == -1 { heap.Push(l.pending, nve) } - heap.Fix(l.pending, nve.index) + l.removeFromOngoing(nv.NodeID) } @@ -387,15 +478,15 @@ func (l *nodeValueList[K, N]) Get(n N) (*nodeValue[K, N], bool) { return nve.nv, true } -func (l *nodeValueList[K, N]) PendingCount() int { +func (l *nodeValueList[K, N]) pendingCount() int { return len(*l.pending) } -func (l *nodeValueList[K, N]) OngoingCount() int { +func (l *nodeValueList[K, N]) ongoingCount() int { return len(l.ongoing) } -func (l *nodeValueList[K, N]) NodeCount() int { +func (l *nodeValueList[K, N]) nodeCount() int { return len(l.nodes) } diff --git a/v2/internal/coord/routing/probe_test.go b/v2/internal/coord/routing/probe_test.go index e97ddce3..9c29a0e8 100644 --- a/v2/internal/coord/routing/probe_test.go +++ b/v2/internal/coord/routing/probe_test.go @@ -28,6 +28,18 @@ func TestProbeConfigValidate(t *testing.T) { require.Error(t, cfg.Validate()) }) + t.Run("tracer is not 
nil", func(t *testing.T) { + cfg := DefaultProbeConfig() + cfg.Tracer = nil + require.Error(t, cfg.Validate()) + }) + + t.Run("meter is not nil", func(t *testing.T) { + cfg := DefaultProbeConfig() + cfg.Meter = nil + require.Error(t, cfg.Validate()) + }) + t.Run("timeout positive", func(t *testing.T) { cfg := DefaultProbeConfig() cfg.Timeout = 0 @@ -362,8 +374,8 @@ func TestNodeValueList(t *testing.T) { l.Put(nv) - require.Equal(t, 1, l.PendingCount()) - require.Equal(t, 1, l.NodeCount()) + require.Equal(t, 1, l.pendingCount()) + require.Equal(t, 1, l.nodeCount()) _, found := l.Get(tiny.NewNode(4)) require.True(t, found) @@ -372,8 +384,8 @@ func TestNodeValueList(t *testing.T) { _, found = l.Get(tiny.NewNode(4)) require.False(t, found) - require.Equal(t, 0, l.PendingCount()) - require.Equal(t, 0, l.NodeCount()) + require.Equal(t, 0, l.pendingCount()) + require.Equal(t, 0, l.nodeCount()) }) t.Run("remove not-existing", func(t *testing.T) { @@ -508,14 +520,14 @@ func TestNodeValueList(t *testing.T) { NextCheckDue: clk.Now().Add(time.Minute), } l.Put(nv1) - require.Equal(t, 1, l.PendingCount()) - require.Equal(t, 0, l.OngoingCount()) - require.Equal(t, 1, l.NodeCount()) + require.Equal(t, 1, l.pendingCount()) + require.Equal(t, 0, l.ongoingCount()) + require.Equal(t, 1, l.nodeCount()) l.MarkOngoing(nv1.NodeID, clk.Now().Add(time.Minute)) - require.Equal(t, 0, l.PendingCount()) - require.Equal(t, 1, l.OngoingCount()) - require.Equal(t, 1, l.NodeCount()) + require.Equal(t, 0, l.pendingCount()) + require.Equal(t, 1, l.ongoingCount()) + require.Equal(t, 1, l.nodeCount()) }) t.Run("mark ongoing changes next", func(t *testing.T) { @@ -535,9 +547,9 @@ func TestNodeValueList(t *testing.T) { } l.Put(nv2) - require.Equal(t, 2, l.PendingCount()) - require.Equal(t, 0, l.OngoingCount()) - require.Equal(t, 2, l.NodeCount()) + require.Equal(t, 2, l.pendingCount()) + require.Equal(t, 0, l.ongoingCount()) + require.Equal(t, 2, l.nodeCount()) // nv1 is the next node due got, found := l.PeekNext(clk.Now()) @@ -545,9 +557,9 @@ func TestNodeValueList(t *testing.T) { require.True(t, key.Equal(got.NodeID.Key(), nv1.NodeID.Key())) l.MarkOngoing(nv1.NodeID, clk.Now().Add(time.Minute)) - require.Equal(t, 1, l.PendingCount()) - require.Equal(t, 1, l.OngoingCount()) - require.Equal(t, 2, l.NodeCount()) + require.Equal(t, 1, l.pendingCount()) + require.Equal(t, 1, l.ongoingCount()) + require.Equal(t, 2, l.nodeCount()) // nv2 is now the next node due got, found = l.PeekNext(clk.Now()) @@ -566,21 +578,21 @@ func TestNodeValueList(t *testing.T) { } l.Put(nv1) - require.Equal(t, 1, l.PendingCount()) - require.Equal(t, 0, l.OngoingCount()) - require.Equal(t, 1, l.NodeCount()) + require.Equal(t, 1, l.pendingCount()) + require.Equal(t, 0, l.ongoingCount()) + require.Equal(t, 1, l.nodeCount()) l.MarkOngoing(nv1.NodeID, clk.Now().Add(time.Minute)) - require.Equal(t, 0, l.PendingCount()) - require.Equal(t, 1, l.OngoingCount()) - require.Equal(t, 1, l.NodeCount()) + require.Equal(t, 0, l.pendingCount()) + require.Equal(t, 1, l.ongoingCount()) + require.Equal(t, 1, l.nodeCount()) l.Put(nv1) - require.Equal(t, 1, l.PendingCount()) - require.Equal(t, 0, l.OngoingCount()) - require.Equal(t, 1, l.NodeCount()) + require.Equal(t, 1, l.pendingCount()) + require.Equal(t, 0, l.ongoingCount()) + require.Equal(t, 1, l.nodeCount()) }) t.Run("mark ongoing pending mixed", func(t *testing.T) { diff --git a/v2/internal/coord/routing_test.go b/v2/internal/coord/routing_test.go index a13bdd4b..e51c6274 100644 --- a/v2/internal/coord/routing_test.go 
+++ b/v2/internal/coord/routing_test.go
@@ -63,6 +63,12 @@ func TestRoutingConfigValidate(t *testing.T) {
 		require.Error(t, cfg.Validate())
 	})
 
+	t.Run("meter is not nil", func(t *testing.T) {
+		cfg := DefaultRoutingConfig()
+		cfg.Meter = nil
+		require.Error(t, cfg.Validate())
+	})
+
 	t.Run("bootstrap timeout positive", func(t *testing.T) {
 		cfg := DefaultRoutingConfig()
 		cfg.BootstrapTimeout = 0
diff --git a/v2/tele/tele.go b/v2/tele/tele.go
index b905da6b..0cd4fac7 100644
--- a/v2/tele/tele.go
+++ b/v2/tele/tele.go
@@ -6,6 +6,8 @@ import (
 
 	"go.opentelemetry.io/otel"
 	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
 	"go.opentelemetry.io/otel/sdk/instrumentation"
 	motel "go.opentelemetry.io/otel/sdk/metric"
 	"go.opentelemetry.io/otel/trace"
@@ -25,6 +27,11 @@ func NoopTracer() trace.Tracer {
 	return trace.NewNoopTracerProvider().Tracer("")
 }
 
+// NoopMeter returns a meter that does not record or emit metrics.
+func NoopMeter() metric.Meter {
+	return noop.NewMeterProvider().Meter("")
+}
+
 // attrsCtxKey is the actual context key value that's used as a key for
 // metric values that are attached to a context.
 var attrsCtxKey = ctxKey{}

From e4b1034cc06e5e0b6ba662cee43655a3dd1831fb Mon Sep 17 00:00:00 2001
From: Dennis Trautwein
Date: Wed, 27 Sep 2023 13:18:52 +0200
Subject: [PATCH 57/64] add provide routing tests (#940)

---
 v2/internal/coord/brdcst/followup.go  | 11 ++++
 v2/internal/coord/brdcst/pool_test.go | 27 +++++++++
 v2/internal/coord/coordinator.go      | 13 ++++-
 v2/routing_test.go                    | 84 ++++++++++++++++++++++++---
 4 files changed, 125 insertions(+), 10 deletions(-)

diff --git a/v2/internal/coord/brdcst/followup.go b/v2/internal/coord/brdcst/followup.go
index 27d14d30..7769f961 100644
--- a/v2/internal/coord/brdcst/followup.go
+++ b/v2/internal/coord/brdcst/followup.go
@@ -221,6 +221,17 @@ func (f *FollowUp[K, N, M]) advancePool(ctx context.Context, ev query.PoolEvent)
 			QueryID: f.queryID,
 		}, true
 	case *query.StatePoolQueryFinished[K, N]:
+		if len(st.ClosestNodes) == 0 {
+			return &StateBroadcastFinished[K, N]{
+				QueryID:   f.queryID,
+				Contacted: make([]N, 0),
+				Errors: map[string]struct {
+					Node N
+					Err  error
+				}{},
+			}, true
+		}
+
 		f.closest = st.ClosestNodes
 
 		for _, n := range st.ClosestNodes {
diff --git a/v2/internal/coord/brdcst/pool_test.go b/v2/internal/coord/brdcst/pool_test.go
index f9404f3a..e4e7409c 100644
--- a/v2/internal/coord/brdcst/pool_test.go
+++ b/v2/internal/coord/brdcst/pool_test.go
@@ -272,6 +272,33 @@ func TestPool_FollowUp_stop_during_followup_phase(t *testing.T) {
 	require.Len(t, st.Errors, 2)
 }
 
+func TestPool_FollowUp_empty_seed(t *testing.T) {
+	ctx := context.Background()
+	cfg := DefaultConfigPool()
+
+	self := tiny.NewNode(0)
+
+	p, err := NewPool[tiny.Key, tiny.Node, tiny.Message](self, cfg)
+	require.NoError(t, err)
+
+	msg := tiny.Message{Content: "store this"}
+	target := tiny.Key(0b00000001)
+
+	queryID := coordt.QueryID("test")
+
+	state := p.Advance(ctx, &EventPoolStartBroadcast[tiny.Key, tiny.Node, tiny.Message]{
+		QueryID: queryID,
+		Target:  target,
+		Message: msg,
+		Seed:    []tiny.Node{},
+		Config:  DefaultConfigFollowUp(),
+	})
+	require.IsType(t, &StatePoolBroadcastFinished[tiny.Key, tiny.Node]{}, state)
+
+	state = p.Advance(ctx, &EventPoolPoll{})
+	require.IsType(t, &StatePoolIdle{}, state)
+}
+
 func TestPoolState_interface_conformance(t *testing.T) {
 	states := []PoolState{
 		&StatePoolIdle{},
diff --git a/v2/internal/coord/coordinator.go b/v2/internal/coord/coordinator.go
index
4d568471..e8e1428b 100644 --- a/v2/internal/coord/coordinator.go +++ b/v2/internal/coord/coordinator.go @@ -450,9 +450,18 @@ func (c *Coordinator) BroadcastRecord(ctx context.Context, msg *pb.Message) erro // queue the start of the query c.brdcstBehaviour.Notify(ctx, cmd) - _, _, err = c.waitForBroadcast(ctx, waiter) + contacted, _, err := c.waitForBroadcast(ctx, waiter) + if err != nil { + return err + } - return err + if len(contacted) == 0 { + return fmt.Errorf("no peers contacted") + } + + // TODO: define threshold below which we consider the provide to have failed + + return nil } func (c *Coordinator) waitForQuery(ctx context.Context, queryID coordt.QueryID, waiter *Waiter[BehaviourEvent], fn coordt.QueryFunc) ([]kadt.PeerID, coordt.QueryStats, error) { diff --git a/v2/routing_test.go b/v2/routing_test.go index d8aa1e90..19209fd4 100644 --- a/v2/routing_test.go +++ b/v2/routing_test.go @@ -19,6 +19,21 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" ) +// newRandomContent reads 1024 bytes from crypto/rand and builds a content struct. +func newRandomContent(t testing.TB) cid.Cid { + raw := make([]byte, 1024) + _, err := rand.Read(raw) + require.NoError(t, err) + + hash := sha256.New() + hash.Write(raw) + + mhash, err := mh.Encode(hash.Sum(nil), mh.SHA2_256) + require.NoError(t, err) + + return cid.NewCidV0(mhash) +} + func makePkKeyValue(t *testing.T) (string, []byte) { t.Helper() @@ -101,19 +116,72 @@ func TestGetValueOnePeer(t *testing.T) { require.Equal(t, v, val) } -// NewRandomContent reads 1024 bytes from crypto/rand and builds a content struct. -func newRandomContent(t testing.TB) cid.Cid { - raw := make([]byte, 1024) - _, err := rand.Read(raw) +func TestDHT_Provide_no_providers_backend_registered(t *testing.T) { + ctx := kadtest.CtxShort(t) + d := newTestDHT(t) + + delete(d.backends, namespaceProviders) + err := d.Provide(ctx, newRandomContent(t), true) + assert.ErrorIs(t, err, routing.ErrNotSupported) +} + +func TestDHT_Provide_undefined_cid(t *testing.T) { + ctx := kadtest.CtxShort(t) + d := newTestDHT(t) + + err := d.Provide(ctx, cid.Cid{}, true) + assert.ErrorContains(t, err, "invalid cid") +} + +func TestDHT_Provide_erroneous_datastore(t *testing.T) { + ctx := kadtest.CtxShort(t) + d := newTestDHT(t) + + testErr := fmt.Errorf("some error") + + // construct a datastore that fails for any operation + memStore, err := InMemoryDatastore() require.NoError(t, err) - hash := sha256.New() - hash.Write(raw) + dstore := failstore.NewFailstore(memStore, func(s string) error { + return testErr + }) - mhash, err := mh.Encode(hash.Sum(nil), mh.SHA2_256) + be, err := typedBackend[*ProvidersBackend](d, namespaceProviders) require.NoError(t, err) - return cid.NewCidV0(mhash) + be.datastore = dstore + + err = d.Provide(ctx, newRandomContent(t), true) + assert.ErrorIs(t, err, testErr) +} + +func TestDHT_Provide_does_nothing_if_broadcast_is_false(t *testing.T) { + ctx := kadtest.CtxShort(t) + d := newTestDHT(t) // unconnected DHT + + c := newRandomContent(t) + err := d.Provide(ctx, c, false) + assert.NoError(t, err) + + // still stored locally + be, err := typedBackend[*ProvidersBackend](d, namespaceProviders) + require.NoError(t, err) + val, err := be.Fetch(ctx, string(c.Hash())) + require.NoError(t, err) + + ps, ok := val.(*providerSet) + require.True(t, ok) + require.Len(t, ps.providers, 1) + assert.Equal(t, d.host.ID(), ps.providers[0].ID) +} + +func TestDHT_Provide_fails_if_routing_table_is_empty(t *testing.T) { + ctx := kadtest.CtxShort(t) + d := newTestDHT(t) + + 
err := d.Provide(ctx, newRandomContent(t), true) + assert.Error(t, err) } func TestDHT_FindProvidersAsync_empty_routing_table(t *testing.T) { From 4fa560fe4100fa71fe8c1c4d9d62c0db9fccaadf Mon Sep 17 00:00:00 2001 From: Ian Davis <18375+iand@users.noreply.github.com> Date: Wed, 27 Sep 2023 16:08:03 +0100 Subject: [PATCH 58/64] fix: bootstrap state machine goes idle after completion (#943) --- v2/internal/coord/routing/bootstrap.go | 1 + v2/internal/coord/routing/bootstrap_test.go | 38 +++++++++++++++++++++ 2 files changed, 39 insertions(+) diff --git a/v2/internal/coord/routing/bootstrap.go b/v2/internal/coord/routing/bootstrap.go index 5b77a440..e941ae0f 100644 --- a/v2/internal/coord/routing/bootstrap.go +++ b/v2/internal/coord/routing/bootstrap.go @@ -255,6 +255,7 @@ func (b *Bootstrap[K, N]) advanceQuery(ctx context.Context, qev query.QueryEvent } case *query.StateQueryFinished[K, N]: span.SetAttributes(attribute.String("out_state", "StateBootstrapFinished")) + b.qry = nil return &StateBootstrapFinished{ Stats: st.Stats, } diff --git a/v2/internal/coord/routing/bootstrap_test.go b/v2/internal/coord/routing/bootstrap_test.go index bbb25f6d..6adcb74c 100644 --- a/v2/internal/coord/routing/bootstrap_test.go +++ b/v2/internal/coord/routing/bootstrap_test.go @@ -232,3 +232,41 @@ func TestBootstrapProgress(t *testing.T) { require.Equal(t, 4, stf.Stats.Requests) require.Equal(t, 4, stf.Stats.Success) } + +func TestBootstrapFinishesThenGoesIdle(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultBootstrapConfig() + cfg.Clock = clk + + self := tiny.NewNode(0) + bs, err := NewBootstrap[tiny.Key](self, cfg) + require.NoError(t, err) + + a := tiny.NewNode(0b00000100) // 4 + + // start the bootstrap + state := bs.Advance(ctx, &EventBootstrapStart[tiny.Key, tiny.Node]{ + KnownClosestNodes: []tiny.Node{a}, + }) + require.IsType(t, &StateBootstrapFindCloser[tiny.Key, tiny.Node]{}, state) + + // the bootstrap should attempt to contact the node it was given + st := state.(*StateBootstrapFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, coordt.QueryID("bootstrap"), st.QueryID) + require.Equal(t, a, st.NodeID) + + // notify bootstrap that node was contacted successfully, but no closer nodes + state = bs.Advance(ctx, &EventBootstrapFindCloserResponse[tiny.Key, tiny.Node]{ + NodeID: a, + }) + + // bootstrap should respond that its query has finished + require.IsType(t, &StateBootstrapFinished{}, state) + + // poll bootstrap + state = bs.Advance(ctx, &EventBootstrapPoll{}) + + // bootstrap should now be idle + require.IsType(t, &StateBootstrapIdle{}, state) +} From 6a4249cae85b39e23f3fcd7565cdb9cbc1d49158 Mon Sep 17 00:00:00 2001 From: Ian Davis <18375+iand@users.noreply.github.com> Date: Thu, 28 Sep 2023 11:24:47 +0100 Subject: [PATCH 59/64] Logging improvements (#941) * Loggiing improvements * Add more debug logging * Add more debug logging * Use logging helpers --- v2/dht.go | 57 ++++++++++++++++++++++++-------- v2/internal/coord/coordinator.go | 15 +++++++++ v2/internal/coord/event.go | 4 +-- v2/internal/coord/query.go | 2 ++ v2/internal/coord/routing.go | 10 +++++- v2/routing.go | 1 + v2/stream.go | 6 ++-- v2/tele/log.go | 27 +++++++++++++-- v2/tele/tele.go | 8 ++--- 9 files changed, 105 insertions(+), 25 deletions(-) diff --git a/v2/dht.go b/v2/dht.go index b9ec9993..b5229c9e 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -17,6 +17,7 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/routing" 
"github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) // DHT is an implementation of Kademlia with S/Kademlia modifications. @@ -106,13 +107,23 @@ func New(h host.Host, cfg *Config) (*DHT, error) { // instantiate a new Kademlia DHT coordinator. coordCfg := coord.DefaultCoordinatorConfig() + coordCfg.Clock = cfg.Clock + coordCfg.Logger = cfg.Logger + coordCfg.MeterProvider = cfg.MeterProvider + coordCfg.TracerProvider = cfg.TracerProvider + + coordCfg.Query.Clock = cfg.Clock + coordCfg.Query.Logger = cfg.Logger.With("behaviour", "pooledquery") + coordCfg.Query.Tracer = cfg.TracerProvider.Tracer(tele.TracerName) coordCfg.Query.Concurrency = cfg.Query.Concurrency coordCfg.Query.Timeout = cfg.Query.Timeout coordCfg.Query.RequestConcurrency = cfg.Query.RequestConcurrency coordCfg.Query.RequestTimeout = cfg.Query.RequestTimeout - coordCfg.Clock = cfg.Clock - coordCfg.MeterProvider = cfg.MeterProvider - coordCfg.TracerProvider = cfg.TracerProvider + + coordCfg.Routing.Clock = cfg.Clock + coordCfg.Routing.Logger = cfg.Logger.With("behaviour", "routing") + coordCfg.Routing.Tracer = cfg.TracerProvider.Tracer(tele.TracerName) + coordCfg.Routing.Meter = cfg.MeterProvider.Meter(tele.MeterName) d.kad, err = coord.NewCoordinator(kadt.PeerID(d.host.ID()), &router{host: h, ProtocolID: cfg.ProtocolID}, d.rt, coordCfg) if err != nil { @@ -202,11 +213,11 @@ func (d *DHT) initAminoBackends() (map[string]Backend, error) { // Close cleans up all resources associated with this DHT. func (d *DHT) Close() error { if err := d.sub.Close(); err != nil { - d.log.With("err", err).Debug("failed closing event bus subscription") + d.debugErr(err, "failed closing event bus subscription") } if err := d.kad.Close(); err != nil { - d.log.With("err", err).Debug("failed closing coordinator") + d.debugErr(err, "failed closing coordinator") } for ns, b := range d.backends { @@ -216,7 +227,7 @@ func (d *DHT) Close() error { } if err := closer.Close(); err != nil { - d.log.Warn("failed closing backend", "namespace", ns, "err", err.Error()) + d.warnErr(err, "failed closing backend", "namespace", ns) } } @@ -230,7 +241,7 @@ func (d *DHT) Close() error { if d.cfg.ProtocolID == ProtocolIPFS && d.cfg.Datastore == nil { if pbe, err := typedBackend[*ProvidersBackend](d, namespaceProviders); err == nil { if err := pbe.datastore.Close(); err != nil { - d.log.Warn("failed closing in memory datastore", "err", err.Error()) + d.warnErr(err, "failed closing in memory datastore") } } } @@ -244,7 +255,7 @@ func (d *DHT) Close() error { } if err := s.Reset(); err != nil { - d.log.With("err", err).Debug("failed closing stream") + d.debugErr(err, "failed closing stream") } } } @@ -302,21 +313,41 @@ func (d *DHT) setClientMode() { } if err := s.Reset(); err != nil { - d.log.With("err", err).Debug("failed closing stream") + d.debugErr(err, "failed closing stream") } } } } -// logErr is a helper method that uses the slogger of the DHT and writes a -// warning log line with the given message alongside the error. If the error +// warnErr is a helper method that uses the slogger of the DHT and writes a +// warning log line with the given message alongside the error. args is a list of +// key/value pairs or slog.Attrs that will be included with the log message. If the error // is nil, this method is a no-op. 
-func (d *DHT) logErr(err error, msg string) {
+func (d *DHT) warnErr(err error, msg string, args ...any) {
 	if err == nil {
 		return
 	}
 
-	d.log.Warn(msg, "err", err.Error())
+	if len(args) == 0 {
+		d.log.Warn(msg, tele.LogAttrError(err))
+		return
+	}
+	d.log.With(args...).Warn(msg, tele.LogAttrError(err))
+}
+
+// debugErr is a helper method that uses the slogger of the DHT and writes a
+// debug log line with the given message alongside the error. args is a list of
+// key/value pairs or slog.Attrs that will be included with the log message. If the error
+// is nil, this method is a no-op.
+func (d *DHT) debugErr(err error, msg string, args ...any) {
+	if err == nil {
+		return
+	}
+	if len(args) == 0 {
+		d.log.Debug(msg, tele.LogAttrError(err))
+		return
+	}
+	d.log.With(args...).Debug(msg, tele.LogAttrError(err))
 }
 
 // AddAddresses suggests peers and their associated addresses to be added to the routing table.
diff --git a/v2/internal/coord/coordinator.go b/v2/internal/coord/coordinator.go
index e8e1428b..d4dda7fa 100644
--- a/v2/internal/coord/coordinator.go
+++ b/v2/internal/coord/coordinator.go
@@ -336,6 +336,7 @@ func (c *Coordinator) PutValue(ctx context.Context, r coordt.Value, q int) error
 func (c *Coordinator) QueryClosest(ctx context.Context, target kadt.Key, fn coordt.QueryFunc, numResults int) ([]kadt.PeerID, coordt.QueryStats, error) {
 	ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.Query")
 	defer span.End()
+	c.cfg.Logger.Debug("starting query for closest nodes", tele.LogAttrKey(target))
 
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
@@ -381,6 +382,10 @@ func (c *Coordinator) QueryMessage(ctx context.Context, msg *pb.Message, fn coordt.QueryFunc, numResults int) (coordt.QueryStats, error) {
 	ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.QueryMessage")
 	defer span.End()
+	if msg == nil {
+		return coordt.QueryStats{}, fmt.Errorf("no message supplied for query")
+	}
+	c.cfg.Logger.Debug("starting query with message", tele.LogAttrKey(msg.Target()), slog.String("type", msg.Type.String()))
 
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
@@ -421,6 +426,10 @@ func (c *Coordinator) BroadcastRecord(ctx context.Context, msg *pb.Message) error {
 	ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.BroadcastRecord")
 	defer span.End()
+	if msg == nil {
+		return fmt.Errorf("no message supplied for broadcast")
+	}
+	c.cfg.Logger.Debug("starting broadcast with message", tele.LogAttrKey(msg.Target()), slog.String("type", msg.Type.String()))
 
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
@@ -474,6 +483,7 @@ func (c *Coordinator) waitForQuery(ctx context.Context, queryID coordt.QueryID,
 		ctx, ev := wev.Ctx, wev.Event
 		switch ev := ev.(type) {
 		case *EventQueryProgressed:
+			c.cfg.Logger.Debug("query made progress", "query_id", queryID, tele.LogAttrPeerID(ev.NodeID), slog.Duration("elapsed", c.cfg.Clock.Since(ev.Stats.Start)), slog.Int("requests", ev.Stats.Requests), slog.Int("failures", ev.Stats.Failure))
 			lastStats = coordt.QueryStats{
 				Start:    ev.Stats.Start,
 				Requests: ev.Stats.Requests,
@@ -483,12 +493,14 @@ func (c *Coordinator) waitForQuery(ctx context.Context, queryID coordt.QueryID,
 			nh, err := c.networkBehaviour.getNodeHandler(ctx, ev.NodeID)
 			if err != nil {
 				// ignore unknown node
+				c.cfg.Logger.Debug("node handler not found", "query_id", queryID, tele.LogAttrError(err))
 				break
 			}
 
 			err = fn(ctx, nh.ID(), ev.Response,
lastStats) if errors.Is(err, coordt.ErrSkipRemaining) { // done + c.cfg.Logger.Debug("query done", "query_id", queryID) c.queryBehaviour.Notify(ctx, &EventStopQuery{QueryID: queryID}) return nil, lastStats, nil } @@ -501,6 +513,7 @@ func (c *Coordinator) waitForQuery(ctx context.Context, queryID coordt.QueryID, case *EventQueryFinished: // query is done lastStats.Exhausted = true + c.cfg.Logger.Debug("query ran to exhaustion", "query_id", queryID, slog.Duration("elapsed", ev.Stats.End.Sub(ev.Stats.Start)), slog.Int("requests", ev.Stats.Requests), slog.Int("failures", ev.Stats.Failure)) return ev.ClosestNodes, lastStats, nil default: @@ -571,6 +584,7 @@ func (c *Coordinator) NotifyConnectivity(ctx context.Context, id kadt.PeerID) er ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.NotifyConnectivity") defer span.End() + c.cfg.Logger.Debug("peer has connectivity", tele.LogAttrPeerID(id), "source", "notify") c.routingBehaviour.Notify(ctx, &EventNotifyConnectivity{ NodeID: id, }) @@ -584,6 +598,7 @@ func (c *Coordinator) NotifyNonConnectivity(ctx context.Context, id kadt.PeerID) ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.NotifyNonConnectivity") defer span.End() + c.cfg.Logger.Debug("peer has no connectivity", tele.LogAttrPeerID(id), "source", "notify") c.routingBehaviour.Notify(ctx, &EventNotifyNonConnectivity{ NodeID: id, }) diff --git a/v2/internal/coord/event.go b/v2/internal/coord/event.go index 69f86a33..20810ce0 100644 --- a/v2/internal/coord/event.go +++ b/v2/internal/coord/event.go @@ -221,8 +221,8 @@ type EventNotifyConnectivity struct { NodeID kadt.PeerID } -func (*EventNotifyConnectivity) behaviourEvent() {} -func (*EventNotifyConnectivity) routingNotification() {} +func (*EventNotifyConnectivity) behaviourEvent() {} +func (*EventNotifyConnectivity) routingCommand() {} // EventNotifyNonConnectivity notifies a behaviour that a peer does not have connectivity and/or does not support // finding closer nodes is known. 
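The EventNotifyConnectivity change above swaps the routingNotification marker
for routingCommand, and that is more than a rename: these empty marker
methods are how the coordinator's behaviours decide which events they will
accept. Reduced to a self-contained sketch with made-up names (not the
coordinator's actual types):

    package main

    import "fmt"

    // routingCommand marks events that instruct the routing behaviour to act.
    type routingCommand interface {
    	routingCommand()
    }

    // notifyConnectivity reports that a peer was seen to be reachable; the
    // empty marker method below is all that makes it a routing command.
    type notifyConnectivity struct {
    	peer string
    }

    func (notifyConnectivity) routingCommand() {}

    // notify only admits events that carry the marker.
    func notify(ev any) {
    	if cmd, ok := ev.(routingCommand); ok {
    		fmt.Printf("accepted %T\n", cmd)
    		return
    	}
    	fmt.Printf("dropped %T\n", ev)
    }

    func main() {
    	notify(notifyConnectivity{peer: "peer-a"}) // accepted
    	notify("unrelated event")                  // dropped
    }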
diff --git a/v2/internal/coord/query.go b/v2/internal/coord/query.go index 68ca59d5..7bae8ac0 100644 --- a/v2/internal/coord/query.go +++ b/v2/internal/coord/query.go @@ -200,6 +200,7 @@ func (p *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { } case *EventGetCloserNodesFailure: // queue an event that will notify the routing behaviour of a failed node + p.cfg.Logger.Debug("peer has no connectivity", tele.LogAttrPeerID(ev.To), "source", "query") p.pending = append(p.pending, &EventNotifyNonConnectivity{ ev.To, }) @@ -231,6 +232,7 @@ func (p *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { } case *EventSendMessageFailure: // queue an event that will notify the routing behaviour of a failed node + p.cfg.Logger.Debug("peer has no connectivity", tele.LogAttrPeerID(ev.To), "source", "query") p.pending = append(p.pending, &EventNotifyNonConnectivity{ ev.To, }) diff --git a/v2/internal/coord/routing.go b/v2/internal/coord/routing.go index ab6918d5..bf019752 100644 --- a/v2/internal/coord/routing.go +++ b/v2/internal/coord/routing.go @@ -585,6 +585,8 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { if r.self.Equal(ev.NodeID) { break } + r.cfg.Logger.Debug("peer has connectivity", tele.LogAttrPeerID(ev.NodeID)) + // tell the include state machine in case this is a new peer that could be added to the routing table cmd := &routing.EventIncludeAddCandidate[kadt.Key, kadt.PeerID]{ NodeID: ev.NodeID, @@ -705,6 +707,7 @@ func (r *RoutingBehaviour) advanceBootstrap(ctx context.Context, ev routing.Boot case *routing.StateBootstrapWaiting: // bootstrap waiting for a message response, nothing to do case *routing.StateBootstrapFinished: + r.cfg.Logger.Debug("bootstrap finished", slog.Duration("elapsed", st.Stats.End.Sub(st.Stats.Start)), slog.Int("requests", st.Stats.Requests), slog.Int("failures", st.Stats.Failure)) return &EventBootstrapFinished{ Stats: st.Stats, }, true @@ -726,6 +729,7 @@ func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.Includ case *routing.StateIncludeConnectivityCheck[kadt.Key, kadt.PeerID]: span.SetAttributes(attribute.String("out_event", "EventOutboundGetCloserNodes")) // include wants to send a find node message to a node + r.cfg.Logger.Debug("starting connectivity check", tele.LogAttrPeerID(st.NodeID), "source", "include") return &EventOutboundGetCloserNodes{ QueryID: IncludeQueryID, To: st.NodeID, @@ -743,6 +747,7 @@ func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.Includ // return the event to notify outwards too span.SetAttributes(attribute.String("out_event", "EventRoutingUpdated")) + r.cfg.Logger.Debug("peer added to routing table", tele.LogAttrPeerID(st.NodeID)) return &EventRoutingUpdated{ NodeID: st.NodeID, }, true @@ -768,6 +773,7 @@ func (r *RoutingBehaviour) advanceProbe(ctx context.Context, ev routing.ProbeEve switch st := st.(type) { case *routing.StateProbeConnectivityCheck[kadt.Key, kadt.PeerID]: // include wants to send a find node message to a node + r.cfg.Logger.Debug("starting connectivity check", tele.LogAttrPeerID(st.NodeID), "source", "probe") return &EventOutboundGetCloserNodes{ QueryID: ProbeQueryID, To: st.NodeID, @@ -778,6 +784,7 @@ func (r *RoutingBehaviour) advanceProbe(ctx context.Context, ev routing.ProbeEve // a node has failed a connectivity check and been removed from the routing table and the probe list // emit an EventRoutingRemoved event to notify clients that the node has been removed + r.cfg.Logger.Debug("peer removed from 
routing table", tele.LogAttrPeerID(st.NodeID)) r.pending = append(r.pending, &EventRoutingRemoved{ NodeID: st.NodeID, }) @@ -809,6 +816,7 @@ func (r *RoutingBehaviour) advanceExplore(ctx context.Context, ev routing.Explor switch st := bstate.(type) { case *routing.StateExploreFindCloser[kadt.Key, kadt.PeerID]: + r.cfg.Logger.Debug("starting explore", slog.Int("cpl", st.Cpl), tele.LogAttrPeerID(st.NodeID)) return &EventOutboundGetCloserNodes{ QueryID: routing.ExploreQueryID, To: st.NodeID, @@ -823,7 +831,7 @@ func (r *RoutingBehaviour) advanceExplore(ctx context.Context, ev routing.Explor case *routing.StateExploreQueryTimeout: // nothing to do except notify via telemetry case *routing.StateExploreFailure: - r.cfg.Logger.Warn("explore failure", "cpl", st.Cpl, "error", st.Error) + r.cfg.Logger.Warn("explore failure", slog.Int("cpl", st.Cpl), tele.LogAttrError(st.Error)) case *routing.StateExploreIdle: // bootstrap not running, nothing to do default: diff --git a/v2/routing.go b/v2/routing.go index 756a3d48..ed66a559 100644 --- a/v2/routing.go +++ b/v2/routing.go @@ -358,6 +358,7 @@ func (d *DHT) SearchValue(ctx context.Context, s string, option ...routing.Optio func (d *DHT) Bootstrap(ctx context.Context) error { ctx, span := d.tele.Tracer.Start(ctx, "DHT.Bootstrap") defer span.End() + d.log.Info("Starting bootstrap") seed := make([]kadt.PeerID, len(d.cfg.BootstrapPeers)) for i, addrInfo := range d.cfg.BootstrapPeers { diff --git a/v2/stream.go b/v2/stream.go index e9e747a3..35aee6e2 100644 --- a/v2/stream.go +++ b/v2/stream.go @@ -42,18 +42,18 @@ func (d *DHT) streamHandler(s network.Stream) { if err := s.Scope().SetService(ServiceName); err != nil { d.log.LogAttrs(ctx, slog.LevelWarn, "error attaching stream to DHT service", slog.String("err", err.Error())) - d.logErr(s.Reset(), "failed to reset stream") + d.warnErr(s.Reset(), "failed to reset stream") span.RecordError(err) return } if err := d.handleNewStream(ctx, s); err != nil { // If we exited with an error, let the remote peer know. - d.logErr(s.Reset(), "failed to reset stream") + d.warnErr(s.Reset(), "failed to reset stream") span.RecordError(err) } else { // If we exited without an error, close gracefully. 
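		// (Reset tears the stream down and signals an error to the remote,
		// whereas Close flushes any buffered data and ends it cleanly.)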
- d.logErr(s.Close(), "failed to close stream") + d.warnErr(s.Close(), "failed to close stream") } } diff --git a/v2/tele/log.go b/v2/tele/log.go index cc65f7e4..b91426a9 100644 --- a/v2/tele/log.go +++ b/v2/tele/log.go @@ -2,10 +2,33 @@ package tele import ( logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "go.uber.org/zap/exp/zapslog" "golang.org/x/exp/slog" ) -func DefaultLogger(system string) *slog.Logger { - return slog.New(zapslog.NewHandler(logging.Logger(system).Desugar().Core())) +func DefaultLogger(name string) *slog.Logger { + return slog.New(zapslog.NewHandler(logging.Logger(name).Desugar().Core())) +} + +// Attributes that can be used with logging or tracing +const ( + AttrKeyError = "error" + AttrKeyPeerID = "peer_id" + AttrKeyKey = "key" + AttrKeyCacheHit = "hit" + AttrKeyInEvent = "in_event" + AttrKeyOutEvent = "out_event" +) + +func LogAttrError(err error) slog.Attr { + return slog.Attr{Key: AttrKeyError, Value: slog.AnyValue(err)} +} + +func LogAttrPeerID(id kadt.PeerID) slog.Attr { + return slog.String(AttrKeyPeerID, id.String()) +} + +func LogAttrKey(kk kadt.Key) slog.Attr { + return slog.String(AttrKeyKey, kk.HexString()) } diff --git a/v2/tele/tele.go b/v2/tele/tele.go index 0cd4fac7..78aecc98 100644 --- a/v2/tele/tele.go +++ b/v2/tele/tele.go @@ -79,11 +79,11 @@ func AttrInstanceID(instanceID string) attribute.KeyValue { } func AttrPeerID(pid string) attribute.KeyValue { - return attribute.String("peer_id", pid) + return attribute.String(AttrKeyPeerID, pid) } func AttrCacheHit(hit bool) attribute.KeyValue { - return attribute.Bool("hit", hit) + return attribute.Bool(AttrKeyCacheHit, hit) } // AttrRecordType is currently only used for the provider backend LRU cache @@ -101,12 +101,12 @@ func AttrKey(val string) attribute.KeyValue { // AttrInEvent creates an attribute that records the type of an event func AttrInEvent(t any) attribute.KeyValue { - return attribute.String("in_event", fmt.Sprintf("%T", t)) + return attribute.String(AttrKeyInEvent, fmt.Sprintf("%T", t)) } // AttrOutEvent creates an attribute that records the type of an event being returned func AttrOutEvent(t any) attribute.KeyValue { - return attribute.String("out_event", fmt.Sprintf("%T", t)) + return attribute.String(AttrKeyOutEvent, fmt.Sprintf("%T", t)) } // WithAttributes is a function that attaches the provided attributes to the From 03adce677e4ecf1eb1cfdb386efa7ac50e8c762c Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Thu, 28 Sep 2023 12:48:08 +0200 Subject: [PATCH 60/64] Implement SearchValue/GetValue (#942) --- v2/backend.go | 12 + v2/backend_provider.go | 54 +- v2/backend_provider_test.go | 63 +++ v2/backend_record.go | 51 ++ v2/backend_record_test.go | 114 +++++ v2/backend_trace.go | 15 + v2/config.go | 25 + v2/config_test.go | 30 ++ v2/dht.go | 15 +- v2/dht_test.go | 15 + v2/handlers.go | 18 +- v2/handlers_test.go | 31 +- v2/internal/coord/brdcst/brdcst.go | 2 +- v2/internal/coord/brdcst/config.go | 23 +- v2/internal/coord/brdcst/config_test.go | 1 + v2/internal/coord/brdcst/pool.go | 14 +- v2/internal/coord/brdcst/pool_test.go | 112 ++++- v2/internal/coord/brdcst/static.go | 143 ++++++ v2/internal/coord/coordinator.go | 54 +- v2/router.go | 38 +- v2/routing.go | 251 ++++++++-- v2/routing_test.go | 634 +++++++++++++++++++++--- v2/tele/tele.go | 2 +- v2/topology_test.go | 4 - 24 files changed, 1543 insertions(+), 178 deletions(-) create mode 100644 v2/backend_record_test.go create mode 100644 v2/internal/coord/brdcst/static.go diff --git 
a/v2/backend.go b/v2/backend.go index cccb2c36..ed25de43 100644 --- a/v2/backend.go +++ b/v2/backend.go @@ -55,6 +55,18 @@ type Backend interface { // wasn't found or another error if any occurred. key won't contain the // namespace prefix. Fetch(ctx context.Context, key string) (any, error) + + // Validate validates the given values and returns the index of the "best" + // value or an error and -1 if all values are invalid. If the method is used + // with a single value, it will return 0 and no error if it is valid or an + // error and -1 if it is invalid. For multiple values, it will select the + // "best" value based on user-defined logic and return its index in the + // original values list. If we receive a request for /ipns/$binary_id, the + // key parameter will be set to $binary_id. Decisions about which value is + // the "best" from the given list must be stable. So if there are multiple + // equally good values, the implementation must always return the same + // index - for example, always the first good or last good value. + Validate(ctx context.Context, key string, values ...any) (int, error) } // NewBackendIPNS initializes a new backend for the "ipns" namespace that can diff --git a/v2/backend_provider.go b/v2/backend_provider.go index 3be9d88a..703fa1ee 100644 --- a/v2/backend_provider.go +++ b/v2/backend_provider.go @@ -1,11 +1,11 @@ package dht import ( + "bytes" "context" "encoding/binary" "fmt" "io" - "path" "strings" "sync" "time" @@ -226,13 +226,50 @@ func (p *ProvidersBackend) Fetch(ctx context.Context, key string) (any, error) { out.addProvider(addrInfo, rec.expiry) } - if len(out.providers) > 0 { + if len(out.providers) == 0 { + return nil, ds.ErrNotFound + } else { p.cache.Add(qKey.String(), *out) } return out, nil } +// Validate verifies that the given values are of type [peer.AddrInfo]. Then it +// decides based on the number of attached multi addresses which value is +// "better" than the other. If there is a tie, Validate will return the index +// of the earliest occurrence. +func (p *ProvidersBackend) Validate(ctx context.Context, key string, values ...any) (int, error) { + // short circuit if it's just a single value + if len(values) == 1 { + _, ok := values[0].(peer.AddrInfo) + if !ok { + return -1, fmt.Errorf("invalid type %T", values[0]) + } + return 0, nil + } + + bestIdx := -1 + for i, value := range values { + addrInfo, ok := value.(peer.AddrInfo) + if !ok { + continue + } + + if bestIdx == -1 { + bestIdx = i + } else if len(values[bestIdx].(peer.AddrInfo).Addrs) < len(addrInfo.Addrs) { + bestIdx = i + } + } + + if bestIdx == -1 { + return -1, fmt.Errorf("no value of correct type") + } + + return bestIdx, nil +} + // Close is here to implement the [io.Closer] interface. This will get called // when the [DHT] "shuts down"/closes. 
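// (DHT.Close, shown earlier in this series, walks all registered backends
// and closes every one that implements io.Closer, logging a warning on
// failure.)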
func (p *ProvidersBackend) Close() error { @@ -431,5 +468,16 @@ func newDatastoreKey(namespace string, binStrs ...string) ds.Key { for i, bin := range binStrs { elems[i+1] = base32.RawStdEncoding.EncodeToString([]byte(bin)) } - return ds.NewKey("/" + path.Join(elems...)) + + return ds.NewKey("/" + strings.Join(elems, "/")) +} + +// newRoutingKey uses the given namespace and binary string key and constructs +// a new string of the format: /$namespace/$binStr +func newRoutingKey(namespace string, binStr string) string { + buf := make([]byte, 0, 2+len(namespace)+len(binStr)) + buffer := bytes.NewBuffer(buf) + buffer.WriteString("/" + namespace + "/") + buffer.Write([]byte(binStr)) + return buffer.String() } diff --git a/v2/backend_provider_test.go b/v2/backend_provider_test.go index d3ab465d..37e01368 100644 --- a/v2/backend_provider_test.go +++ b/v2/backend_provider_test.go @@ -10,9 +10,13 @@ import ( "github.com/benbjohnson/clock" ds "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/exp/slog" + + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" ) var devnull = slog.New(slog.NewTextHandler(io.Discard, nil)) @@ -115,3 +119,62 @@ func TestProvidersBackend_GarbageCollection_lifecycle_thread_safe(t *testing.T) assert.Nil(t, b.gcCancel) assert.Nil(t, b.gcDone) } + +func TestProvidersBackend_Validate(t *testing.T) { + ctx := kadtest.CtxShort(t) + + b := newBackendProvider(t, nil) + + pid := newPeerID(t) + peer1 := peer.AddrInfo{ID: pid, Addrs: make([]multiaddr.Multiaddr, 0)} + peer2 := peer.AddrInfo{ID: pid, Addrs: make([]multiaddr.Multiaddr, 1)} + peer3 := peer.AddrInfo{ID: pid, Addrs: make([]multiaddr.Multiaddr, 2)} + + t.Run("no values", func(t *testing.T) { + idx, err := b.Validate(ctx, "some-key") + assert.Error(t, err) + assert.Equal(t, -1, idx) + }) + + t.Run("nil value", func(t *testing.T) { + idx, err := b.Validate(ctx, "some-key", nil) + assert.Error(t, err) + assert.Equal(t, -1, idx) + }) + + t.Run("nil values", func(t *testing.T) { + idx, err := b.Validate(ctx, "some-key", nil, nil) + assert.Error(t, err) + assert.Equal(t, -1, idx) + }) + + t.Run("single valid value", func(t *testing.T) { + idx, err := b.Validate(ctx, "some-key", peer1) + assert.NoError(t, err) + assert.Equal(t, 0, idx) + }) + + t.Run("increasing better values", func(t *testing.T) { + idx, err := b.Validate(ctx, "some-key", peer1, peer2, peer3) + assert.NoError(t, err) + assert.Equal(t, 2, idx) + }) + + t.Run("mixed better values", func(t *testing.T) { + idx, err := b.Validate(ctx, "some-key", peer1, peer3, peer2) + assert.NoError(t, err) + assert.Equal(t, 1, idx) + }) + + t.Run("mixed invalid values", func(t *testing.T) { + idx, err := b.Validate(ctx, "some-key", peer1, nil, peer2, nil) + assert.NoError(t, err) + assert.Equal(t, 2, idx) + }) + + t.Run("identically good values", func(t *testing.T) { + idx, err := b.Validate(ctx, "some-key", peer1, peer1) + assert.NoError(t, err) + assert.Equal(t, 0, idx) + }) +} diff --git a/v2/backend_record.go b/v2/backend_record.go index ba4a94ba..e0c4284d 100644 --- a/v2/backend_record.go +++ b/v2/backend_record.go @@ -131,6 +131,57 @@ func (r *RecordBackend) Fetch(ctx context.Context, key string) (any, error) { return rec, nil } +func (r *RecordBackend) Validate(ctx context.Context, key string, values ...any) (int, error) { + k := newRoutingKey(r.namespace, key) + + // short circuit if it's 
just a single value + if len(values) == 1 { + data, ok := values[0].([]byte) + if !ok { + return -1, fmt.Errorf("value not byte slice") + } + + if err := r.validator.Validate(k, data); err != nil { + return -1, err + } + + return 0, nil + } + + // In case there are invalid values in the slice, we still want to return + // the index in the original list of values. The Select method below will + // return the index of the "best" value in the slice of valid values. This + // slice can have a different length and therefore that method will return + // an index that doesn't match the values slice that's passed into this + // method. origIdx stores the original index + origIdx := map[int]int{} + validValues := [][]byte{} + for i, value := range values { + data, ok := value.([]byte) + if !ok { + continue + } + + if err := r.validator.Validate(k, data); err != nil { + continue + } + + origIdx[len(validValues)] = i + validValues = append(validValues, data) + } + + if len(validValues) == 0 { + return -1, fmt.Errorf("no valid values") + } + + sel, err := r.validator.Select(k, validValues) + if err != nil { + return -1, err + } + + return origIdx[sel], nil +} + // shouldReplaceExistingRecord returns true if the given record should replace any // existing one in the local datastore. It queries the datastore, unmarshalls // the record, validates it, and compares it to the incoming record. If the diff --git a/v2/backend_record_test.go b/v2/backend_record_test.go new file mode 100644 index 00000000..eb772a3a --- /dev/null +++ b/v2/backend_record_test.go @@ -0,0 +1,114 @@ +package dht + +import ( + "fmt" + "strconv" + "strings" + "testing" + + record "github.com/libp2p/go-libp2p-record" + "github.com/stretchr/testify/assert" + + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" +) + +// testValidator is a validator that considers all values valid that have a +// "valid-" prefix. Then the suffix will determine which value is better. For +// example, "valid-2" is better than "valid-1". 
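+// Select returns the index of the best value in the slice it is given, so
+// over ["valid-1", "invalid", "valid-3"] it returns 2.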
+type testValidator struct{} + +var _ record.Validator = (*testValidator)(nil) + +func (t testValidator) Validate(key string, value []byte) error { + if strings.HasPrefix(string(value), "valid-") { + return nil + } + return fmt.Errorf("invalid value") +} + +func (t testValidator) Select(key string, values [][]byte) (int, error) { + idx := -1 + best := -1 + for i, val := range values { + if !strings.HasPrefix(string(val), "valid-") { + continue + } + newBest, err := strconv.Atoi(string(val)[6:]) + if err != nil { + continue + } + if newBest > best { + idx = i + best = newBest + } + } + + if idx == -1 { + return idx, fmt.Errorf("no valid value") + } + + return idx, nil +} + +func TestRecordBackend_Validate(t *testing.T) { + ctx := kadtest.CtxShort(t) + + b := &RecordBackend{ + namespace: "test", + validator: &testValidator{}, + } + + t.Run("no values", func(t *testing.T) { + idx, err := b.Validate(ctx, "some-key") + assert.Error(t, err) + assert.Equal(t, -1, idx) + }) + + t.Run("nil value", func(t *testing.T) { + idx, err := b.Validate(ctx, "some-key", nil) + assert.Error(t, err) + assert.Equal(t, -1, idx) + }) + + t.Run("nil values", func(t *testing.T) { + idx, err := b.Validate(ctx, "some-key", nil, nil) + assert.Error(t, err) + assert.Equal(t, -1, idx) + }) + + t.Run("single valid value", func(t *testing.T) { + idx, err := b.Validate(ctx, "some-key", []byte("valid-0")) + assert.NoError(t, err) + assert.Equal(t, 0, idx) + }) + + t.Run("increasing better values", func(t *testing.T) { + idx, err := b.Validate(ctx, "some-key", []byte("valid-0"), []byte("valid-1"), []byte("valid-2")) + assert.NoError(t, err) + assert.Equal(t, 2, idx) + }) + + t.Run("mixed better values", func(t *testing.T) { + idx, err := b.Validate(ctx, "some-key", []byte("valid-0"), []byte("valid-2"), []byte("valid-1")) + assert.NoError(t, err) + assert.Equal(t, 1, idx) + }) + + t.Run("mixed invalid values", func(t *testing.T) { + idx, err := b.Validate(ctx, "some-key", []byte("valid-0"), []byte("invalid"), []byte("valid-2"), []byte("invalid")) + assert.NoError(t, err) + assert.Equal(t, 2, idx) + }) + + t.Run("only invalid values", func(t *testing.T) { + idx, err := b.Validate(ctx, "some-key", []byte("invalid"), nil) + assert.Error(t, err) + assert.Equal(t, -1, idx) + }) + + t.Run("identically good values", func(t *testing.T) { + idx, err := b.Validate(ctx, "some-key", []byte("valid-0"), []byte("valid-0")) + assert.NoError(t, err) + assert.Equal(t, 0, idx) + }) +} diff --git a/v2/backend_trace.go b/v2/backend_trace.go index 72335c35..f09dfd0d 100644 --- a/v2/backend_trace.go +++ b/v2/backend_trace.go @@ -57,6 +57,21 @@ func (t *tracedBackend) Fetch(ctx context.Context, key string) (any, error) { return result, err } +func (t *tracedBackend) Validate(ctx context.Context, key string, values ...any) (int, error) { + ctx, span := t.tracer.Start(ctx, "Validate", t.traceAttributes(key)) + defer span.End() + + idx, err := t.backend.Validate(ctx, key, values...) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + } else { + span.SetAttributes(attribute.Int("idx", idx)) + } + + return idx, err +} + // traceAttributes is a helper to build the trace attributes. 
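// It attaches the backend's namespace and the record key to the span.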
func (t *tracedBackend) traceAttributes(key string) trace.SpanStartEventOption { return trace.WithAttributes(attribute.String("namespace", t.namespace), attribute.String("key", key)) diff --git a/v2/config.go b/v2/config.go index 696b8dfe..b7314ecf 100644 --- a/v2/config.go +++ b/v2/config.go @@ -269,6 +269,15 @@ func (c *Config) Validate() error { } } + for _, bp := range c.BootstrapPeers { + if len(bp.Addrs) == 0 { + return &ConfigurationError{ + Component: "Config", + Err: fmt.Errorf("bootstrap peer with no address"), + } + } + } + if c.ProtocolID == "" { return &ConfigurationError{ Component: "Config", @@ -378,6 +387,14 @@ type QueryConfig struct { // RequestTimeout defines the time to wait before terminating a request to a node that has not responded. RequestTimeout time.Duration + + // DefaultQuorum specifies the minimum number of identical responses before + // a SearchValue/GetValue operation returns. The responses must not only be + // identical, but the responses must also correspond to the "best" records + // we have observed in the network during the SearchValue/GetValue + // operation. A DefaultQuorum of 0 means that we search the network until + // we have exhausted the keyspace. + DefaultQuorum int } // DefaultQueryConfig returns the default query configuration options for a DHT. @@ -387,6 +404,7 @@ func DefaultQueryConfig() *QueryConfig { Timeout: 5 * time.Minute, // MAGIC RequestConcurrency: 3, // MAGIC RequestTimeout: time.Minute, // MAGIC + DefaultQuorum: 0, // MAGIC } } @@ -419,5 +437,12 @@ func (cfg *QueryConfig) Validate() error { } } + if cfg.DefaultQuorum < 0 { + return &ConfigurationError{ + Component: "QueryConfig", + Err: fmt.Errorf("default quorum must not be negative"), + } + } + return nil } diff --git a/v2/config_test.go b/v2/config_test.go index ad84b8d4..5b49f9eb 100644 --- a/v2/config_test.go +++ b/v2/config_test.go @@ -5,7 +5,9 @@ import ( "time" "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestConfig_Validate(t *testing.T) { @@ -140,6 +142,25 @@ func TestConfig_Validate(t *testing.T) { cfg.BootstrapPeers = []peer.AddrInfo{} assert.Error(t, cfg.Validate()) }) + + t.Run("bootstrap peers no addresses", func(t *testing.T) { + cfg := DefaultConfig() + cfg.BootstrapPeers = []peer.AddrInfo{ + {ID: newPeerID(t), Addrs: []ma.Multiaddr{}}, + } + assert.Error(t, cfg.Validate()) + }) + + t.Run("bootstrap peers mixed no addresses", func(t *testing.T) { + cfg := DefaultConfig() + maddr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/1234") + require.NoError(t, err) + cfg.BootstrapPeers = []peer.AddrInfo{ + {ID: newPeerID(t), Addrs: []ma.Multiaddr{}}, + {ID: newPeerID(t), Addrs: []ma.Multiaddr{maddr}}, + } + assert.Error(t, cfg.Validate()) // still an error + }) } func TestQueryConfig_Validate(t *testing.T) { @@ -183,4 +204,13 @@ func TestQueryConfig_Validate(t *testing.T) { cfg.RequestTimeout = -1 assert.Error(t, cfg.Validate()) }) + + t.Run("negative default quorum", func(t *testing.T) { + cfg := DefaultQueryConfig() + + cfg.DefaultQuorum = 0 + assert.NoError(t, cfg.Validate()) + cfg.DefaultQuorum = -1 + assert.Error(t, cfg.Validate()) + }) } diff --git a/v2/dht.go b/v2/dht.go index b5229c9e..86e3b97f 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "sync" + "sync/atomic" "time" "github.com/ipfs/go-datastore/trace" @@ -56,6 +57,9 @@ type DHT struct { // tele holds a reference to a telemetry struct tele *Telemetry + + // 
indicates whether this DHT instance was stopped ([DHT.Close] was called). + stopped atomic.Bool } // New constructs a new [DHT] for the given underlying host and with the given @@ -125,7 +129,12 @@ func New(h host.Host, cfg *Config) (*DHT, error) { coordCfg.Routing.Tracer = cfg.TracerProvider.Tracer(tele.TracerName) coordCfg.Routing.Meter = cfg.MeterProvider.Meter(tele.MeterName) - d.kad, err = coord.NewCoordinator(kadt.PeerID(d.host.ID()), &router{host: h, ProtocolID: cfg.ProtocolID}, d.rt, coordCfg) + rtr := &router{ + host: h, + protocolID: cfg.ProtocolID, + tracer: d.tele.Tracer, + } + d.kad, err = coord.NewCoordinator(kadt.PeerID(d.host.ID()), rtr, d.rt, coordCfg) if err != nil { return nil, fmt.Errorf("new coordinator: %w", err) } @@ -212,6 +221,10 @@ func (d *DHT) initAminoBackends() (map[string]Backend, error) { // Close cleans up all resources associated with this DHT. func (d *DHT) Close() error { + if d.stopped.Swap(true) { + return nil + } + if err := d.sub.Close(); err != nil { d.debugErr(err, "failed closing event bus subscription") } diff --git a/v2/dht_test.go b/v2/dht_test.go index 0ee635df..ad53e2f7 100644 --- a/v2/dht_test.go +++ b/v2/dht_test.go @@ -1,6 +1,7 @@ package dht import ( + "sync" "testing" "time" @@ -103,3 +104,17 @@ func TestAddAddresses(t *testing.T) { _, err = local.kad.GetNode(ctx, kadt.PeerID(remote.host.ID())) require.NoError(t, err) } + +func TestDHT_Close_idempotent(t *testing.T) { + d := newTestDHT(t) + + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + assert.NoError(t, d.Close()) + wg.Done() + }() + } + wg.Wait() +} diff --git a/v2/handlers.go b/v2/handlers.go index 74e55a2b..aebd9a68 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -83,6 +83,8 @@ func (d *DHT) handlePutValue(ctx context.Context, remote peer.ID, req *pb.Messag return nil, fmt.Errorf("key doesn't match record key") } + // TODO: use putValueLocal? 
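+	// (routing this handler through a shared local-store helper, as the TODO
+	// suggests, would avoid duplicating the validation and storage logic below)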
+ // key is /$namespace/$binary_id ns, path, err := record.SplitKey(k) // get namespace (prefix of the key) if err != nil || len(path) == 0 { @@ -208,8 +210,17 @@ func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Me return nil, fmt.Errorf("unsupported record type: %s", namespaceProviders) } + resp := &pb.Message{ + Type: pb.Message_GET_PROVIDERS, + Key: k, + CloserPeers: d.closerPeers(ctx, remote, kadt.NewKey(k)), + } + fetched, err := backend.Fetch(ctx, string(req.GetKey())) if err != nil { + if errors.Is(err, ds.ErrNotFound) { + return resp, nil + } return nil, fmt.Errorf("fetch providers from datastore: %w", err) } @@ -223,12 +234,7 @@ func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Me pbProviders[i] = pb.FromAddrInfo(p) } - resp := &pb.Message{ - Type: pb.Message_GET_PROVIDERS, - Key: k, - CloserPeers: d.closerPeers(ctx, remote, kadt.NewKey(k)), - ProviderPeers: pbProviders, - } + resp.ProviderPeers = pbProviders return resp, nil } diff --git a/v2/handlers_test.go b/v2/handlers_test.go index 6910c347..f838aaf8 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -436,6 +436,24 @@ func BenchmarkDHT_handlePing(b *testing.B) { func newPutIPNSRequest(t testing.TB, clk clock.Clock, priv crypto.PrivKey, seq uint64, ttl time.Duration) *pb.Message { t.Helper() + keyStr, value := makeIPNSKeyValue(t, clk, priv, seq, ttl) + + req := &pb.Message{ + Type: pb.Message_PUT_VALUE, + Key: []byte(keyStr), + Record: &recpb.Record{ + Key: []byte(keyStr), + Value: value, + TimeReceived: clk.Now().Format(time.RFC3339Nano), + }, + } + + return req +} + +func makeIPNSKeyValue(t testing.TB, clk clock.Clock, priv crypto.PrivKey, seq uint64, ttl time.Duration) (string, []byte) { + t.Helper() + testPath := path.Path("/ipfs/bafkqac3jobxhgidsn5rww4yk") rec, err := ipns.NewRecord(priv, testPath, seq, clk.Now().Add(ttl), ttl) @@ -447,18 +465,7 @@ func newPutIPNSRequest(t testing.TB, clk clock.Clock, priv crypto.PrivKey, seq u data, err := ipns.MarshalRecord(rec) require.NoError(t, err) - key := ipns.NameFromPeer(remote).RoutingKey() - req := &pb.Message{ - Type: pb.Message_PUT_VALUE, - Key: key, - Record: &recpb.Record{ - Key: key, - Value: data, - TimeReceived: time.Now().Format(time.RFC3339Nano), - }, - } - - return req + return string(ipns.NameFromPeer(remote).RoutingKey()), data } func BenchmarkDHT_handlePutValue_unique_peers(b *testing.B) { diff --git a/v2/internal/coord/brdcst/brdcst.go b/v2/internal/coord/brdcst/brdcst.go index 5d16b973..8711a1b3 100644 --- a/v2/internal/coord/brdcst/brdcst.go +++ b/v2/internal/coord/brdcst/brdcst.go @@ -75,7 +75,7 @@ func (*StateBroadcastIdle) broadcastState() {} // implement this interface. An "Event" is the opposite of a "State." An "Event" // flows into the state machine and a "State" flows out of it. // -// Currently, there are the [FollowUp] and [Optimistic] state machines. +// Currently, there are the [FollowUp] and [Static] state machines. type BroadcastEvent interface { broadcastEvent() } diff --git a/v2/internal/coord/brdcst/config.go b/v2/internal/coord/brdcst/config.go index 4d6d425b..70ca0c85 100644 --- a/v2/internal/coord/brdcst/config.go +++ b/v2/internal/coord/brdcst/config.go @@ -31,16 +31,17 @@ func DefaultConfigPool() *ConfigPool { // Config is an interface that all broadcast configurations must implement. 
// Because we have multiple ways of broadcasting records to the network, like -// [FollowUp] or [Optimistic], the [EventPoolStartBroadcast] has a configuration +// [FollowUp] or [Static], the [EventPoolStartBroadcast] has a configuration // field that depending on the concrete type of [Config] initializes the -// respective state machine. Then the broadcast operation will performed based -// on the encoded rules in that state machine. +// respective state machine. Then the broadcast operation will be performed +// based on the encoded rules in that state machine. type Config interface { broadcastConfig() } func (c *ConfigFollowUp) broadcastConfig() {} func (c *ConfigOptimistic) broadcastConfig() {} +func (c *ConfigStatic) broadcastConfig() {} // ConfigFollowUp specifies the configuration for the [FollowUp] state machine. type ConfigFollowUp struct{} @@ -72,3 +73,19 @@ func (c *ConfigOptimistic) Validate() error { func DefaultConfigOptimistic() *ConfigOptimistic { return &ConfigOptimistic{} } + +// ConfigStatic specifies the configuration for the [Static] state +// machine. +type ConfigStatic struct{} + +// Validate checks the configuration options and returns an error if any have +// invalid values. +func (c *ConfigStatic) Validate() error { + return nil +} + +// DefaultConfigStatic returns the default configuration options for the +// [Static] state machine. +func DefaultConfigStatic() *ConfigStatic { + return &ConfigStatic{} +} diff --git a/v2/internal/coord/brdcst/config_test.go b/v2/internal/coord/brdcst/config_test.go index 68447a1f..43779523 100644 --- a/v2/internal/coord/brdcst/config_test.go +++ b/v2/internal/coord/brdcst/config_test.go @@ -37,6 +37,7 @@ func TestConfig_interface_conformance(t *testing.T) { configs := []Config{ &ConfigFollowUp{}, &ConfigOptimistic{}, + &ConfigStatic{}, } for _, c := range configs { c.broadcastConfig() // drives test coverage diff --git a/v2/internal/coord/brdcst/pool.go b/v2/internal/coord/brdcst/pool.go index bba83dad..71d4e936 100644 --- a/v2/internal/coord/brdcst/pool.go +++ b/v2/internal/coord/brdcst/pool.go @@ -14,7 +14,7 @@ import ( // Broadcast is a type alias for a specific kind of state machine that any // kind of broadcast strategy state machine must implement. Currently, there -// are the [FollowUp] and [Optimistic] state machines. +// are the [FollowUp] and [Static] state machines. type Broadcast = coordt.StateMachine[BroadcastEvent, BroadcastState] // Pool is a [coordt.StateMachine] that manages all running broadcast @@ -26,7 +26,7 @@ type Broadcast = coordt.StateMachine[BroadcastEvent, BroadcastState] // // Conceptually, a broadcast consists of finding the closest nodes to a certain // key and then storing the record with them. There are a few different -// strategies that can be applied. For now, these are the [FollowUp] and the [Optimistic] +// strategies that can be applied. For now, these are the [FollowUp] and the [Static] // strategies. In the future, we also want to support [Reprovide Sweep]. // However, this requires a different type of query as we are not looking for // the closest nodes but rather enumerating the keyspace. In any case, this @@ -104,7 +104,7 @@ func (p *Pool[K, N, M]) Advance(ctx context.Context, ev PoolEvent) (out PoolStat } // handleEvent receives a broadcast [PoolEvent] and returns the corresponding -// broadcast state machine [FollowUp] or [Optimistic] plus the event for that +// broadcast state machine [FollowUp] or [Static] plus the event for that // state machine. 
If any return parameter is nil, either the pool event was for // an unknown query or the event doesn't need to be forwarded to the state // machine. @@ -120,7 +120,9 @@ func (p *Pool[K, N, M]) handleEvent(ctx context.Context, ev PoolEvent) (sm Broad // first initialize the state machine for the broadcast desired strategy switch cfg := ev.Config.(type) { case *ConfigFollowUp: - p.bcs[ev.QueryID] = NewFollowUp(ev.QueryID, p.qp, ev.Message, cfg) + p.bcs[ev.QueryID] = NewFollowUp[K, N, M](ev.QueryID, p.qp, ev.Message, cfg) + case *ConfigStatic: + p.bcs[ev.QueryID] = NewStatic[K, N, M](ev.QueryID, ev.Message, cfg) case *ConfigOptimistic: panic("implement me") } @@ -171,7 +173,7 @@ func (p *Pool[K, N, M]) handleEvent(ctx context.Context, ev PoolEvent) (sm Broad } // advanceBroadcast advances the given broadcast state machine ([FollowUp] or -// [Optimistic]) and returns the new [Pool] state ([PoolState]). The additional +// [Static]) and returns the new [Pool] state ([PoolState]). The additional // boolean value indicates whether the returned [PoolState] should be ignored. func (p *Pool[K, N, M]) advanceBroadcast(ctx context.Context, sm Broadcast, bev BroadcastEvent) (PoolState, bool) { ctx, span := tele.StartSpan(ctx, "Pool.advanceBroadcast", trace.WithAttributes(tele.AttrInEvent(bev))) @@ -284,7 +286,7 @@ type EventPoolStartBroadcast[K kad.Key[K], N kad.NodeID[K], M coordt.Message] st Target K // the key we want to store the record for Message M // the message that we want to send to the closest peers (this encapsulates the payload we want to store) Seed []N // the closest nodes we know so far and from where we start the operation - Config Config // the configuration for this operation. Most importantly, this defines the broadcast strategy ([FollowUp] or [Optimistic]) + Config Config // the configuration for this operation. 
Most importantly, this defines the broadcast strategy ([FollowUp] or [Static]) } // EventPoolStopBroadcast notifies broadcast [Pool] to stop a broadcast diff --git a/v2/internal/coord/brdcst/pool_test.go b/v2/internal/coord/brdcst/pool_test.go index e4e7409c..aaad7dc9 100644 --- a/v2/internal/coord/brdcst/pool_test.go +++ b/v2/internal/coord/brdcst/pool_test.go @@ -272,7 +272,7 @@ func TestPool_FollowUp_stop_during_followup_phase(t *testing.T) { require.Len(t, st.Errors, 2) } -func TestPool_FollowUp_empty_seed(t *testing.T) { +func TestPool_empty_seed(t *testing.T) { ctx := context.Background() cfg := DefaultConfigPool() @@ -286,17 +286,121 @@ func TestPool_FollowUp_empty_seed(t *testing.T) { queryID := coordt.QueryID("test") - state := p.Advance(ctx, &EventPoolStartBroadcast[tiny.Key, tiny.Node, tiny.Message]{ + startEvt := &EventPoolStartBroadcast[tiny.Key, tiny.Node, tiny.Message]{ QueryID: queryID, Target: target, Message: msg, Seed: []tiny.Node{}, - Config: DefaultConfigFollowUp(), + } + + t.Run("follow up", func(t *testing.T) { + startEvt.Config = DefaultConfigFollowUp() + + state := p.Advance(ctx, startEvt) + require.IsType(t, &StatePoolBroadcastFinished[tiny.Key, tiny.Node]{}, state) + + state = p.Advance(ctx, &EventPoolPoll{}) + require.IsType(t, &StatePoolIdle{}, state) + }) + + t.Run("static", func(t *testing.T) { + startEvt.Config = DefaultConfigStatic() + state := p.Advance(ctx, startEvt) + require.IsType(t, &StatePoolBroadcastFinished[tiny.Key, tiny.Node]{}, state) + + state = p.Advance(ctx, &EventPoolPoll{}) + require.IsType(t, &StatePoolIdle{}, state) + }) +} + +func TestPool_Static_happy_path(t *testing.T) { + ctx := context.Background() + cfg := DefaultConfigPool() + + self := tiny.NewNode(0) + + p, err := NewPool[tiny.Key, tiny.Node, tiny.Message](self, cfg) + require.NoError(t, err) + + msg := tiny.Message{Content: "store this"} + target := tiny.Key(0b00000001) + a := tiny.NewNode(0b00000100) // 4 + b := tiny.NewNode(0b00000011) // 3 + c := tiny.NewNode(0b00000010) // 2 + + queryID := coordt.QueryID("test") + + state := p.Advance(ctx, &EventPoolStartBroadcast[tiny.Key, tiny.Node, tiny.Message]{ + QueryID: queryID, + Target: target, + Message: msg, + Seed: []tiny.Node{a, b, c}, + Config: DefaultConfigStatic(), + }) + spsr, ok := state.(*StatePoolStoreRecord[tiny.Key, tiny.Node, tiny.Message]) + require.True(t, ok, "state is %T", state) + first := spsr.NodeID + + state = p.Advance(ctx, &EventPoolPoll{}) + spsr, ok = state.(*StatePoolStoreRecord[tiny.Key, tiny.Node, tiny.Message]) + require.True(t, ok, "state is %T", state) + second := spsr.NodeID + + state = p.Advance(ctx, &EventPoolStoreRecordSuccess[tiny.Key, tiny.Node, tiny.Message]{ + QueryID: queryID, + NodeID: first, + Request: msg, + }) + spsr, ok = state.(*StatePoolStoreRecord[tiny.Key, tiny.Node, tiny.Message]) + require.True(t, ok, "state is %T", state) + third := spsr.NodeID + + state = p.Advance(ctx, &EventPoolStoreRecordFailure[tiny.Key, tiny.Node, tiny.Message]{ + QueryID: queryID, + NodeID: second, + Request: msg, + }) + require.IsType(t, &StatePoolWaiting{}, state) + + state = p.Advance(ctx, &EventPoolStoreRecordSuccess[tiny.Key, tiny.Node, tiny.Message]{ + QueryID: queryID, + NodeID: third, + Request: msg, }) require.IsType(t, &StatePoolBroadcastFinished[tiny.Key, tiny.Node]{}, state) +} + +func TestPool_Static_stop_mid_flight(t *testing.T) { + ctx := context.Background() + cfg := DefaultConfigPool() + + self := tiny.NewNode(0) + + p, err := NewPool[tiny.Key, tiny.Node, tiny.Message](self, cfg) + 
require.NoError(t, err)
+
+	msg := tiny.Message{Content: "store this"}
+	target := tiny.Key(0b00000001)
+	a := tiny.NewNode(0b00000100) // 4
+	b := tiny.NewNode(0b00000011) // 3
+	c := tiny.NewNode(0b00000010) // 2
+
+	queryID := coordt.QueryID("test")
+
+	state := p.Advance(ctx, &EventPoolStartBroadcast[tiny.Key, tiny.Node, tiny.Message]{
+		QueryID: queryID,
+		Target:  target,
+		Message: msg,
+		Seed:    []tiny.Node{a, b, c},
+		Config:  DefaultConfigStatic(),
+	})
+	require.IsType(t, &StatePoolStoreRecord[tiny.Key, tiny.Node, tiny.Message]{}, state)
 
 	state = p.Advance(ctx, &EventPoolPoll{})
-	require.IsType(t, &StatePoolIdle{}, state)
+	require.IsType(t, &StatePoolStoreRecord[tiny.Key, tiny.Node, tiny.Message]{}, state)
+
+	state = p.Advance(ctx, &EventPoolStopBroadcast{QueryID: queryID})
+	require.IsType(t, &StatePoolBroadcastFinished[tiny.Key, tiny.Node]{}, state)
 }
 
 func TestPoolState_interface_conformance(t *testing.T) {
diff --git a/v2/internal/coord/brdcst/static.go b/v2/internal/coord/brdcst/static.go
new file mode 100644
index 00000000..0a36721b
--- /dev/null
+++ b/v2/internal/coord/brdcst/static.go
@@ -0,0 +1,143 @@
+package brdcst
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/plprobelab/go-kademlia/kad"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+
+	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/tele"
+)
+
+// Static is a [Broadcast] state machine and encapsulates the logic around
+// doing a put operation to a static set of nodes. That static set of nodes
+// is given by the list of seed nodes in the [EventBroadcastStart] event.
+type Static[K kad.Key[K], N kad.NodeID[K], M coordt.Message] struct {
+	// the unique ID for this broadcast operation
+	queryID coordt.QueryID
+
+	// a struct holding configuration options
+	cfg *ConfigStatic
+
+	// the message that we will send to the static set of nodes
+	msg M
+
+	// nodes we still need to store records with. This map is filled with
+	// the seed nodes from the [EventBroadcastStart] event.
+	todo map[string]N
+
+	// nodes we have contacted to store the record but haven't heard a response yet
+	waiting map[string]N
+
+	// nodes that successfully hold the record for us
+	success map[string]N
+
+	// nodes that failed to hold the record for us
+	failed map[string]struct {
+		Node N
+		Err  error
+	}
+}
+
+// NewStatic initializes a new [Static] struct.
+func NewStatic[K kad.Key[K], N kad.NodeID[K], M coordt.Message](qid coordt.QueryID, msg M, cfg *ConfigStatic) *Static[K, N, M] {
+	return &Static[K, N, M]{
+		queryID: qid,
+		cfg:     cfg,
+		msg:     msg,
+		todo:    map[string]N{},
+		waiting: map[string]N{},
+		success: map[string]N{},
+		failed: map[string]struct {
+			Node N
+			Err  error
+		}{},
+	}
+}
+
+// Advance advances the state of the [Static] [Broadcast] state machine.
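+//
+// A minimal usage sketch (illustrative only; the tiny.* types are this
+// package's test helpers, and seeds is assumed to hold the target nodes):
+//
+//	sm := NewStatic[tiny.Key, tiny.Node, tiny.Message](queryID, msg, DefaultConfigStatic())
+//	state := sm.Advance(ctx, &EventBroadcastStart[tiny.Key, tiny.Node]{Seed: seeds})
+//	// each returned *StateBroadcastStoreRecord asks the caller to send msg
+//	// to one node; feed the outcome back via EventBroadcastStoreRecordSuccess
+//	// or EventBroadcastStoreRecordFailure until StateBroadcastFinished is
+//	// returned.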
+func (f *Static[K, N, M]) Advance(ctx context.Context, ev BroadcastEvent) (out BroadcastState) { + _, span := tele.StartSpan(ctx, "Static.Advance", trace.WithAttributes(tele.AttrInEvent(ev))) + defer func() { + span.SetAttributes( + tele.AttrOutEvent(out), + attribute.Int("todo", len(f.todo)), + attribute.Int("waiting", len(f.waiting)), + attribute.Int("success", len(f.success)), + attribute.Int("failed", len(f.failed)), + ) + span.End() + }() + + switch ev := ev.(type) { + case *EventBroadcastStart[K, N]: + span.SetAttributes(attribute.Int("seed", len(ev.Seed))) + for _, seed := range ev.Seed { + f.todo[seed.String()] = seed + } + case *EventBroadcastStop: + for _, n := range f.todo { + delete(f.todo, n.String()) + f.failed[n.String()] = struct { + Node N + Err error + }{Node: n, Err: fmt.Errorf("cancelled")} + } + + for _, n := range f.waiting { + delete(f.waiting, n.String()) + f.failed[n.String()] = struct { + Node N + Err error + }{Node: n, Err: fmt.Errorf("cancelled")} + } + case *EventBroadcastStoreRecordSuccess[K, N, M]: + delete(f.waiting, ev.NodeID.String()) + f.success[ev.NodeID.String()] = ev.NodeID + case *EventBroadcastStoreRecordFailure[K, N, M]: + delete(f.waiting, ev.NodeID.String()) + f.failed[ev.NodeID.String()] = struct { + Node N + Err error + }{Node: ev.NodeID, Err: ev.Error} + case *EventBroadcastPoll: + // ignore, nothing to do + default: + panic(fmt.Sprintf("unexpected event: %T", ev)) + } + + for k, n := range f.todo { + delete(f.todo, k) + f.waiting[k] = n + return &StateBroadcastStoreRecord[K, N, M]{ + QueryID: f.queryID, + NodeID: n, + Message: f.msg, + } + } + + if len(f.waiting) > 0 { + return &StateBroadcastWaiting{} + } + + if len(f.todo) == 0 { + contacted := make([]N, 0, len(f.success)+len(f.failed)) + for _, n := range f.success { + contacted = append(contacted, n) + } + for _, n := range f.failed { + contacted = append(contacted, n.Node) + } + + return &StateBroadcastFinished[K, N]{ + QueryID: f.queryID, + Contacted: contacted, + Errors: f.failed, + } + } + + return &StateBroadcastIdle{} +} diff --git a/v2/internal/coord/coordinator.go b/v2/internal/coord/coordinator.go index d4dda7fa..f932f164 100644 --- a/v2/internal/coord/coordinator.go +++ b/v2/internal/coord/coordinator.go @@ -379,7 +379,7 @@ func (c *Coordinator) QueryClosest(ctx context.Context, target kadt.Key, fn coor // numResults specifies the minimum number of nodes to successfully contact before considering iteration complete. // The query is considered to be exhausted when it has received responses from at least this number of nodes // and there are no closer nodes remaining to be contacted. A default of 20 is used if this value is less than 1. 
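+// A hypothetical call site (sketch only; req and fn stand for a prepared
+// *pb.Message and a coordt.QueryFunc):
+//
+//	closest, stats, err := c.QueryMessage(ctx, req, fn, 20)
+//	// closest holds the closest nodes contacted during the query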
-func (c *Coordinator) QueryMessage(ctx context.Context, msg *pb.Message, fn coordt.QueryFunc, numResults int) (coordt.QueryStats, error) { +func (c *Coordinator) QueryMessage(ctx context.Context, msg *pb.Message, fn coordt.QueryFunc, numResults int) ([]kadt.PeerID, coordt.QueryStats, error) { ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.QueryMessage") defer span.End() if msg == nil { @@ -396,7 +396,7 @@ func (c *Coordinator) QueryMessage(ctx context.Context, msg *pb.Message, fn coor seeds, err := c.GetClosestNodes(ctx, msg.Target(), numResults) if err != nil { - return coordt.QueryStats{}, err + return nil, coordt.QueryStats{}, err } seedIDs := make([]kadt.PeerID, 0, len(seeds)) @@ -419,8 +419,8 @@ func (c *Coordinator) QueryMessage(ctx context.Context, msg *pb.Message, fn coor // queue the start of the query c.queryBehaviour.Notify(ctx, cmd) - _, stats, err := c.waitForQuery(ctx, queryID, waiter, fn) - return stats, err + closest, stats, err := c.waitForQuery(ctx, queryID, waiter, fn) + return closest, stats, err } func (c *Coordinator) BroadcastRecord(ctx context.Context, msg *pb.Message) error { @@ -434,15 +434,30 @@ func (c *Coordinator) BroadcastRecord(ctx context.Context, msg *pb.Message) erro ctx, cancel := context.WithCancel(ctx) defer cancel() - seeds, err := c.GetClosestNodes(ctx, msg.Target(), 20) // TODO: parameterize + seedNodes, err := c.GetClosestNodes(ctx, msg.Target(), 20) // TODO: parameterize if err != nil { return err } - seedIDs := make([]kadt.PeerID, 0, len(seeds)) - for _, s := range seeds { - seedIDs = append(seedIDs, s.ID()) + seeds := make([]kadt.PeerID, 0, len(seedNodes)) + for _, s := range seedNodes { + seeds = append(seeds, s.ID()) } + return c.broadcast(ctx, msg, seeds, brdcst.DefaultConfigFollowUp()) +} + +func (c *Coordinator) BroadcastStatic(ctx context.Context, msg *pb.Message, seeds []kadt.PeerID) error { + ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.BroadcastStatic") + defer span.End() + return c.broadcast(ctx, msg, seeds, brdcst.DefaultConfigStatic()) +} + +func (c *Coordinator) broadcast(ctx context.Context, msg *pb.Message, seeds []kadt.PeerID, cfg brdcst.Config) error { + ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.broadcast") + defer span.End() + + ctx, cancel := context.WithCancel(ctx) + defer cancel() waiter := NewWaiter[BehaviourEvent]() queryID := c.newOperationID() @@ -451,9 +466,9 @@ func (c *Coordinator) BroadcastRecord(ctx context.Context, msg *pb.Message) erro QueryID: queryID, Target: msg.Target(), Message: msg, - Seed: seedIDs, + Seed: seeds, Notify: waiter, - Config: brdcst.DefaultConfigFollowUp(), + Config: cfg, } // queue the start of the query @@ -479,7 +494,10 @@ func (c *Coordinator) waitForQuery(ctx context.Context, queryID coordt.QueryID, select { case <-ctx.Done(): return nil, lastStats, ctx.Err() - case wev := <-waiter.Chan(): + case wev, more := <-waiter.Chan(): + if !more { + return nil, lastStats, ctx.Err() + } ctx, ev := wev.Ctx, wev.Event switch ev := ev.(type) { case *EventQueryProgressed: @@ -532,7 +550,11 @@ func (c *Coordinator) waitForBroadcast(ctx context.Context, waiter *Waiter[Behav select { case <-ctx.Done(): return nil, nil, ctx.Err() - case wev := <-waiter.Chan(): + case wev, more := <-waiter.Chan(): + if !more { + return nil, nil, ctx.Err() + } + switch ev := wev.Event.(type) { case *EventQueryProgressed: case *EventBroadcastFinished: @@ -580,7 +602,7 @@ func (c *Coordinator) Bootstrap(ctx context.Context, seeds []kadt.PeerID) error // NotifyConnectivity notifies the coordinator that a peer 
has passed a connectivity check
// which means it is connected and supports finding closer nodes
-func (c *Coordinator) NotifyConnectivity(ctx context.Context, id kadt.PeerID) error {
+func (c *Coordinator) NotifyConnectivity(ctx context.Context, id kadt.PeerID) {
 	ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.NotifyConnectivity")
 	defer span.End()
@@ -588,13 +610,11 @@ func (c *Coordinator) NotifyConnectivity(ctx context.Context, id kadt.PeerID) er
 	c.routingBehaviour.Notify(ctx, &EventNotifyConnectivity{
 		NodeID: id,
 	})
-
-	return nil
 }
 
 // NotifyNonConnectivity notifies the coordinator that a peer has failed a connectivity check
 // which means it is not connected and/or it doesn't support finding closer nodes
-func (c *Coordinator) NotifyNonConnectivity(ctx context.Context, id kadt.PeerID) error {
+func (c *Coordinator) NotifyNonConnectivity(ctx context.Context, id kadt.PeerID) {
 	ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.NotifyNonConnectivity")
 	defer span.End()
@@ -602,8 +622,6 @@ func (c *Coordinator) NotifyNonConnectivity(ctx context.Context, id kadt.PeerID)
 	c.routingBehaviour.Notify(ctx, &EventNotifyNonConnectivity{
 		NodeID: id,
 	})
-
-	return nil
 }
 
 func (c *Coordinator) newOperationID() coordt.QueryID {
diff --git a/v2/router.go b/v2/router.go
index bc586a39..999be273 100644
--- a/v2/router.go
+++ b/v2/router.go
@@ -2,6 +2,7 @@ package dht
 
 import (
 	"context"
+	"encoding/base64"
 	"fmt"
 	"time"
@@ -11,25 +12,46 @@ import (
 	"github.com/libp2p/go-libp2p/core/protocol"
 	"github.com/libp2p/go-msgio"
 	"github.com/libp2p/go-msgio/pbio"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
 	"google.golang.org/protobuf/proto"
 
 	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt"
 	"github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
 	"github.com/libp2p/go-libp2p-kad-dht/v2/pb"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/tele"
 )
 
 type router struct {
+	// the libp2p host to use for sending messages
 	host host.Host
-	// ProtocolID represents the DHT [protocol] we can query with and respond to.
+
+	// protocolID represents the DHT [protocol] we can query with and respond to.
 	//
 	// [protocol]: https://docs.libp2p.io/concepts/fundamentals/protocols/
-	ProtocolID protocol.ID
+	protocolID protocol.ID
+
+	// an OpenTelemetry tracer instance
+	tracer trace.Tracer
}
 
 var _ coordt.Router[kadt.Key, kadt.PeerID, *pb.Message] = (*router)(nil)
 
-func (r *router) SendMessage(ctx context.Context, to kadt.PeerID, req *pb.Message) (*pb.Message, error) {
-	// TODO: what to do with addresses in peer.AddrInfo?
+func (r *router) SendMessage(ctx context.Context, to kadt.PeerID, req *pb.Message) (resp *pb.Message, err error) {
+	spanOpts := []trace.SpanStartOption{
+		trace.WithAttributes(tele.AttrMessageType(req.GetType().String())),
+		trace.WithAttributes(tele.AttrPeerID(to.String())),
+		trace.WithAttributes(tele.AttrKey(base64.RawStdEncoding.EncodeToString(req.GetKey()))),
+	}
+	ctx, span := r.tracer.Start(ctx, "router.SendMessage", spanOpts...)
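+	// the deferred function below records any error on the span before
+	// ending it (err is the named return value)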
+ defer func() { + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + } + span.End() + }() + if len(r.host.Peerstore().Addrs(peer.ID(to))) == 0 { return nil, fmt.Errorf("no address for peer %s", to) } @@ -38,10 +60,8 @@ func (r *router) SendMessage(ctx context.Context, to kadt.PeerID, req *pb.Messag ctx, cancel = context.WithCancel(ctx) defer cancel() - var err error - var s network.Stream - s, err = r.host.NewStream(ctx, peer.ID(to), r.ProtocolID) + s, err = r.host.NewStream(ctx, peer.ID(to), r.protocolID) if err != nil { return nil, fmt.Errorf("stream creation: %w", err) } @@ -59,10 +79,14 @@ func (r *router) SendMessage(ctx context.Context, to kadt.PeerID, req *pb.Messag return nil, nil } + span.End() + ctx, span = r.tracer.Start(ctx, "router.ReadMessage", spanOpts...) + data, err := reader.ReadMsg() if err != nil { return nil, fmt.Errorf("read message: %w", err) } + protoResp := pb.Message{} if err = proto.Unmarshal(data, &protoResp); err != nil { return nil, err diff --git a/v2/routing.go b/v2/routing.go index ed66a559..a0745945 100644 --- a/v2/routing.go +++ b/v2/routing.go @@ -1,6 +1,7 @@ package dht import ( + "bytes" "context" "errors" "fmt" @@ -39,11 +40,9 @@ func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { return addrInfo, nil } default: - // we're + // we're not connected or were recently connected } - target := kadt.PeerID(id) - var foundPeer peer.ID fn := func(ctx context.Context, visited kadt.PeerID, msg *pb.Message, stats coordt.QueryStats) error { if peer.ID(visited) == id { @@ -53,7 +52,7 @@ func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { return nil } - _, _, err := d.kad.QueryClosest(ctx, target.Key(), fn, 20) + _, _, err := d.kad.QueryClosest(ctx, kadt.PeerID(id).Key(), fn, 20) if err != nil { return peer.AddrInfo{}, fmt.Errorf("failed to run query: %w", err) } @@ -116,7 +115,7 @@ func (d *DHT) FindProvidersAsync(ctx context.Context, c cid.Cid, count int) <-ch return peerOut } -func (d *DHT) findProvidersAsyncRoutine(ctx context.Context, c cid.Cid, count int, out chan peer.AddrInfo) { +func (d *DHT) findProvidersAsyncRoutine(ctx context.Context, c cid.Cid, count int, out chan<- peer.AddrInfo) { _, span := d.tele.Tracer.Start(ctx, "DHT.findProvidersAsyncRoutine", otel.WithAttributes(attribute.String("cid", c.String()), attribute.Int("count", count))) defer span.End() @@ -130,12 +129,20 @@ func (d *DHT) findProvidersAsyncRoutine(ctx context.Context, c cid.Cid, count in return } + // send all providers onto the out channel until the desired count + // was reached. If no count was specified, continue with network lookup. + providers := map[peer.ID]struct{}{} + // first fetch the record locally stored, err := b.Fetch(ctx, string(c.Hash())) if err != nil { - span.RecordError(err) - d.log.Warn("Fetching value from provider store", slog.String("cid", c.String()), slog.String("err", err.Error())) - return + if !errors.Is(err, ds.ErrNotFound) { + span.RecordError(err) + d.log.Warn("Fetching value from provider store", slog.String("cid", c.String()), slog.String("err", err.Error())) + return + } + + stored = &providerSet{} } ps, ok := stored.(*providerSet) @@ -145,9 +152,6 @@ func (d *DHT) findProvidersAsyncRoutine(ctx context.Context, c cid.Cid, count in return } - // send all providers onto the out channel until the desired count - // was reached. If no count was specified, continue with network lookup. 
- providers := map[peer.ID]struct{}{} for _, provider := range ps.providers { providers[provider.ID] = struct{}{} @@ -199,7 +203,7 @@ func (d *DHT) findProvidersAsyncRoutine(ctx context.Context, c cid.Cid, count in return nil } - _, err = d.kad.QueryMessage(ctx, msg, fn, 20) // TODO: parameterize + _, _, err = d.kad.QueryMessage(ctx, msg, fn, d.cfg.BucketSize) if err != nil { span.RecordError(err) d.log.Warn("Failed querying", slog.String("cid", c.String()), slog.String("err", err.Error())) @@ -249,7 +253,8 @@ func (d *DHT) PutValue(ctx context.Context, keyStr string, value []byte, opts .. return nil } -// putValueLocal stores a value in the local datastore without querying the network. +// putValueLocal stores a value in the local datastore without reaching out to +// the network. func (d *DHT) putValueLocal(ctx context.Context, key string, value []byte) error { ctx, span := d.tele.Tracer.Start(ctx, "DHT.PutValueLocal") defer span.End() @@ -265,7 +270,7 @@ func (d *DHT) putValueLocal(ctx context.Context, key string, value []byte) error } rec := record.MakePutRecord(key, value) - rec.TimeReceived = time.Now().UTC().Format(time.RFC3339Nano) + rec.TimeReceived = d.cfg.Clock.Now().UTC().Format(time.RFC3339Nano) _, err = b.Store(ctx, path, rec) if err != nil { @@ -275,57 +280,44 @@ func (d *DHT) putValueLocal(ctx context.Context, key string, value []byte) error return nil } -func (d *DHT) GetValue(ctx context.Context, key string, option ...routing.Option) ([]byte, error) { +func (d *DHT) GetValue(ctx context.Context, key string, opts ...routing.Option) ([]byte, error) { ctx, span := d.tele.Tracer.Start(ctx, "DHT.GetValue") defer span.End() - v, err := d.getValueLocal(ctx, key) - if err == nil { - return v, nil - } - if !errors.Is(err, ds.ErrNotFound) { - return nil, fmt.Errorf("put value locally: %w", err) + valueChan, err := d.SearchValue(ctx, key, opts...) + if err != nil { + return nil, err } - req := &pb.Message{ - Type: pb.Message_GET_VALUE, - Key: []byte(key), + var best []byte + for val := range valueChan { + best = val } - // TODO: quorum - var value []byte - fn := func(ctx context.Context, id kadt.PeerID, resp *pb.Message, stats coordt.QueryStats) error { - if resp == nil { - return nil - } - - if resp.GetType() != pb.Message_GET_VALUE { - return nil - } - - if string(resp.GetKey()) != key { - return nil - } - - value = resp.GetRecord().GetValue() - - return coordt.ErrSkipRemaining + if ctx.Err() != nil { + return best, ctx.Err() } - _, err = d.kad.QueryMessage(ctx, req, fn, d.cfg.BucketSize) - if err != nil { - return nil, fmt.Errorf("failed to run query: %w", err) + if best == nil { + return nil, routing.ErrNotFound } - return value, nil + return best, nil } -// getValueLocal retrieves a value from the local datastore without querying the network. -func (d *DHT) getValueLocal(ctx context.Context, key string) ([]byte, error) { - ctx, span := d.tele.Tracer.Start(ctx, "DHT.GetValueLocal") +// SearchValue will search in the DHT for keyStr. 
keyStr must have the form +// `/$namespace/$binary_id` +func (d *DHT) SearchValue(ctx context.Context, keyStr string, options ...routing.Option) (<-chan []byte, error) { + _, span := d.tele.Tracer.Start(ctx, "DHT.SearchValue") defer span.End() - ns, path, err := record.SplitKey(key) + // first parse the routing options + rOpt := &routing.Options{} // routing config + if err := rOpt.Apply(options...); err != nil { + return nil, fmt.Errorf("apply routing options: %w", err) + } + + ns, path, err := record.SplitKey(keyStr) if err != nil { return nil, fmt.Errorf("splitting key: %w", err) } @@ -337,7 +329,17 @@ func (d *DHT) getValueLocal(ctx context.Context, key string) ([]byte, error) { val, err := b.Fetch(ctx, path) if err != nil { - return nil, fmt.Errorf("fetch from backend: %w", err) + if !errors.Is(err, ds.ErrNotFound) { + return nil, fmt.Errorf("fetch from backend: %w", err) + } + + if rOpt.Offline { + return nil, routing.ErrNotFound + } + + out := make(chan []byte) + go d.searchValueRoutine(ctx, b, ns, path, rOpt, out) + return out, nil } rec, ok := val.(*recpb.Record) @@ -345,14 +347,153 @@ func (d *DHT) getValueLocal(ctx context.Context, key string) ([]byte, error) { return nil, fmt.Errorf("expected *recpb.Record from backend, got: %T", val) } - return rec.GetValue(), nil + if rOpt.Offline { + out := make(chan []byte, 1) + defer close(out) + out <- rec.GetValue() + return out, nil + } + + out := make(chan []byte) + go func() { + out <- rec.GetValue() + d.searchValueRoutine(ctx, b, ns, path, rOpt, out) + }() + + return out, nil } -func (d *DHT) SearchValue(ctx context.Context, s string, option ...routing.Option) (<-chan []byte, error) { - _, span := d.tele.Tracer.Start(ctx, "DHT.SearchValue") +func (d *DHT) searchValueRoutine(ctx context.Context, backend Backend, ns string, path string, ropt *routing.Options, out chan<- []byte) { + _, span := d.tele.Tracer.Start(ctx, "DHT.searchValueRoutine") defer span.End() + defer close(out) + + routingKey := []byte(newRoutingKey(ns, path)) + + req := &pb.Message{ + Type: pb.Message_GET_VALUE, + Key: routingKey, + } + + // The currently known best value for /$ns/$path + var best []byte + + // Peers that we identified to hold stale records + var fixupPeers []kadt.PeerID + + // The peers that returned the best value + quorumPeers := map[kadt.PeerID]struct{}{} + + // The quorum that we require for terminating the query. This number tells + // us how many peers must have responded with the "best" value before we + // cancel the query. + quorum := d.getQuorum(ropt) + + fn := func(ctx context.Context, id kadt.PeerID, resp *pb.Message, stats coordt.QueryStats) error { + rec := resp.GetRecord() + if rec == nil { + return nil + } + + if !bytes.Equal(routingKey, rec.GetKey()) { + return nil + } + + idx, _ := backend.Validate(ctx, path, best, rec.GetValue()) + switch idx { + case 0: // "best" is still the best value + if bytes.Equal(best, rec.GetValue()) { + quorumPeers[id] = struct{}{} + } + + case 1: // rec.GetValue() is better than our current "best" + + // We have identified a better record. 
All peers that were currently
+			// in our set of quorum peers need to be updated with this new record
+			for p := range quorumPeers {
+				fixupPeers = append(fixupPeers, p)
+			}
+
+			// re-initialize the quorum peers set for this new record
+			quorumPeers = map[kadt.PeerID]struct{}{}
+			quorumPeers[id] = struct{}{}
+
+			// submit the new value to the user
+			best = rec.GetValue()
+			out <- best
+		case -1: // "best" and rec.GetValue() are both invalid
+			return nil
+
+		default:
+			d.log.Warn("unexpected validate index", slog.Int("idx", idx))
+		}
+
+		// Check if we have reached the quorum
+		if len(quorumPeers) == quorum {
+			return coordt.ErrSkipRemaining
+		}
+
+		return nil
+	}
+
+	_, _, err := d.kad.QueryMessage(ctx, req, fn, d.cfg.BucketSize)
+	if err != nil {
+		d.logErr(err, "Search value query failed")
+		return
+	}
+
+	// check if we found peers that hold stale records. If so, update them
+	// asynchronously.
+	if len(fixupPeers) == 0 {
+		return
+	}
+
+	go func() {
+		msg := &pb.Message{
+			Type:   pb.Message_PUT_VALUE,
+			Key:    routingKey,
+			Record: record.MakePutRecord(string(routingKey), best),
+		}
+
+		if err := d.kad.BroadcastStatic(ctx, msg, fixupPeers); err != nil {
+			d.log.Warn("Failed updating peer")
+		}
+	}()
+}
+
+// quorumOptionKey is a struct that is used as a routing options key to pass
+// the desired quorum value into, e.g., SearchValue or GetValue.
+type quorumOptionKey struct{}
+
+// RoutingQuorum accepts the desired quorum that is required to terminate the
+// search query. The quorum value must not be negative but can be 0, in which
+// case we continue the query until we have exhausted the keyspace. If no
+// quorum is specified, the [Config.DefaultQuorum] value will be used.
+func RoutingQuorum(n int) routing.Option {
+	return func(opts *routing.Options) error {
+		if n < 0 {
+			return fmt.Errorf("quorum must not be negative")
+		}
+
+		if opts.Other == nil {
+			opts.Other = make(map[interface{}]interface{}, 1)
+		}
+
+		opts.Other[quorumOptionKey{}] = n
+
+		return nil
+	}
+}
+
+// getQuorum extracts the quorum value from the given routing options and
+// returns [Config.DefaultQuorum] if no quorum value is present.
+func (d *DHT) getQuorum(opts *routing.Options) int {
+	quorum, ok := opts.Other[quorumOptionKey{}].(int)
+	if !ok {
+		quorum = d.cfg.Query.DefaultQuorum
+	}
 
-	panic("implement me")
+	return quorum
 }
 
 func (d *DHT) Bootstrap(ctx context.Context) error {
diff --git a/v2/routing_test.go b/v2/routing_test.go
index 19209fd4..50d77895 100644
--- a/v2/routing_test.go
+++ b/v2/routing_test.go
@@ -6,7 +6,9 @@ import (
 	"crypto/sha256"
 	"fmt"
 	"testing"
+	"time"
 
+	"github.com/benbjohnson/clock"
 	"github.com/ipfs/go-cid"
 	"github.com/ipfs/go-datastore/failstore"
 	"github.com/libp2p/go-libp2p/core/crypto"
@@ -15,8 +17,10 @@ import (
 	mh "github.com/multiformats/go-multihash"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
 
 	"github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest"
+	"github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
 )
 
 // newRandomContent reads 1024 bytes from crypto/rand and builds a content struct.
@@ -34,7 +38,7 @@ func newRandomContent(t testing.TB) cid.Cid { return cid.NewCidV0(mhash) } -func makePkKeyValue(t *testing.T) (string, []byte) { +func makePkKeyValue(t testing.TB) (string, []byte) { t.Helper() _, pub, _ := crypto.GenerateEd25519Key(rng) @@ -47,6 +51,87 @@ func makePkKeyValue(t *testing.T) (string, []byte) { return routing.KeyForPublicKey(id), v } +func TestDHT_FindPeer_happy_path(t *testing.T) { + ctx := kadtest.CtxShort(t) + + top := NewTopology(t) + d1 := top.AddServer(nil) + d2 := top.AddServer(nil) + d3 := top.AddServer(nil) + d4 := top.AddServer(nil) + top.ConnectChain(ctx, d1, d2, d3, d4) + + addrInfo, err := d1.FindPeer(ctx, d4.host.ID()) + require.NoError(t, err) + assert.Equal(t, d4.host.ID(), addrInfo.ID) +} + +func TestDHT_FindPeer_not_found(t *testing.T) { + ctx := kadtest.CtxShort(t) + + top := NewTopology(t) + d1 := top.AddServer(nil) + d2 := top.AddServer(nil) + d3 := top.AddServer(nil) + d4 := top.AddServer(nil) + top.ConnectChain(ctx, d1, d2, d3) + + _, err := d1.FindPeer(ctx, d4.host.ID()) + assert.Error(t, err) +} + +func TestDHT_FindPeer_already_connected(t *testing.T) { + ctx := kadtest.CtxShort(t) + + top := NewTopology(t) + d1 := top.AddServer(nil) + d2 := top.AddServer(nil) + d3 := top.AddServer(nil) + d4 := top.AddServer(nil) + top.ConnectChain(ctx, d1, d2, d3) + + err := d1.host.Connect(ctx, peer.AddrInfo{ + ID: d4.host.ID(), + Addrs: d4.host.Addrs(), + }) + require.NoError(t, err) + + _, err = d1.FindPeer(ctx, d4.host.ID()) + assert.NoError(t, err) +} + +func TestDHT_PutValue_happy_path(t *testing.T) { + // TIMING: this test is based on timeouts - so might become flaky! + ctx := kadtest.CtxShort(t) + + top := NewTopology(t) + d1 := top.AddServer(nil) + d2 := top.AddServer(nil) + + top.ConnectChain(ctx, d1, d2) + + k, v := makePkKeyValue(t) + + err := d1.PutValue(ctx, k, v) + require.NoError(t, err) + + deadline, hasDeadline := ctx.Deadline() + if !hasDeadline { + deadline = time.Now().Add(5 * time.Second) + } + + // putting data to a remote peer is an asynchronous operation. Even after + // PutValue returns, and although we have closed the stream on our end, an + // acknowledgement that the other peer has received the data is not + // guaranteed. The data will be flushed at this point, but the remote might + // not have handled it yet. Therefore, we use "EventuallyWithT" here. 
+ assert.EventuallyWithT(t, func(t *assert.CollectT) { + val, err := d2.GetValue(ctx, k, routing.Offline) + assert.NoError(t, err) + assert.Equal(t, v, val) + }, time.Until(deadline), 10*time.Millisecond) +} + func TestDHT_PutValue_local_only(t *testing.T) { ctx := kadtest.CtxShort(t) @@ -78,21 +163,16 @@ func TestDHT_PutValue_invalid_key(t *testing.T) { }) } -func TestGetSetValueLocal(t *testing.T) { +func TestDHT_PutValue_routing_option_returns_error(t *testing.T) { ctx := kadtest.CtxShort(t) + d := newTestDHT(t) - top := NewTopology(t) - d := top.AddServer(nil) - - key, v := makePkKeyValue(t) - - err := d.putValueLocal(ctx, key, v) - require.NoError(t, err) - - val, err := d.getValueLocal(ctx, key) - require.NoError(t, err) + errOption := func(opts *routing.Options) error { + return fmt.Errorf("some error") + } - require.Equal(t, v, val) + err := d.PutValue(ctx, "/ipns/some-key", []byte("some value"), errOption) + assert.ErrorContains(t, err, "routing options") } func TestGetValueOnePeer(t *testing.T) { @@ -190,12 +270,7 @@ func TestDHT_FindProvidersAsync_empty_routing_table(t *testing.T) { c := newRandomContent(t) out := d.FindProvidersAsync(ctx, c, 1) - select { - case _, more := <-out: - require.False(t, more) - case <-ctx.Done(): - t.Fatal("timeout") - } + assertClosed(t, ctx, out) } func TestDHT_FindProvidersAsync_dht_does_not_support_providers(t *testing.T) { @@ -206,12 +281,7 @@ func TestDHT_FindProvidersAsync_dht_does_not_support_providers(t *testing.T) { delete(d.backends, namespaceProviders) out := d.FindProvidersAsync(ctx, newRandomContent(t), 1) - select { - case _, more := <-out: - require.False(t, more) - case <-ctx.Done(): - t.Fatal("timeout") - } + assertClosed(t, ctx, out) } func TestDHT_FindProvidersAsync_providers_stored_locally(t *testing.T) { @@ -225,17 +295,11 @@ func TestDHT_FindProvidersAsync_providers_stored_locally(t *testing.T) { require.NoError(t, err) out := d.FindProvidersAsync(ctx, c, 1) - for { - select { - case p, more := <-out: - if !more { - return - } - assert.Equal(t, provider.ID, p.ID) - case <-ctx.Done(): - t.Fatal("timeout") - } - } + + val := readItem(t, ctx, out) + assert.Equal(t, provider.ID, val.ID) + + assertClosed(t, ctx, out) } func TestDHT_FindProvidersAsync_returns_only_count_from_local_store(t *testing.T) { @@ -292,20 +356,11 @@ func TestDHT_FindProvidersAsync_queries_other_peers(t *testing.T) { require.NoError(t, err) out := d1.FindProvidersAsync(ctx, c, 1) - select { - case p, more := <-out: - require.True(t, more) - assert.Equal(t, provider.ID, p.ID) - case <-ctx.Done(): - t.Fatal("timeout") - } - select { - case _, more := <-out: - assert.False(t, more) - case <-ctx.Done(): - t.Fatal("timeout") - } + val := readItem(t, ctx, out) + assert.Equal(t, provider.ID, val.ID) + + assertClosed(t, ctx, out) } func TestDHT_FindProvidersAsync_respects_cancelled_context_for_local_query(t *testing.T) { @@ -419,12 +474,7 @@ func TestDHT_FindProvidersAsync_datastore_error(t *testing.T) { be.datastore = dstore out := d.FindProvidersAsync(ctx, newRandomContent(t), 0) - select { - case _, more := <-out: - assert.False(t, more) - case <-ctx.Done(): - t.Fatal("timeout") - } + assertClosed(t, ctx, out) } func TestDHT_FindProvidersAsync_invalid_key(t *testing.T) { @@ -432,10 +482,480 @@ func TestDHT_FindProvidersAsync_invalid_key(t *testing.T) { d := newTestDHT(t) out := d.FindProvidersAsync(ctx, cid.Cid{}, 0) + assertClosed(t, ctx, out) +} + +func TestDHT_GetValue_happy_path(t *testing.T) { + ctx := kadtest.CtxShort(t) + + clk := clock.New() + + cfg := 
DefaultConfig() + cfg.Clock = clk + + // generate new identity for the peer that issues the request + priv, _, err := crypto.GenerateEd25519Key(rng) + require.NoError(t, err) + + _, validValue := makeIPNSKeyValue(t, clk, priv, 1, time.Hour) + _, worseValue := makeIPNSKeyValue(t, clk, priv, 0, time.Hour) + key, betterValue := makeIPNSKeyValue(t, clk, priv, 2, time.Hour) // higher sequence number means better value + + top := NewTopology(t) + d1 := top.AddServer(cfg) + d2 := top.AddServer(cfg) + d3 := top.AddServer(cfg) + d4 := top.AddServer(cfg) + d5 := top.AddServer(cfg) + + top.ConnectChain(ctx, d1, d2, d3, d4, d5) + + err = d3.putValueLocal(ctx, key, validValue) + require.NoError(t, err) + + err = d4.putValueLocal(ctx, key, worseValue) + require.NoError(t, err) + + err = d5.putValueLocal(ctx, key, betterValue) + require.NoError(t, err) + + val, err := d1.GetValue(ctx, key) + assert.NoError(t, err) + assert.Equal(t, betterValue, val) +} + +func TestDHT_GetValue_returns_context_error(t *testing.T) { + ctx := kadtest.CtxShort(t) + d := newTestDHT(t) + + cancelledCtx, cancel := context.WithCancel(ctx) + cancel() + + _, err := d.GetValue(cancelledCtx, "/"+namespaceIPNS+"/some-key") + assert.ErrorIs(t, err, context.Canceled) +} + +func TestDHT_GetValue_returns_not_found_error(t *testing.T) { + ctx := kadtest.CtxShort(t) + d := newTestDHT(t) + + valueChan, err := d.GetValue(ctx, "/"+namespaceIPNS+"/some-key") + assert.ErrorIs(t, err, routing.ErrNotFound) + assert.Nil(t, valueChan) +} + +// assertClosed triggers a test failure if the given channel was not closed but +// carried more values or a timeout occurs (given by the context). +func assertClosed[T any](t testing.TB, ctx context.Context, c <-chan T) { + t.Helper() + select { - case _, more := <-out: + case _, more := <-c: assert.False(t, more) case <-ctx.Done(): - t.Fatal("timeout") + t.Fatal("timeout closing channel") } } + +func readItem[T any](t testing.TB, ctx context.Context, c <-chan T) T { + t.Helper() + + select { + case val, more := <-c: + require.True(t, more, "channel closed unexpectedly") + return val + case <-ctx.Done(): + t.Fatal("timeout reading item") + return *new(T) + } +} + +func TestDHT_SearchValue_simple(t *testing.T) { + // Test setup: + // There is just one other server that returns a valid value. 
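+	// d2 stores the record up front; d1 then discovers it via SearchValue.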
+	ctx := kadtest.CtxShort(t)
+
+	key, v := makePkKeyValue(t)
+
+	top := NewTopology(t)
+	d1 := top.AddServer(nil)
+	d2 := top.AddServer(nil)
+
+	top.Connect(ctx, d1, d2)
+
+	err := d2.putValueLocal(ctx, key, v)
+	require.NoError(t, err)
+
+	valChan, err := d1.SearchValue(ctx, key)
+	require.NoError(t, err)
+
+	val := readItem(t, ctx, valChan)
+	assert.Equal(t, v, val)
+
+	assertClosed(t, ctx, valChan)
+}
+
+func TestDHT_SearchValue_returns_best_values(t *testing.T) {
+	// Test setup:
+	// d2 returns no value
+	// d3 returns valid value
+	// d4 returns worse value (will get rejected because we already have a valid value)
+	// d5 returns better value
+	// all peers are connected in a chain from d1 to d5 (d1 initiates the query)
+	// assert that we receive two values on the channel (valid + better)
+	ctx := kadtest.CtxShort(t)
+	clk := clock.New()
+
+	cfg := DefaultConfig()
+	cfg.Clock = clk
+
+	// generate new identity for the peer that issues the request
+	priv, _, err := crypto.GenerateEd25519Key(rng)
+	require.NoError(t, err)
+
+	_, validValue := makeIPNSKeyValue(t, clk, priv, 1, time.Hour)
+	_, worseValue := makeIPNSKeyValue(t, clk, priv, 0, time.Hour)
+	key, betterValue := makeIPNSKeyValue(t, clk, priv, 2, time.Hour) // higher sequence number means better value
+
+	top := NewTopology(t)
+	d1 := top.AddServer(cfg)
+	d2 := top.AddServer(cfg)
+	d3 := top.AddServer(cfg)
+	d4 := top.AddServer(cfg)
+	d5 := top.AddServer(cfg)
+
+	top.ConnectChain(ctx, d1, d2, d3, d4, d5)
+
+	err = d3.putValueLocal(ctx, key, validValue)
+	require.NoError(t, err)
+
+	err = d4.putValueLocal(ctx, key, worseValue)
+	require.NoError(t, err)
+
+	err = d5.putValueLocal(ctx, key, betterValue)
+	require.NoError(t, err)
+
+	valChan, err := d1.SearchValue(ctx, key)
+	require.NoError(t, err)
+
+	val := readItem(t, ctx, valChan)
+	assert.Equal(t, validValue, val)
+
+	val = readItem(t, ctx, valChan)
+	assert.Equal(t, betterValue, val)
+
+	assertClosed(t, ctx, valChan)
+}
+
+// In order for 'go test' to run this suite, we need to create
+// a normal test function and pass our suite to suite.Run
+func TestDHT_SearchValue_quorum_test_suite(t *testing.T) {
+	suite.Run(t, new(SearchValueQuorumTestSuite))
+}
+
+type SearchValueQuorumTestSuite struct {
+	suite.Suite
+
+	d       *DHT
+	servers []*DHT
+
+	key         string
+	validValue  []byte
+	betterValue []byte
+}
+
+// SetupTest builds the test topology and stores the records with the
+// individual DHT servers before each test in this suite runs.
+func (suite *SearchValueQuorumTestSuite) SetupTest() {
+	// Test setup:
+	// we create 1 DHT server that searches for values
+	// we create 10 additional DHT servers and connect all of them in a chain
+	// the first server holds an invalid record
+	// the next three servers of the 10 DHT servers hold a valid record
+	// the remaining 6 servers of the 10 DHT servers hold a better record
+	// first test assertion: with quorum of 3 we expect the valid but old record
+	// second test assertion: with a quorum of 5 we expect to receive the valid but also better record.
+
+	t := suite.T()
+	ctx := kadtest.CtxShort(t)
+	clk := clock.New()
+
+	cfg := DefaultConfig()
+	cfg.Clock = clk
+	top := NewTopology(t)
+
+	// init privileged DHT server
+	suite.d = top.AddServer(cfg)
+
+	// init remaining ones
+	suite.servers = make([]*DHT, 10)
+	for i := 0; i < 10; i++ {
+		suite.servers[i] = top.AddServer(cfg)
+	}
+
+	// connect all together
+	top.ConnectChain(ctx, append([]*DHT{suite.d}, suite.servers...)...)
+
+	// generate records
+	remote, priv := newIdentity(t)
+	invalidPutReq := newPutIPNSRequest(t, clk, priv, 3, -time.Hour)
+	suite.key, suite.validValue = makeIPNSKeyValue(t, clk, priv, 1, time.Hour)
+	suite.key, suite.betterValue = makeIPNSKeyValue(t, clk, priv, 2, time.Hour) // higher sequence number means better value
+
+	// store invalid (expired) record directly in the datastore of
+	// the respective DHT server (bypassing any validation).
+	invalidRec, err := invalidPutReq.Record.Marshal()
+	require.NoError(t, err)
+
+	rbe, err := typedBackend[*RecordBackend](suite.servers[0], namespaceIPNS)
+	require.NoError(t, err)
+
+	dsKey := newDatastoreKey(namespaceIPNS, string(remote))
+	err = rbe.datastore.Put(ctx, dsKey, invalidRec)
+	require.NoError(t, err)
+
+	// the next three DHT servers hold a valid but old value
+	for i := 1; i < 4; i++ {
+		err = suite.servers[i].putValueLocal(ctx, suite.key, suite.validValue)
+		require.NoError(t, err)
+	}
+
+	// the remaining six DHT servers hold a valid and newer record
+	for i := 4; i < 10; i++ {
+		err = suite.servers[i].putValueLocal(ctx, suite.key, suite.betterValue)
+		require.NoError(t, err)
+	}
+
+	// one of the remaining servers stores the newer record once more
+	err = suite.servers[8].putValueLocal(ctx, suite.key, suite.betterValue)
+	require.NoError(t, err)
+}
+
+func (suite *SearchValueQuorumTestSuite) TestQuorumReachedPrematurely() {
+	t := suite.T()
+	ctx := kadtest.CtxShort(t)
+	out, err := suite.d.SearchValue(ctx, suite.key, RoutingQuorum(3))
+	require.NoError(t, err)
+
+	val := readItem(t, ctx, out)
+	assert.Equal(t, suite.validValue, val)
+
+	assertClosed(t, ctx, out)
+}
+
+func (suite *SearchValueQuorumTestSuite) TestQuorumReachedAfterDiscoveryOfBetter() {
+	t := suite.T()
+	ctx := kadtest.CtxShort(t)
+	out, err := suite.d.SearchValue(ctx, suite.key, RoutingQuorum(5))
+	require.NoError(t, err)
+
+	val := readItem(t, ctx, out)
+	assert.Equal(t, suite.validValue, val)
+
+	val = readItem(t, ctx, out)
+	assert.Equal(t, suite.betterValue, val)
+
+	assertClosed(t, ctx, out)
+}
+
+func (suite *SearchValueQuorumTestSuite) TestQuorumZero() {
+	t := suite.T()
+	ctx := kadtest.CtxShort(t)
+
+	// search until query exhausted
+	out, err := suite.d.SearchValue(ctx, suite.key, RoutingQuorum(0))
+	require.NoError(t, err)
+
+	val := readItem(t, ctx, out)
+	assert.Equal(t, suite.validValue, val)
+
+	val = readItem(t, ctx, out)
+	assert.Equal(t, suite.betterValue, val)
+
+	assertClosed(t, ctx, out)
+}
+
+func (suite *SearchValueQuorumTestSuite) TestQuorumUnspecified() {
+	t := suite.T()
+	ctx := kadtest.CtxShort(t)
+
+	// search until query exhausted
+	out, err := suite.d.SearchValue(ctx, suite.key)
+	require.NoError(t, err)
+
+	val := readItem(t, ctx, out)
+	assert.Equal(t, suite.validValue, val)
+
+	val = readItem(t, ctx, out)
+	assert.Equal(t, suite.betterValue, val)
+
+	assertClosed(t, ctx, out)
+}
+
+func TestDHT_SearchValue_routing_option_returns_error(t *testing.T) {
+	ctx := kadtest.CtxShort(t)
+	d := newTestDHT(t)
+
+	errOption := func(opts *routing.Options) error {
+		return fmt.Errorf("some error")
+	}
+
+	valueChan, err := d.SearchValue(ctx, "/ipns/some-key", errOption)
+	assert.ErrorContains(t, err, "routing options")
+	assert.Nil(t, valueChan)
+}
+
+func TestDHT_SearchValue_quorum_negative(t *testing.T) {
+	ctx := kadtest.CtxShort(t)
+	d := newTestDHT(t)
+
+	out, err := d.SearchValue(ctx, "/"+namespaceIPNS+"/some-key", RoutingQuorum(-1))
+	assert.ErrorContains(t, err, "quorum must not be negative")
+	assert.Nil(t, out)
+}
+
+func TestDHT_SearchValue_invalid_key(t *testing.T) {
+	ctx := kadtest.CtxShort(t)
+	d := newTestDHT(t)
+
+	valueChan, err := d.SearchValue(ctx, "invalid-key")
+	assert.ErrorContains(t, err, "splitting key")
+	assert.Nil(t, valueChan)
+}
+
+func TestDHT_SearchValue_key_for_unsupported_namespace(t *testing.T) {
+	ctx := kadtest.CtxShort(t)
+	d := newTestDHT(t)
+
+	valueChan, err := d.SearchValue(ctx, "/unsupported/key")
+	assert.ErrorIs(t, err, routing.ErrNotSupported)
+	assert.Nil(t, valueChan)
+}
+
+func TestDHT_SearchValue_stops_with_cancelled_context(t *testing.T) {
+	ctx := kadtest.CtxShort(t)
+	cancelledCtx, cancel := context.WithCancel(ctx)
+	cancel()
+
+	// make sure we don't just stop because we don't know any other DHT server
+	top := NewTopology(t)
+	d1 := top.AddServer(nil)
+	d2 := top.AddServer(nil)
+	top.Connect(ctx, d1, d2)
+
+	valueChan, err := d1.SearchValue(cancelledCtx, "/"+namespaceIPNS+"/some-key")
+	assert.NoError(t, err)
+	assertClosed(t, ctx, valueChan)
+}
+
+func TestDHT_SearchValue_has_record_locally(t *testing.T) {
+	// Test setup:
+	// d1 already holds a valid record locally while the remote peer d2
+	// holds a better one.
+	ctx := kadtest.CtxShort(t)
+	clk := clock.New()
+
+	_, priv := newIdentity(t)
+	_, validValue := makeIPNSKeyValue(t, clk, priv, 1, time.Hour)
+	key, betterValue := makeIPNSKeyValue(t, clk, priv, 2, time.Hour)
+
+	top := NewTopology(t)
+	d1 := top.AddServer(nil)
+	d2 := top.AddServer(nil)
+
+	top.Connect(ctx, d1, d2)
+
+	err := d1.putValueLocal(ctx, key, validValue)
+	require.NoError(t, err)
+
+	err = d2.putValueLocal(ctx, key, betterValue)
+	require.NoError(t, err)
+
+	valChan, err := d1.SearchValue(ctx, key)
+	require.NoError(t, err)
+
+	val := readItem(t, ctx, valChan) // from local store
+	assert.Equal(t, validValue, val)
+
+	val = readItem(t, ctx, valChan)
+	assert.Equal(t, betterValue, val)
+
+	assertClosed(t, ctx, valChan)
+}
+
+func TestDHT_SearchValue_offline(t *testing.T) {
+	// Test setup:
+	// the record exists in the local datastore and we perform an offline lookup.
+	ctx := kadtest.CtxShort(t)
+	d := newTestDHT(t)
+
+	key, v := makePkKeyValue(t)
+	err := d.putValueLocal(ctx, key, v)
+	require.NoError(t, err)
+
+	valChan, err := d.SearchValue(ctx, key, routing.Offline)
+	require.NoError(t, err)
+
+	val := readItem(t, ctx, valChan)
+	assert.Equal(t, v, val)
+
+	assertClosed(t, ctx, valChan)
+}
+
+func TestDHT_SearchValue_offline_not_found_locally(t *testing.T) {
+	// Test setup:
+	// We are connected to a peer that holds the record but require an offline
+	// lookup. Assert that we don't receive the record
+	ctx := kadtest.CtxShort(t)
+
+	key, v := makePkKeyValue(t)
+
+	top := NewTopology(t)
+	d1 := top.AddServer(nil)
+	d2 := top.AddServer(nil)
+
+	top.Connect(ctx, d1, d2)
+
+	err := d2.putValueLocal(ctx, key, v)
+	require.NoError(t, err)
+
+	valChan, err := d1.SearchValue(ctx, key, routing.Offline)
+	assert.ErrorIs(t, err, routing.ErrNotFound)
+	assert.Nil(t, valChan)
+}
+
+func TestDHT_Bootstrap_no_peers_configured(t *testing.T) {
+	// TIMING: this test is based on timeouts - so might become flaky!
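+	// d1 only learns about d2 and d3 through its configured bootstrap peers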
+	ctx := kadtest.CtxShort(t)
+
+	top := NewTopology(t)
+	d1 := top.AddServer(nil)
+	d2 := top.AddServer(nil)
+	d3 := top.AddServer(nil)
+
+	d1.cfg.BootstrapPeers = []peer.AddrInfo{
+		{ID: d2.host.ID(), Addrs: d2.host.Addrs()},
+		{ID: d3.host.ID(), Addrs: d3.host.Addrs()},
+	}
+
+	err := d1.Bootstrap(ctx)
+	assert.NoError(t, err)
+
+	deadline, hasDeadline := ctx.Deadline()
+	if !hasDeadline {
+		deadline = time.Now().Add(5 * time.Second)
+	}
+
+	// bootstrapping is an asynchronous process, so we periodically check
+	// if the peers have each other in their routing tables
+	assert.EventuallyWithT(t, func(collect *assert.CollectT) {
+		_, found := d1.rt.GetNode(kadt.PeerID(d2.host.ID()).Key())
+		assert.True(collect, found)
+		_, found = d1.rt.GetNode(kadt.PeerID(d3.host.ID()).Key())
+		assert.True(collect, found)
+
+		_, found = d2.rt.GetNode(kadt.PeerID(d1.host.ID()).Key())
+		assert.True(collect, found)
+		_, found = d3.rt.GetNode(kadt.PeerID(d1.host.ID()).Key())
+		assert.True(collect, found)
+	}, time.Until(deadline), 10*time.Millisecond)
+}
diff --git a/v2/tele/tele.go b/v2/tele/tele.go
index 78aecc98..2c0aa42b 100644
--- a/v2/tele/tele.go
+++ b/v2/tele/tele.go
@@ -27,7 +27,7 @@ func NoopTracer() trace.Tracer {
 	return trace.NewNoopTracerProvider().Tracer("")
 }
 
-// NoopMeterProvider returns a meter provider that does not record or emit metrics.
+// NoopMeter returns a meter that does not record or emit metrics.
 func NoopMeter() metric.Meter {
 	return noop.NewMeterProvider().Meter("")
 }
diff --git a/v2/topology_test.go b/v2/topology_test.go
index 189b494e..b6be05be 100644
--- a/v2/topology_test.go
+++ b/v2/topology_test.go
@@ -61,10 +61,6 @@ func (t *Topology) AddServer(cfg *Config) *DHT {
 	rn := coord.NewBufferedRoutingNotifier()
 	d.kad.SetRoutingNotifier(rn)
 
-	// add at least 1 entry in the routing table so the server will pass connectivity checks
-	fillRoutingTable(t.tb, d, 1)
-	require.NotEmpty(t.tb, d.rt.NearestNodes(kadt.PeerID(d.host.ID()).Key(), 1))
-
 	t.tb.Cleanup(func() {
 		if err = d.Close(); err != nil {
 			t.tb.Logf("unexpected error when closing dht: %s", err)

From 90d748be7ba4c0ccd50a6126292ebaba6d7ce09e Mon Sep 17 00:00:00 2001
From: Dennis Trautwein
Date: Thu, 28 Sep 2023 14:36:04 +0200
Subject: [PATCH 61/64] fix: missing QueryMessage return value

---
 v2/internal/coord/coordinator.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/v2/internal/coord/coordinator.go b/v2/internal/coord/coordinator.go
index f932f164..f0d13568 100644
--- a/v2/internal/coord/coordinator.go
+++ b/v2/internal/coord/coordinator.go
@@ -383,7 +383,7 @@ func (c *Coordinator) QueryMessage(ctx context.Context, msg *pb.Message, fn coor
 	ctx, span := c.tele.Tracer.Start(ctx, "Coordinator.QueryMessage")
 	defer span.End()
 	if msg == nil {
-		return coordt.QueryStats{}, fmt.Errorf("no message supplied for query")
+		return nil, coordt.QueryStats{}, fmt.Errorf("no message supplied for query")
 	}
 	c.cfg.Logger.Debug("starting query with message", tele.LogAttrKey(msg.Target()), slog.String("type", msg.Type.String()))

From 0e628c067a0da58514a08e96a279346cb61be7e8 Mon Sep 17 00:00:00 2001
From: Dennis Trautwein
Date: Thu, 28 Sep 2023 14:39:34 +0200
Subject: [PATCH 62/64] fix: use correct err log method

---
 v2/routing.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/v2/routing.go b/v2/routing.go
index a0745945..5a81b41a 100644
--- a/v2/routing.go
+++ b/v2/routing.go
@@ -438,7 +438,7 @@ func (d *DHT) searchValueRoutine(ctx context.Context, backend Backend, ns string
 	_, _, err := 
d.kad.QueryMessage(ctx, req, fn, d.cfg.BucketSize) if err != nil { - d.logErr(err, "Search value query failed") + d.warnErr(err, "Search value query failed") return } From 509eee414ba614f38108986a0f417ff0044de84a Mon Sep 17 00:00:00 2001 From: Ian Davis <18375+iand@users.noreply.github.com> Date: Thu, 28 Sep 2023 16:43:57 +0100 Subject: [PATCH 63/64] fix: avoid panic in bootstrap when late messages arrive (#949) --- v2/internal/coord/routing/bootstrap.go | 29 ++--- v2/internal/coord/routing/bootstrap_test.go | 114 ++++++++++++++++++++ 2 files changed, 131 insertions(+), 12 deletions(-) diff --git a/v2/internal/coord/routing/bootstrap.go b/v2/internal/coord/routing/bootstrap.go index e941ae0f..310b9a86 100644 --- a/v2/internal/coord/routing/bootstrap.go +++ b/v2/internal/coord/routing/bootstrap.go @@ -213,19 +213,24 @@ func (b *Bootstrap[K, N]) Advance(ctx context.Context, ev BootstrapEvent) (out B return b.advanceQuery(ctx, &query.EventQueryPoll{}) case *EventBootstrapFindCloserResponse[K, N]: - b.counterFindSucceeded.Add(ctx, 1) - return b.advanceQuery(ctx, &query.EventQueryNodeResponse[K, N]{ - NodeID: tev.NodeID, - CloserNodes: tev.CloserNodes, - }) + // ignore late responses + if b.qry != nil { + b.counterFindSucceeded.Add(ctx, 1) + return b.advanceQuery(ctx, &query.EventQueryNodeResponse[K, N]{ + NodeID: tev.NodeID, + CloserNodes: tev.CloserNodes, + }) + } case *EventBootstrapFindCloserFailure[K, N]: - b.counterFindFailed.Add(ctx, 1) - span.RecordError(tev.Error) - return b.advanceQuery(ctx, &query.EventQueryNodeFailure[K, N]{ - NodeID: tev.NodeID, - Error: tev.Error, - }) - + // ignore late responses + if b.qry != nil { + b.counterFindFailed.Add(ctx, 1) + span.RecordError(tev.Error) + return b.advanceQuery(ctx, &query.EventQueryNodeFailure[K, N]{ + NodeID: tev.NodeID, + Error: tev.Error, + }) + } case *EventBootstrapPoll: // ignore, nothing to do default: diff --git a/v2/internal/coord/routing/bootstrap_test.go b/v2/internal/coord/routing/bootstrap_test.go index 6adcb74c..c4b9fd57 100644 --- a/v2/internal/coord/routing/bootstrap_test.go +++ b/v2/internal/coord/routing/bootstrap_test.go @@ -270,3 +270,117 @@ func TestBootstrapFinishesThenGoesIdle(t *testing.T) { // bootstrap should now be idle require.IsType(t, &StateBootstrapIdle{}, state) } + +func TestBootstrapFinishedIgnoresLaterResponses(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultBootstrapConfig() + cfg.Clock = clk + + self := tiny.NewNode(0) + bs, err := NewBootstrap[tiny.Key](self, cfg) + require.NoError(t, err) + + a := tiny.NewNode(4) + b := tiny.NewNode(8) + + // start the bootstrap + state := bs.Advance(ctx, &EventBootstrapStart[tiny.Key, tiny.Node]{ + KnownClosestNodes: []tiny.Node{b}, + }) + require.IsType(t, &StateBootstrapFindCloser[tiny.Key, tiny.Node]{}, state) + + // the bootstrap should attempt to contact the node it was given + st := state.(*StateBootstrapFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, coordt.QueryID("bootstrap"), st.QueryID) + require.Equal(t, b, st.NodeID) + + // notify bootstrap that node was contacted successfully with a closer node + state = bs.Advance(ctx, &EventBootstrapFindCloserResponse[tiny.Key, tiny.Node]{ + NodeID: b, + CloserNodes: []tiny.Node{a}, + }) + + // bootstrap should respond that it wants to contact the new node + require.IsType(t, &StateBootstrapFindCloser[tiny.Key, tiny.Node]{}, state) + + // poll bootstrap + state = bs.Advance(ctx, &EventBootstrapPoll{}) + + // bootstrap should now be waiting + require.IsType(t, 
&StateBootstrapWaiting{}, state) + + // advance the clock past the timeout + clk.Add(cfg.RequestTimeout * 2) + + // poll bootstrap + state = bs.Advance(ctx, &EventBootstrapPoll{}) + + // bootstrap should now be finished + require.IsType(t, &StateBootstrapFinished{}, state) + + // notify bootstrap that node was contacted successfully after the timeout + state = bs.Advance(ctx, &EventBootstrapFindCloserResponse[tiny.Key, tiny.Node]{ + NodeID: a, + }) + + // bootstrap should ignore late message and now be idle + require.IsType(t, &StateBootstrapIdle{}, state) +} + +func TestBootstrapFinishedIgnoresLaterFailures(t *testing.T) { + ctx := context.Background() + clk := clock.NewMock() + cfg := DefaultBootstrapConfig() + cfg.Clock = clk + + self := tiny.NewNode(0) + bs, err := NewBootstrap[tiny.Key](self, cfg) + require.NoError(t, err) + + a := tiny.NewNode(4) + b := tiny.NewNode(8) + + // start the bootstrap + state := bs.Advance(ctx, &EventBootstrapStart[tiny.Key, tiny.Node]{ + KnownClosestNodes: []tiny.Node{b}, + }) + require.IsType(t, &StateBootstrapFindCloser[tiny.Key, tiny.Node]{}, state) + + // the bootstrap should attempt to contact the node it was given + st := state.(*StateBootstrapFindCloser[tiny.Key, tiny.Node]) + require.Equal(t, coordt.QueryID("bootstrap"), st.QueryID) + require.Equal(t, b, st.NodeID) + + // notify bootstrap that node was contacted successfully with a closer node + state = bs.Advance(ctx, &EventBootstrapFindCloserResponse[tiny.Key, tiny.Node]{ + NodeID: b, + CloserNodes: []tiny.Node{a}, + }) + + // bootstrap should respond that it wants to contact the new node + require.IsType(t, &StateBootstrapFindCloser[tiny.Key, tiny.Node]{}, state) + + // poll bootstrap + state = bs.Advance(ctx, &EventBootstrapPoll{}) + + // bootstrap should now be waiting + require.IsType(t, &StateBootstrapWaiting{}, state) + + // advance the clock past the timeout + clk.Add(cfg.RequestTimeout * 2) + + // poll bootstrap + state = bs.Advance(ctx, &EventBootstrapPoll{}) + + // bootstrap should now be finished + require.IsType(t, &StateBootstrapFinished{}, state) + + // notify bootstrap that node failed to be contacted + state = bs.Advance(ctx, &EventBootstrapFindCloserFailure[tiny.Key, tiny.Node]{ + NodeID: a, + }) + + // bootstrap should ignore late message and now be idle + require.IsType(t, &StateBootstrapIdle{}, state) +} From 1d1fe933fc5496a19f7ae7509d922ff0f4c27400 Mon Sep 17 00:00:00 2001 From: Ian Davis <18375+iand@users.noreply.github.com> Date: Fri, 29 Sep 2023 09:25:49 +0100 Subject: [PATCH 64/64] Use go-libdht (#952) --- v2/config.go | 2 +- v2/dht.go | 2 +- v2/go.mod | 3 +- v2/go.sum | 4 +-- v2/internal/coord/brdcst/brdcst.go | 2 +- v2/internal/coord/brdcst/followup.go | 2 +- v2/internal/coord/brdcst/pool.go | 2 +- v2/internal/coord/brdcst/pool_test.go | 2 +- v2/internal/coord/brdcst/static.go | 2 +- v2/internal/coord/coordinator.go | 2 +- v2/internal/coord/coordt/coretypes.go | 2 +- v2/internal/coord/cplutil/cpl_test.go | 6 ++-- v2/internal/coord/internal/nettest/layouts.go | 9 +++-- v2/internal/coord/internal/nettest/routing.go | 26 +++++++------- .../coord/internal/nettest/topology.go | 7 ++-- v2/internal/coord/internal/tiny/node.go | 7 ++-- v2/internal/coord/network.go | 2 +- v2/internal/coord/query/iter.go | 6 ++-- v2/internal/coord/query/iter_test.go | 2 +- v2/internal/coord/query/node.go | 2 +- v2/internal/coord/query/pool.go | 16 ++++----- v2/internal/coord/query/pool_test.go | 2 +- v2/internal/coord/query/query.go | 14 ++++---- v2/internal/coord/query/query_test.go | 2 +- 
v2/internal/coord/routing/bootstrap.go | 2 +- v2/internal/coord/routing/bootstrap_test.go | 2 +- v2/internal/coord/routing/explore.go | 2 +- v2/internal/coord/routing/explore_test.go | 21 ++++++++---- v2/internal/coord/routing/include.go | 4 +-- v2/internal/coord/routing/include_test.go | 22 +++++++----- v2/internal/coord/routing/probe.go | 4 +-- v2/internal/coord/routing/probe_test.go | 34 ++++++++++++------- v2/kadt/kadt.go | 17 ++++------ 33 files changed, 129 insertions(+), 105 deletions(-) diff --git a/v2/config.go b/v2/config.go index b7314ecf..01c006cc 100644 --- a/v2/config.go +++ b/v2/config.go @@ -12,7 +12,7 @@ import ( "github.com/libp2p/go-libp2p/core/protocol" ma "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" - "github.com/plprobelab/go-kademlia/routing/triert" + "github.com/plprobelab/go-libdht/kad/triert" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" diff --git a/v2/dht.go b/v2/dht.go index 86e3b97f..ce6ace07 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -36,7 +36,7 @@ type DHT struct { modeMu sync.RWMutex mode mode - // kad is a reference to the go-kademlia coordinator + // kad is a reference to the coordinator kad *coord.Coordinator // rt holds a reference to the routing table implementation. This can be diff --git a/v2/go.mod b/v2/go.mod index 5a0b9e6b..1e83a10e 100644 --- a/v2/go.mod +++ b/v2/go.mod @@ -17,7 +17,6 @@ require ( github.com/multiformats/go-multiaddr v0.11.0 github.com/multiformats/go-multihash v0.2.3 github.com/pkg/errors v0.9.1 // indirect - github.com/plprobelab/go-kademlia v0.0.0-20230913171354-443ec1f56080 github.com/prometheus/client_golang v1.16.0 // indirect github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.18.0 @@ -31,6 +30,8 @@ require ( google.golang.org/protobuf v1.31.0 ) +require github.com/plprobelab/go-libdht v0.0.0-20230928112736-796722ce828d + require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect diff --git a/v2/go.sum b/v2/go.sum index bffb7bb6..7641275e 100644 --- a/v2/go.sum +++ b/v2/go.sum @@ -264,8 +264,8 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhM github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/plprobelab/go-kademlia v0.0.0-20230913171354-443ec1f56080 h1:CqaVJqntB6Gm7LILVsIZv0Sdy9kfmi74rwZRt66hPLM= -github.com/plprobelab/go-kademlia v0.0.0-20230913171354-443ec1f56080/go.mod h1:9mz9/8plJj9HWiQmB6JkBNHY30AXzy9LrJ++sCvWqFQ= +github.com/plprobelab/go-libdht v0.0.0-20230928112736-796722ce828d h1:CM1jpcL54+I/7eY8Ti+Ul+1xdpekNjHt9ZpGPMdC0uc= +github.com/plprobelab/go-libdht v0.0.0-20230928112736-796722ce828d/go.mod h1:1m6gBp1WX7RPN3KnwC5BX5YZ5nDTy+g6x9M4fgb/n1w= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= diff --git a/v2/internal/coord/brdcst/brdcst.go b/v2/internal/coord/brdcst/brdcst.go index 8711a1b3..7a149120 100644 --- a/v2/internal/coord/brdcst/brdcst.go +++ b/v2/internal/coord/brdcst/brdcst.go @@ -1,7 +1,7 @@ package brdcst import ( - "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-libdht/kad" 
"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" ) diff --git a/v2/internal/coord/brdcst/followup.go b/v2/internal/coord/brdcst/followup.go index 7769f961..901caea7 100644 --- a/v2/internal/coord/brdcst/followup.go +++ b/v2/internal/coord/brdcst/followup.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-libdht/kad" "go.opentelemetry.io/otel/trace" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" diff --git a/v2/internal/coord/brdcst/pool.go b/v2/internal/coord/brdcst/pool.go index 71d4e936..25f3053c 100644 --- a/v2/internal/coord/brdcst/pool.go +++ b/v2/internal/coord/brdcst/pool.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-libdht/kad" "go.opentelemetry.io/otel/trace" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" diff --git a/v2/internal/coord/brdcst/pool_test.go b/v2/internal/coord/brdcst/pool_test.go index aaad7dc9..83242b47 100644 --- a/v2/internal/coord/brdcst/pool_test.go +++ b/v2/internal/coord/brdcst/pool_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-libdht/kad/key" "github.com/stretchr/testify/require" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" diff --git a/v2/internal/coord/brdcst/static.go b/v2/internal/coord/brdcst/static.go index 0a36721b..49088de1 100644 --- a/v2/internal/coord/brdcst/static.go +++ b/v2/internal/coord/brdcst/static.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-libdht/kad" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" diff --git a/v2/internal/coord/coordinator.go b/v2/internal/coord/coordinator.go index f0d13568..cb1695e7 100644 --- a/v2/internal/coord/coordinator.go +++ b/v2/internal/coord/coordinator.go @@ -9,7 +9,7 @@ import ( "sync/atomic" "github.com/benbjohnson/clock" - "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-libdht/kad" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" diff --git a/v2/internal/coord/coordt/coretypes.go b/v2/internal/coord/coordt/coretypes.go index 2e000c81..14d53038 100644 --- a/v2/internal/coord/coordt/coretypes.go +++ b/v2/internal/coord/coordt/coretypes.go @@ -5,7 +5,7 @@ import ( "errors" "time" - "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-libdht/kad" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p-kad-dht/v2/pb" diff --git a/v2/internal/coord/cplutil/cpl_test.go b/v2/internal/coord/cplutil/cpl_test.go index 4ea4ad73..051c127a 100644 --- a/v2/internal/coord/cplutil/cpl_test.go +++ b/v2/internal/coord/cplutil/cpl_test.go @@ -6,7 +6,7 @@ import ( "testing" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" - "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-libdht/kad/key/bit256" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -20,10 +20,10 @@ func TestPrefix(t *testing.T) { 0b0000000000000010, } - makeKeyWithPrefix := func(v uint16) key.Key256 { + makeKeyWithPrefix := func(v uint16) bit256.Key { data := [32]byte{} binary.BigEndian.PutUint16(data[0:2], v) - return key.NewKey256(data[:]) + return bit256.NewKey(data[:]) } for _, tc := range testCases { diff --git a/v2/internal/coord/internal/nettest/layouts.go b/v2/internal/coord/internal/nettest/layouts.go index b85d320d..fc06a30d 100644 --- 
a/v2/internal/coord/internal/nettest/layouts.go +++ b/v2/internal/coord/internal/nettest/layouts.go @@ -4,7 +4,7 @@ import ( "context" "github.com/benbjohnson/clock" - "github.com/plprobelab/go-kademlia/routing/simplert" + "github.com/plprobelab/go-libdht/kad/triert" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) @@ -26,10 +26,15 @@ func LinearTopology(n int, clk clock.Clock) (*Topology, []*Peer, error) { return nil, nil, err } + rt, err := triert.New[kadt.Key, kadt.PeerID](id, nil) + if err != nil { + return nil, nil, err + } + nodes[i] = &Peer{ NodeID: id, Router: NewRouter(id, top), - RoutingTable: simplert.New[kadt.Key, kadt.PeerID](id, 20), + RoutingTable: rt, } } diff --git a/v2/internal/coord/internal/nettest/routing.go b/v2/internal/coord/internal/nettest/routing.go index 05360c87..7ce5eef3 100644 --- a/v2/internal/coord/internal/nettest/routing.go +++ b/v2/internal/coord/internal/nettest/routing.go @@ -9,10 +9,8 @@ import ( "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" - "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/key" - "github.com/plprobelab/go-kademlia/network/address" - "github.com/plprobelab/go-kademlia/network/endpoint" + "github.com/plprobelab/go-libdht/kad" + "github.com/plprobelab/go-libdht/kad/key" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p-kad-dht/v2/pb" @@ -56,8 +54,8 @@ type Router struct { } type nodeStatus struct { - NodeID kadt.PeerID - Connectedness endpoint.Connectedness + NodeID kadt.PeerID + Connected bool } func NewRouter(self kadt.PeerID, top *Topology) *Router { @@ -72,7 +70,7 @@ func (r *Router) NodeID() kad.NodeID[kadt.Key] { return r.self } -func (r *Router) handleMessage(ctx context.Context, n kadt.PeerID, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error) { +func (r *Router) handleMessage(ctx context.Context, n kadt.PeerID, req *pb.Message) (*pb.Message, error) { closer := make([]*pb.Message_Peer, 0) r.mu.Lock() @@ -101,19 +99,19 @@ func (r *Router) dial(ctx context.Context, to kadt.PeerID) error { if !ok { status = &nodeStatus{ - NodeID: to, - Connectedness: endpoint.CanConnect, + NodeID: to, + Connected: false, } } - if status.Connectedness == endpoint.Connected { + if status.Connected { return nil } if err := r.top.Dial(ctx, r.self, to); err != nil { return err } - status.Connectedness = endpoint.Connected + status.Connected = true r.mu.Lock() r.nodes[to.String()] = status r.mu.Unlock() @@ -126,8 +124,8 @@ func (r *Router) AddToPeerStore(ctx context.Context, id kadt.PeerID) error { if _, ok := r.nodes[id.String()]; !ok { r.nodes[id.String()] = &nodeStatus{ - NodeID: id, - Connectedness: endpoint.CanConnect, + NodeID: id, + Connected: false, } } return nil @@ -138,7 +136,7 @@ func (r *Router) SendMessage(ctx context.Context, to kadt.PeerID, req *pb.Messag return nil, fmt.Errorf("dial: %w", err) } - return r.top.RouteMessage(ctx, r.self, to, "", req) + return r.top.RouteMessage(ctx, r.self, to, req) } func (r *Router) GetClosestNodes(ctx context.Context, to kadt.PeerID, target kadt.Key) ([]kadt.PeerID, error) { diff --git a/v2/internal/coord/internal/nettest/topology.go b/v2/internal/coord/internal/nettest/topology.go index dda13ade..95beeec6 100644 --- a/v2/internal/coord/internal/nettest/topology.go +++ b/v2/internal/coord/internal/nettest/topology.go @@ -6,7 +6,6 @@ import ( "github.com/benbjohnson/clock" "github.com/libp2p/go-libp2p/core/peer" - "github.com/plprobelab/go-kademlia/network/address" 
"github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/routing" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" @@ -110,14 +109,14 @@ func (t *Topology) Dial(ctx context.Context, from kadt.PeerID, to kadt.PeerID) e return nil } -func (t *Topology) RouteMessage(ctx context.Context, from kadt.PeerID, to kadt.PeerID, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error) { +func (t *Topology) RouteMessage(ctx context.Context, from kadt.PeerID, to kadt.PeerID, req *pb.Message) (*pb.Message, error) { if from == to { node, ok := t.nodeIndex[to.String()] if !ok { return nil, fmt.Errorf("unknown node") } - return node.Router.handleMessage(ctx, from, protoID, req) + return node.Router.handleMessage(ctx, from, req) } route, err := t.findRoute(ctx, from, to) @@ -135,5 +134,5 @@ func (t *Topology) RouteMessage(ctx context.Context, from kadt.PeerID, to kadt.P return nil, fmt.Errorf("no route to node") } - return node.Router.handleMessage(ctx, from, protoID, req) + return node.Router.handleMessage(ctx, from, req) } diff --git a/v2/internal/coord/internal/tiny/node.go b/v2/internal/coord/internal/tiny/node.go index 065b5881..8b0a4e0a 100644 --- a/v2/internal/coord/internal/tiny/node.go +++ b/v2/internal/coord/internal/tiny/node.go @@ -4,11 +4,12 @@ package tiny import ( "fmt" - "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-libdht/kad" + "github.com/plprobelab/go-libdht/kad/kadtest" + "github.com/plprobelab/go-libdht/kad/key" ) -type Key = key.Key8 +type Key = kadtest.Key8 type Node struct { key Key diff --git a/v2/internal/coord/network.go b/v2/internal/coord/network.go index 7d9d8374..5dfd99ac 100644 --- a/v2/internal/coord/network.go +++ b/v2/internal/coord/network.go @@ -5,7 +5,7 @@ import ( "fmt" "sync" - "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-libdht/kad/key" "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" diff --git a/v2/internal/coord/query/iter.go b/v2/internal/coord/query/iter.go index 0cf2bbed..84403de6 100644 --- a/v2/internal/coord/query/iter.go +++ b/v2/internal/coord/query/iter.go @@ -3,9 +3,9 @@ package query import ( "context" - "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/key" - "github.com/plprobelab/go-kademlia/key/trie" + "github.com/plprobelab/go-libdht/kad" + "github.com/plprobelab/go-libdht/kad/key" + "github.com/plprobelab/go-libdht/kad/trie" ) // A NodeIter iterates nodes according to some strategy. 
diff --git a/v2/internal/coord/query/iter_test.go b/v2/internal/coord/query/iter_test.go index cb987349..998f5024 100644 --- a/v2/internal/coord/query/iter_test.go +++ b/v2/internal/coord/query/iter_test.go @@ -4,7 +4,7 @@ import ( "context" "testing" - "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-libdht/kad/key" "github.com/stretchr/testify/require" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/tiny" diff --git a/v2/internal/coord/query/node.go b/v2/internal/coord/query/node.go index 7540acef..d35da8c4 100644 --- a/v2/internal/coord/query/node.go +++ b/v2/internal/coord/query/node.go @@ -3,7 +3,7 @@ package query import ( "time" - "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-libdht/kad" ) type NodeStatus[K kad.Key[K], N kad.NodeID[K]] struct { diff --git a/v2/internal/coord/query/pool.go b/v2/internal/coord/query/pool.go index d6edea86..68164d49 100644 --- a/v2/internal/coord/query/pool.go +++ b/v2/internal/coord/query/pool.go @@ -6,9 +6,9 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/kaderr" + "github.com/plprobelab/go-libdht/kad" + "github.com/libp2p/go-libp2p-kad-dht/v2/errs" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) @@ -39,39 +39,39 @@ type PoolConfig struct { // Validate checks the configuration options and returns an error if any have invalid values. func (cfg *PoolConfig) Validate() error { if cfg.Clock == nil { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "PoolConfig", Err: fmt.Errorf("clock must not be nil"), } } if cfg.Concurrency < 1 { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "PoolConfig", Err: fmt.Errorf("concurrency must be greater than zero"), } } if cfg.Timeout < 1 { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "PoolConfig", Err: fmt.Errorf("timeout must be greater than zero"), } } if cfg.Replication < 1 { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "PoolConfig", Err: fmt.Errorf("replication must be greater than zero"), } } if cfg.QueryConcurrency < 1 { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "PoolConfig", Err: fmt.Errorf("query concurrency must be greater than zero"), } } if cfg.RequestTimeout < 1 { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "PoolConfig", Err: fmt.Errorf("request timeout must be greater than zero"), } diff --git a/v2/internal/coord/query/pool_test.go b/v2/internal/coord/query/pool_test.go index 88f30091..06a15203 100644 --- a/v2/internal/coord/query/pool_test.go +++ b/v2/internal/coord/query/pool_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/benbjohnson/clock" - "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-libdht/kad/key" "github.com/stretchr/testify/require" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" diff --git a/v2/internal/coord/query/query.go b/v2/internal/coord/query/query.go index 77a4dae8..ead1cb27 100644 --- a/v2/internal/coord/query/query.go +++ b/v2/internal/coord/query/query.go @@ -6,11 +6,11 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/kaderr" - "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-libdht/kad" + 
"github.com/plprobelab/go-libdht/kad/key" "go.opentelemetry.io/otel/trace" + "github.com/libp2p/go-libp2p-kad-dht/v2/errs" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) @@ -34,25 +34,25 @@ type QueryConfig struct { // Validate checks the configuration options and returns an error if any have invalid values. func (cfg *QueryConfig) Validate() error { if cfg.Clock == nil { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "QueryConfig", Err: fmt.Errorf("clock must not be nil"), } } if cfg.Concurrency < 1 { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "QueryConfig", Err: fmt.Errorf("concurrency must be greater than zero"), } } if cfg.NumResults < 1 { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "QueryConfig", Err: fmt.Errorf("num results must be greater than zero"), } } if cfg.RequestTimeout < 1 { - return &kaderr.ConfigurationError{ + return &errs.ConfigurationError{ Component: "QueryConfig", Err: fmt.Errorf("request timeout must be greater than zero"), } diff --git a/v2/internal/coord/query/query_test.go b/v2/internal/coord/query/query_test.go index f1a211a2..ec2ba823 100644 --- a/v2/internal/coord/query/query_test.go +++ b/v2/internal/coord/query/query_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-libdht/kad/key" "github.com/stretchr/testify/require" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" diff --git a/v2/internal/coord/routing/bootstrap.go b/v2/internal/coord/routing/bootstrap.go index 310b9a86..f0125ebd 100644 --- a/v2/internal/coord/routing/bootstrap.go +++ b/v2/internal/coord/routing/bootstrap.go @@ -7,7 +7,7 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-libdht/kad" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" diff --git a/v2/internal/coord/routing/bootstrap_test.go b/v2/internal/coord/routing/bootstrap_test.go index c4b9fd57..8fff54fa 100644 --- a/v2/internal/coord/routing/bootstrap_test.go +++ b/v2/internal/coord/routing/bootstrap_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/benbjohnson/clock" - "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-libdht/kad/key" "github.com/stretchr/testify/require" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/coordt" diff --git a/v2/internal/coord/routing/explore.go b/v2/internal/coord/routing/explore.go index cf731aa5..5559a080 100644 --- a/v2/internal/coord/routing/explore.go +++ b/v2/internal/coord/routing/explore.go @@ -9,7 +9,7 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-libdht/kad" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" diff --git a/v2/internal/coord/routing/explore_test.go b/v2/internal/coord/routing/explore_test.go index 29d8653b..d942d864 100644 --- a/v2/internal/coord/routing/explore_test.go +++ b/v2/internal/coord/routing/explore_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/plprobelab/go-kademlia/routing/simplert" + "github.com/plprobelab/go-libdht/kad/triert" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -126,7 +126,9 @@ func TestExploreStartsIdle(t *testing.T) { cfg.Clock = 
clk self := tiny.NewNode(128) - rt := simplert.New[tiny.Key, tiny.Node](self, 5) + rt, err := triert.New[tiny.Key, tiny.Node](self, nil) + require.NoError(t, err) + schedule := DefaultDynamicSchedule(t, clk) ex, err := NewExplore[tiny.Key, tiny.Node](self, rt, tiny.NodeWithCpl, schedule, cfg) require.NoError(t, err) @@ -142,7 +144,8 @@ func TestExploreFirstQueriesForMaximumCpl(t *testing.T) { cfg.Clock = clk self := tiny.NewNode(128) - rt := simplert.New[tiny.Key, tiny.Node](self, 5) + rt, err := triert.New[tiny.Key, tiny.Node](self, nil) + require.NoError(t, err) // populate the routing table with at least one node a := tiny.NewNode(4) @@ -190,7 +193,8 @@ func TestExploreFindCloserResponse(t *testing.T) { cfg.Clock = clk self := tiny.NewNode(128) - rt := simplert.New[tiny.Key, tiny.Node](self, 5) + rt, err := triert.New[tiny.Key, tiny.Node](self, nil) + require.NoError(t, err) // populate the routing table with at least one node a := tiny.NewNode(4) @@ -232,7 +236,8 @@ func TestExploreFindCloserFailure(t *testing.T) { cfg.Clock = clk self := tiny.NewNode(128) - rt := simplert.New[tiny.Key, tiny.Node](self, 5) + rt, err := triert.New[tiny.Key, tiny.Node](self, nil) + require.NoError(t, err) // populate the routing table with at least one node a := tiny.NewNode(4) @@ -274,7 +279,8 @@ func TestExploreProgress(t *testing.T) { cfg.Clock = clk self := tiny.NewNode(128) - rt := simplert.New[tiny.Key, tiny.Node](self, 5) + rt, err := triert.New[tiny.Key, tiny.Node](self, nil) + require.NoError(t, err) a := tiny.NewNode(4) // 4 b := tiny.NewNode(8) // 8 @@ -349,7 +355,8 @@ func TestExploreQueriesNextHighestCpl(t *testing.T) { cfg.Clock = clk self := tiny.NewNode(128) - rt := simplert.New[tiny.Key, tiny.Node](self, 5) + rt, err := triert.New[tiny.Key, tiny.Node](self, nil) + require.NoError(t, err) // populate the routing table with at least one node a := tiny.NewNode(4) diff --git a/v2/internal/coord/routing/include.go b/v2/internal/coord/routing/include.go index 4aad5383..53551f46 100644 --- a/v2/internal/coord/routing/include.go +++ b/v2/internal/coord/routing/include.go @@ -7,8 +7,8 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-libdht/kad" + "github.com/plprobelab/go-libdht/kad/key" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" diff --git a/v2/internal/coord/routing/include_test.go b/v2/internal/coord/routing/include_test.go index b727a88c..73f243c3 100644 --- a/v2/internal/coord/routing/include_test.go +++ b/v2/internal/coord/routing/include_test.go @@ -5,8 +5,8 @@ import ( "testing" "github.com/benbjohnson/clock" - "github.com/plprobelab/go-kademlia/key" - "github.com/plprobelab/go-kademlia/routing/simplert" + "github.com/plprobelab/go-libdht/kad/key" + "github.com/plprobelab/go-libdht/kad/triert" "github.com/stretchr/testify/require" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/tiny" @@ -67,7 +67,8 @@ func TestIncludeStartsIdle(t *testing.T) { cfg := DefaultIncludeConfig() cfg.Clock = clk - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt, err := triert.New[tiny.Key, tiny.Node](tiny.NewNode(128), nil) + require.NoError(t, err) bs, err := NewInclude[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) @@ -83,7 +84,8 @@ func TestIncludeAddCandidateStartsCheckIfCapacity(t *testing.T) { cfg.Clock = clk cfg.Concurrency = 1 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt, err := triert.New[tiny.Key, 
tiny.Node](tiny.NewNode(128), nil) + require.NoError(t, err) p, err := NewInclude[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) @@ -117,7 +119,8 @@ func TestIncludeAddCandidateReportsCapacity(t *testing.T) { cfg.Clock = clk cfg.Concurrency = 2 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt, err := triert.New[tiny.Key, tiny.Node](tiny.NewNode(128), nil) + require.NoError(t, err) p, err := NewInclude[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) @@ -143,7 +146,8 @@ func TestIncludeAddCandidateOverQueueLength(t *testing.T) { cfg.QueueCapacity = 2 // only allow two candidates in the queue cfg.Concurrency = 3 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt, err := triert.New[tiny.Key, tiny.Node](tiny.NewNode(128), nil) + require.NoError(t, err) p, err := NewInclude[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) @@ -215,7 +219,8 @@ func TestIncludeConnectivityCheckSuccess(t *testing.T) { cfg.Clock = clk cfg.Concurrency = 2 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt, err := triert.New[tiny.Key, tiny.Node](tiny.NewNode(128), nil) + require.NoError(t, err) p, err := NewInclude[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) @@ -258,7 +263,8 @@ func TestIncludeConnectivityCheckFailure(t *testing.T) { cfg.Clock = clk cfg.Concurrency = 2 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt, err := triert.New[tiny.Key, tiny.Node](tiny.NewNode(128), nil) + require.NoError(t, err) p, err := NewInclude[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) diff --git a/v2/internal/coord/routing/probe.go b/v2/internal/coord/routing/probe.go index 56f31146..56591229 100644 --- a/v2/internal/coord/routing/probe.go +++ b/v2/internal/coord/routing/probe.go @@ -9,8 +9,8 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-libdht/kad" + "github.com/plprobelab/go-libdht/kad/key" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" diff --git a/v2/internal/coord/routing/probe_test.go b/v2/internal/coord/routing/probe_test.go index 9c29a0e8..c0fb4d23 100644 --- a/v2/internal/coord/routing/probe_test.go +++ b/v2/internal/coord/routing/probe_test.go @@ -7,8 +7,8 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/plprobelab/go-kademlia/key" - "github.com/plprobelab/go-kademlia/routing/simplert" + "github.com/plprobelab/go-libdht/kad/key" + "github.com/plprobelab/go-libdht/kad/triert" "github.com/stretchr/testify/require" "github.com/libp2p/go-libp2p-kad-dht/v2/internal/coord/internal/tiny" @@ -71,7 +71,8 @@ func TestProbeStartsIdle(t *testing.T) { cfg := DefaultProbeConfig() cfg.Clock = clk - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt, err := triert.New[tiny.Key, tiny.Node](tiny.NewNode(128), nil) + require.NoError(t, err) bs, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) @@ -91,7 +92,8 @@ func TestProbeAddChecksPresenceInRoutingTable(t *testing.T) { // Set concurrency to allow one check to run cfg.Concurrency = 1 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt, err := triert.New[tiny.Key, tiny.Node](tiny.NewNode(128), nil) + require.NoError(t, err) sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) require.NoError(t, err) @@ -120,7 +122,8 @@ func TestProbeAddStartsCheckIfCapacity(t *testing.T) { // Set concurrency to allow one check to run cfg.Concurrency = 1 - rt := 
simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt, err := triert.New[tiny.Key, tiny.Node](tiny.NewNode(128), nil) + require.NoError(t, err) rt.AddNode(tiny.NewNode(4)) sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) @@ -160,7 +163,8 @@ func TestProbeAddManyStartsChecksIfCapacity(t *testing.T) { // Set concurrency lower than the number of nodes cfg.Concurrency = 2 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt, err := triert.New[tiny.Key, tiny.Node](tiny.NewNode(128), nil) + require.NoError(t, err) rt.AddNode(tiny.NewNode(4)) rt.AddNode(tiny.NewNode(3)) rt.AddNode(tiny.NewNode(2)) @@ -224,7 +228,8 @@ func TestProbeAddReportsCapacity(t *testing.T) { // Set concurrency to allow more than one check to run cfg.Concurrency = 2 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt, err := triert.New[tiny.Key, tiny.Node](tiny.NewNode(128), nil) + require.NoError(t, err) rt.AddNode(tiny.NewNode(4)) sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) @@ -267,7 +272,8 @@ func TestProbeRemoveDeletesNodeValue(t *testing.T) { // Set concurrency to allow more than one check to run cfg.Concurrency = 2 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt, err := triert.New[tiny.Key, tiny.Node](tiny.NewNode(128), nil) + require.NoError(t, err) rt.AddNode(tiny.NewNode(4)) sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) @@ -627,7 +633,8 @@ func TestProbeConnectivityCheckSuccess(t *testing.T) { // Set concurrency to allow more than one check to run cfg.Concurrency = 2 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt, err := triert.New[tiny.Key, tiny.Node](tiny.NewNode(128), nil) + require.NoError(t, err) rt.AddNode(tiny.NewNode(4)) sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) @@ -685,7 +692,8 @@ func TestProbeConnectivityCheckFailure(t *testing.T) { // Set concurrency to allow more than one check to run cfg.Concurrency = 2 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt, err := triert.New[tiny.Key, tiny.Node](tiny.NewNode(128), nil) + require.NoError(t, err) rt.AddNode(tiny.NewNode(4)) sm, err := NewProbe[tiny.Key, tiny.Node](rt, cfg) @@ -739,7 +747,8 @@ func TestProbeNotifyConnectivity(t *testing.T) { cfg.CheckInterval = 10 * time.Minute cfg.Concurrency = 2 - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt, err := triert.New[tiny.Key, tiny.Node](tiny.NewNode(128), nil) + require.NoError(t, err) rt.AddNode(tiny.NewNode(4)) rt.AddNode(tiny.NewNode(3)) @@ -809,7 +818,8 @@ func TestProbeTimeout(t *testing.T) { cfg.Timeout = 3 * time.Minute cfg.Concurrency = 1 // one probe at a time, timeouts will be used to free capacity if there are more requests - rt := simplert.New[tiny.Key, tiny.Node](tiny.NewNode(128), 5) + rt, err := triert.New[tiny.Key, tiny.Node](tiny.NewNode(128), nil) + require.NoError(t, err) rt.AddNode(tiny.NewNode(4)) rt.AddNode(tiny.NewNode(3)) diff --git a/v2/kadt/kadt.go b/v2/kadt/kadt.go index 6e6aa8c2..741fc781 100644 --- a/v2/kadt/kadt.go +++ b/v2/kadt/kadt.go @@ -1,4 +1,4 @@ -// Package kadt contains the kademlia types for interacting with go-kademlia. +// Package kadt contains the kademlia types for interacting with go-libdht. 
package kadt import ( @@ -6,8 +6,8 @@ import ( "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" - "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-libdht/kad" + "github.com/plprobelab/go-libdht/kad/key/bit256" ) // Key is a type alias for the type of key that's used with this DHT @@ -15,7 +15,7 @@ import ( // of the actual key that's used for calculating Kademlia distance. That's // why this Key struct also holds the preimage bytes. type Key struct { - key key.Key256 + key bit256.Key preimage []byte } @@ -27,7 +27,7 @@ var _ kad.Key[Key] = (*Key)(nil) func NewKey(preimage []byte) Key { h := sha256.Sum256(preimage) return Key{ - key: key.NewKey256(h[:]), + key: bit256.NewKey(h[:]), preimage: preimage, } } @@ -66,7 +66,7 @@ func (k Key) HexString() string { // PeerID is a type alias for [peer.ID] that implements the [kad.NodeID] // interface. This means we can use PeerID for any operation that interfaces -// with go-kademlia. +// with go-libdht. type PeerID peer.ID // assertion that PeerID implements the kad.NodeID interface @@ -92,7 +92,7 @@ func (p PeerID) Equal(o PeerID) bool { // AddrInfo is a type that wraps peer.AddrInfo and implements the kad.NodeInfo // interface. This means we can use AddrInfo for any operation that interfaces -// with go-kademlia. +// with go-libdht. // // A more accurate name would be PeerInfo or NodeInfo. However, for consistency // and coherence with [peer.AddrInfo] we also name it AddrInfo. @@ -100,9 +100,6 @@ type AddrInfo struct { Info peer.AddrInfo } -// assertion that AddrInfo implements the [kad.NodeInfo] interface -var _ kad.NodeInfo[Key, ma.Multiaddr] = (*AddrInfo)(nil) - // ID returns the [kad.NodeID] of this peer's information struct. func (ai AddrInfo) ID() kad.NodeID[Key] { return PeerID(ai.Info.ID)