From f88eb9ea603535514d5bd2289a1ea034c89f5350 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Mon, 28 Aug 2023 17:18:26 +0200 Subject: [PATCH 01/26] WIP --- v2/dht.go | 2 +- v2/pb/dht.aux.go | 131 ++++++++++++++++++++++++++++++++++++++++++++ v2/router.go | 136 +++++++++++++++++++++++++++++++++++++++++----- v2/router_test.go | 47 ++++++++++++++++ v2/routing.go | 30 +++++++++- 5 files changed, 328 insertions(+), 18 deletions(-) create mode 100644 v2/router_test.go diff --git a/v2/dht.go b/v2/dht.go index be34d685..1883a580 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -110,7 +110,7 @@ func New(h host.Host, cfg *Config) (*DHT, error) { } // instantiate a new Kademlia DHT coordinator. - d.kad, err = kademlia.NewDht[key.Key256, ma.Multiaddr](nid, d, d.rt, nil) + d.kad, err = kademlia.NewDht[key.Key256, ma.Multiaddr](nid, &Router{host: h}, d.rt, nil) if err != nil { return nil, fmt.Errorf("new coordinator: %w", err) } diff --git a/v2/pb/dht.aux.go b/v2/pb/dht.aux.go index abc589c1..891ff15e 100644 --- a/v2/pb/dht.aux.go +++ b/v2/pb/dht.aux.go @@ -1,6 +1,12 @@ package dht_pb import ( + "fmt" + + mh "github.com/multiformats/go-multihash" + mhreg "github.com/multiformats/go-multihash/core" + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" "golang.org/x/exp/slog" "github.com/libp2p/go-libp2p/core/peer" @@ -51,6 +57,61 @@ func (m *Message) ProviderAddrInfos() []peer.AddrInfo { return addrInfos } +// CloserPeersAddrInfos returns the peer.AddrInfo's of the closer peers in this +// message. +func (m *Message) CloserPeersAddrInfos() []peer.AddrInfo { + if m == nil { + return nil + } + + addrInfos := make([]peer.AddrInfo, 0, len(m.CloserPeers)) + for _, p := range m.CloserPeers { + addrInfos = append(addrInfos, peer.AddrInfo{ + ID: peer.ID(p.Id), + Addrs: p.Addresses(), + }) + } + + return addrInfos +} + +func (m *Message) CloserNodes() []kad.NodeInfo[key.Key256, ma.Multiaddr] { + closerPeers := m.GetCloserPeers() + if closerPeers == nil { + return []kad.NodeInfo[key.Key256, ma.Multiaddr]{} + } + return ParsePeers(closerPeers) +} + +func PBPeerToPeerInfo(pbp Message_Peer) (*AddrInfo, error) { + addrs := make([]ma.Multiaddr, 0, len(pbp.Addrs)) + for _, a := range pbp.Addrs { + addr, err := ma.NewMultiaddrBytes(a) + if err == nil { + addrs = append(addrs, addr) + } + } + if len(addrs) == 0 { + return nil, fmt.Errorf("asdfsdf") + } + + return NewAddrInfo(peer.AddrInfo{ + ID: peer.ID(pbp.Id), + Addrs: addrs, + }), nil +} + +func ParsePeers(pbps []Message_Peer) []kad.NodeInfo[key.Key256, ma.Multiaddr] { + peers := make([]kad.NodeInfo[key.Key256, ma.Multiaddr], 0, len(pbps)) + for _, p := range pbps { + pi, err := PBPeerToPeerInfo(p) + if err == nil { + peers = append(peers, pi) + } + } + return peers +} + // Addresses returns the Multiaddresses associated with the Message_Peer entry func (m *Message_Peer) Addresses() []ma.Multiaddr { if m == nil { @@ -70,3 +131,73 @@ func (m *Message_Peer) Addresses() []ma.Multiaddr { return maddrs } + +type KadKey = key.Key256 + +type AddrInfo struct { + peer.AddrInfo + id *PeerID +} + +var _ kad.NodeInfo[KadKey, ma.Multiaddr] = (*AddrInfo)(nil) + +func NewAddrInfo(ai peer.AddrInfo) *AddrInfo { + return &AddrInfo{ + AddrInfo: ai, + id: NewPeerID(ai.ID), + } +} + +func (ai AddrInfo) Key() KadKey { + return ai.id.Key() +} + +func (ai AddrInfo) String() string { + return ai.id.String() +} + +func (ai AddrInfo) PeerID() *PeerID { + return ai.id +} + +func (ai AddrInfo) ID() kad.NodeID[KadKey] { + return ai.id +} + +func (ai AddrInfo) 
Addresses() []ma.Multiaddr { + addrs := make([]ma.Multiaddr, len(ai.Addrs)) + copy(addrs, ai.Addrs) + return addrs +} + +type PeerID struct { + peer.ID +} + +var _ kad.NodeID[KadKey] = (*PeerID)(nil) + +func NewPeerID(p peer.ID) *PeerID { + return &PeerID{p} +} + +func (id PeerID) Key() KadKey { + hasher, _ := mhreg.GetHasher(mh.SHA2_256) + hasher.Write([]byte(id.ID)) + return key.NewKey256(hasher.Sum(nil)) +} + +func (id PeerID) NodeID() kad.NodeID[KadKey] { + return &id +} + +func (m *Message) Protocol() string { + return "/test/1.0.0" +} + +func (m *Message) Target() key.Key256 { + return key.NewKey256(m.Key) +} + +func (m *Message) EmptyResponse() kad.Response[key.Key256, ma.Multiaddr] { + return &Message{} +} diff --git a/v2/router.go b/v2/router.go index 1879795d..2fe469f4 100644 --- a/v2/router.go +++ b/v2/router.go @@ -5,37 +5,145 @@ import ( "fmt" "time" - "github.com/iand/zikade/kademlia" + "github.com/libp2p/go-msgio" + + pb "github.com/libp2p/go-libp2p-kad-dht/v2/pb" + "github.com/libp2p/go-libp2p/core/peer" + "google.golang.org/protobuf/proto" + + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-msgio/pbio" + "google.golang.org/protobuf/reflect/protoreflect" + + "github.com/iand/zikade/kademlia" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/protocol" ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/network/address" ) -var _ kademlia.Router[key.Key256, ma.Multiaddr] = (*DHT)(nil) +type Router struct { + host host.Host +} + +var _ kademlia.Router[key.Key256, ma.Multiaddr] = (*Router)(nil) + +func WriteMsg(s network.Stream, msg protoreflect.ProtoMessage) error { + w := pbio.NewDelimitedWriter(s) + return w.WriteMsg(msg) +} + +func ReadMsg(s network.Stream, msg proto.Message) error { + r := pbio.NewDelimitedReader(s, network.MessageSizeMax) + return r.ReadMsg(msg) +} + +type ProtoKadMessage interface { + proto.Message +} + +type ProtoKadRequestMessage[K kad.Key[K], A kad.Address[A]] interface { + ProtoKadMessage + kad.Request[K, A] +} + +type ProtoKadResponseMessage[K kad.Key[K], A kad.Address[A]] interface { + ProtoKadMessage + kad.Response[K, A] +} + +func (r *Router) SendMessage(ctx context.Context, to kad.NodeInfo[key.Key256, ma.Multiaddr], protoID address.ProtocolID, req kad.Request[key.Key256, ma.Multiaddr]) (kad.Response[key.Key256, ma.Multiaddr], error) { + if err := r.AddNodeInfo(ctx, to, time.Hour); err != nil { + return nil, fmt.Errorf("add node info: %w", err) + } + + protoReq, ok := req.(ProtoKadMessage) + if !ok { + return nil, fmt.Errorf("aaah ProtoKadMessage") + } + + p := peer.ID(to.ID().(nodeID)) + + if len(r.host.Peerstore().Addrs(p)) == 0 { + return nil, fmt.Errorf("aaah ProtoKadMessage") + } + + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + + var err error -func (d *DHT) SendMessage(ctx context.Context, to kad.NodeInfo[key.Key256, ma.Multiaddr], protoID address.ProtocolID, req kad.Request[key.Key256, ma.Multiaddr]) (kad.Response[key.Key256, ma.Multiaddr], error) { - s, err := d.host.NewStream(ctx, peer.ID(to.ID().(nodeID)), d.cfg.ProtocolID) + var s network.Stream + s, err = r.host.NewStream(ctx, p, protocol.ID(protoID)) if err != nil { - return nil, fmt.Errorf("new stream: %w", err) + return nil, fmt.Errorf("stream creation: %w", err) } - defer d.logErr(s.Close(), "failed to close stream") + defer s.Close() - return nil, nil + w := pbio.NewDelimitedWriter(s) + 
reader := msgio.NewVarintReaderSize(s, network.MessageSizeMax) + + err = w.WriteMsg(protoReq) + if err != nil { + return nil, fmt.Errorf("write message: %w", err) + } + + data, err := reader.ReadMsg() + if err != nil { + return nil, fmt.Errorf("read message: %w", err) + } + protoResp := pb.Message{} + if err = protoResp.Unmarshal(data); err != nil { + return nil, err + } + + for _, info := range protoResp.CloserPeersAddrInfos() { + _ = r.AddNodeInfo(ctx, nodeInfo{ + info: info, + }, time.Hour) + } + + return &protoResp, err } -func (d *DHT) AddNodeInfo(ctx context.Context, info kad.NodeInfo[key.Key256, ma.Multiaddr], ttl time.Duration) error { - // TODO implement me - panic("implement me") +func (r *Router) AddNodeInfo(ctx context.Context, info kad.NodeInfo[key.Key256, ma.Multiaddr], ttl time.Duration) error { + p := peer.ID(info.ID().(nodeID)) + + ai := peer.AddrInfo{ + ID: p, + Addrs: info.Addresses(), + } + + // Don't add addresses for self or our connected peers. We have better ones. + if ai.ID == r.host.ID() || + r.host.Network().Connectedness(ai.ID) == network.Connected { + return nil + } + r.host.Peerstore().AddAddrs(ai.ID, ai.Addrs, ttl) + return nil } -func (d *DHT) GetNodeInfo(ctx context.Context, id kad.NodeID[key.Key256]) (kad.NodeInfo[key.Key256, ma.Multiaddr], error) { +func (r *Router) GetNodeInfo(ctx context.Context, id kad.NodeID[key.Key256]) (kad.NodeInfo[key.Key256, ma.Multiaddr], error) { // TODO implement me panic("implement me") } -func (d *DHT) GetClosestNodes(ctx context.Context, to kad.NodeInfo[key.Key256, ma.Multiaddr], target key.Key256) ([]kad.NodeInfo[key.Key256, ma.Multiaddr], error) { - // TODO implement me - panic("implement me") +func (r *Router) GetClosestNodes(ctx context.Context, to kad.NodeInfo[key.Key256, ma.Multiaddr], target key.Key256) ([]kad.NodeInfo[key.Key256, ma.Multiaddr], error) { + resp, err := r.SendMessage(ctx, to, address.ProtocolID(ProtocolIPFS), FindKeyRequest(target)) + if err != nil { + return nil, err + } + return resp.CloserNodes(), nil +} + +func FindKeyRequest(k key.Key256) *pb.Message { + marshalledKey, _ := k.MarshalBinary() + return &pb.Message{ + Type: pb.Message_FIND_NODE, + Key: marshalledKey, + } } diff --git a/v2/router_test.go b/v2/router_test.go new file mode 100644 index 00000000..010a1b1f --- /dev/null +++ b/v2/router_test.go @@ -0,0 +1,47 @@ +package dht + +import ( + "context" + "testing" + + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" + + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/multiformats/go-multiaddr" + "github.com/stretchr/testify/require" +) + +func TestRouter(t *testing.T) { + d := newTestDHT(t) + ctx := context.Background() + + // friend is the first peer we know in the IPFS DHT network (bootstrap node) + friendID, err := peer.Decode("12D3KooWGjgvfDkpuVAoNhd7PRRvMTEG4ZgzHBFURqDe1mqEzAMS") + require.NoError(t, err) + + // multiaddress of friend + friendAddr, err := multiaddr.NewMultiaddr("/ip4/45.32.75.236/tcp/4001") + require.NoError(t, err) + + t.Log("connecting...") + friendInfo := peer.AddrInfo{ID: friendID, Addrs: []multiaddr.Multiaddr{friendAddr}} + err = d.host.Connect(ctx, friendInfo) + require.NoError(t, err) + t.Log("connected") + + // target is the peer we want to find + target, err := peer.Decode("12D3KooWGWcyxn3JfihYiu2HspbE5XHzfgZiLwihVCeyXQQU8yC1") + require.NoError(t, err) + + err = d.kad.AddNodes(ctx, []kad.NodeInfo[key.Key256, multiaddr.Multiaddr]{ + nodeInfo{info: friendInfo}, + }) + require.NoError(t, err) + + targetInfo, err := 
d.FindPeer(ctx, target) + require.NoError(t, err) + t.Log(targetInfo.ID) + t.Log(targetInfo.Addrs) +} diff --git a/v2/routing.go b/v2/routing.go index 6107a30d..490d583b 100644 --- a/v2/routing.go +++ b/v2/routing.go @@ -6,6 +6,12 @@ import ( "fmt" "time" + "golang.org/x/exp/slog" + + "github.com/iand/zikade/core" + ma "github.com/multiformats/go-multiaddr" + "github.com/plprobelab/go-kademlia/key" + "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" record "github.com/libp2p/go-libp2p-record" @@ -23,7 +29,7 @@ func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { ctx, span := tracer.Start(ctx, "DHT.FindPeer") defer span.End() - // First check locally. If are or were recently connected to the peer, + // First check locally. If we are or were recently connected to the peer, // return the addresses from our peerstore unless the information doesn't // contain any. switch d.host.Network().Connectedness(id) { @@ -36,9 +42,27 @@ func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { // we're } - // TODO reach out to Zikade + target := nodeID(id) - panic("implement me") + var foundNode core.Node[key.Key256, ma.Multiaddr] + fn := func(ctx context.Context, node core.Node[key.Key256, ma.Multiaddr], stats core.QueryStats) error { + slog.Info("visiting node", "id", node.ID()) + if key.Equal(node.ID().Key(), target.Key()) { + foundNode = node + return core.SkipRemaining + } + return nil + } + + _, err := core.Query[key.Key256, ma.Multiaddr](ctx, d.kad, target.Key(), fn) + if err != nil { + return peer.AddrInfo{}, fmt.Errorf("failed to run query: %w", err) + } + + return peer.AddrInfo{ + ID: peer.ID(foundNode.ID().(nodeID)), + Addrs: foundNode.Addresses(), + }, nil } func (d *DHT) Provide(ctx context.Context, c cid.Cid, brdcst bool) error { From 3898f39346c6067cf9e9a447e0e9ae573d69c6fd Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Tue, 29 Aug 2023 18:32:30 +0200 Subject: [PATCH 02/26] revise protobuf --- v2/handlers.go | 10 +- v2/handlers_test.go | 7 +- v2/pb/.gitignore | 1 + v2/pb/Makefile | 14 +- v2/pb/README.md | 18 + v2/pb/bytestring.go | 52 -- v2/pb/dht.aux.go | 14 +- v2/pb/dht.pb.go | 1137 ++++++++++++------------------------------- v2/pb/dht.proto | 24 +- v2/router.go | 2 +- v2/stream.go | 5 +- v2/stream_test.go | 6 +- 12 files changed, 364 insertions(+), 926 deletions(-) create mode 100644 v2/pb/.gitignore create mode 100644 v2/pb/README.md delete mode 100644 v2/pb/bytestring.go diff --git a/v2/handlers.go b/v2/handlers.go index a0215433..88fcebbc 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -36,7 +36,7 @@ func (d *DHT) handleFindPeer(ctx context.Context, remote peer.ID, req *pb.Messag // if the remote is asking for us, short-circuit and return us only if target == d.host.ID() { - resp.CloserPeers = []pb.Message_Peer{pb.FromAddrInfo(pstore.PeerInfo(d.host.ID()))} + resp.CloserPeers = []*pb.Message_Peer{pb.FromAddrInfo(pstore.PeerInfo(d.host.ID()))} return resp, nil } @@ -138,7 +138,7 @@ func (d *DHT) handleGetValue(ctx context.Context, remote peer.ID, req *pb.Messag pset, ok := fetched.(*providerSet) if ok { - resp.ProviderPeers = make([]pb.Message_Peer, len(pset.providers)) + resp.ProviderPeers = make([]*pb.Message_Peer, len(pset.providers)) for i, p := range pset.providers { resp.ProviderPeers[i] = pb.FromAddrInfo(p) } @@ -213,7 +213,7 @@ func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Me return nil, fmt.Errorf("expected *providerSet value type, got: %T", pset) } - pbProviders := 
make([]pb.Message_Peer, len(pset.providers)) + pbProviders := make([]*pb.Message_Peer, len(pset.providers)) for i, p := range pset.providers { pbProviders[i] = pb.FromAddrInfo(p) } @@ -230,7 +230,7 @@ func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Me // closerPeers returns the closest peers to the given target key this host knows // about. It doesn't return 1) itself 2) the peer that asked for closer peers. -func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256) []pb.Message_Peer { +func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256) []*pb.Message_Peer { ctx, span := tracer.Start(ctx, "DHT.closerPeers") defer span.End() @@ -240,7 +240,7 @@ func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256 } // pre-allocated the result set slice. - filtered := make([]pb.Message_Peer, 0, len(peers)) + filtered := make([]*pb.Message_Peer, 0, len(peers)) for _, p := range peers { pid := peer.ID(p.(nodeID)) // TODO: type cast diff --git a/v2/handlers_test.go b/v2/handlers_test.go index f38a2ce6..a169072d 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -12,7 +12,8 @@ import ( "testing" "time" - "github.com/gogo/protobuf/proto" + "google.golang.org/protobuf/proto" + "github.com/ipfs/boxo/ipns" "github.com/ipfs/boxo/path" ds "github.com/ipfs/go-datastore" @@ -1172,7 +1173,7 @@ func newAddrInfo(t testing.TB) peer.AddrInfo { } func newAddProviderRequest(key []byte, addrInfos ...peer.AddrInfo) *pb.Message { - providerPeers := make([]pb.Message_Peer, len(addrInfos)) + providerPeers := make([]*pb.Message_Peer, len(addrInfos)) for i, addrInfo := range addrInfos { providerPeers[i] = pb.FromAddrInfo(addrInfo) } @@ -1311,7 +1312,7 @@ func TestDHT_handleAddProvider_empty_provider_peers(t *testing.T) { // construct request req := newAddProviderRequest([]byte("random-key")) - req.ProviderPeers = make([]pb.Message_Peer, 0) // overwrite + req.ProviderPeers = make([]*pb.Message_Peer, 0) // overwrite // do the request _, err := d.handleAddProvider(ctx, newPeerID(t), req) diff --git a/v2/pb/.gitignore b/v2/pb/.gitignore new file mode 100644 index 00000000..c1addd0d --- /dev/null +++ b/v2/pb/.gitignore @@ -0,0 +1 @@ +github.com \ No newline at end of file diff --git a/v2/pb/Makefile b/v2/pb/Makefile index eb14b576..99ff32d1 100644 --- a/v2/pb/Makefile +++ b/v2/pb/Makefile @@ -1,11 +1,11 @@ -PB = $(wildcard *.proto) -GO = $(PB:.proto=.pb.go) +all: clone reset + protoc --go_out=./ --go_opt=Mgithub.com/libp2p/go-libp2p-record/pb/record.proto=github.com/libp2p/go-libp2p-record/pb ./dht.proto -all: $(GO) +clone: + git clone --depth=1 git@github.com:libp2p/go-libp2p-record.git github.com/libp2p/go-libp2p-record || true -%.pb.go: %.proto - protoc --proto_path=$(GOPATH)/src:. --gogofast_out=. $< +reset: + rm dht.pb.go clean: - rm -f *.pb.go - rm -f *.go + rm -rf github.com diff --git a/v2/pb/README.md b/v2/pb/README.md new file mode 100644 index 00000000..afeaab1b --- /dev/null +++ b/v2/pb/README.md @@ -0,0 +1,18 @@ +# Protocol Buffers + +To generate the protobuf definitions run: + +```shell +make +``` + +This command will clone the `libp2p/go-libp2p-record` repository into this +directory (git-ignored) and run the `protoc` command to generate the `dht.pb.go` file for the +`dht.proto` protobuf definition. We need `go-libp2p-record` because `dht.proto` +reference the `Record` protobuf definition from that repository. 
+ +To clean up after you have generated the `dht.pb.go` file, you can run: + +```shell +make clean +``` \ No newline at end of file diff --git a/v2/pb/bytestring.go b/v2/pb/bytestring.go deleted file mode 100644 index 5099a991..00000000 --- a/v2/pb/bytestring.go +++ /dev/null @@ -1,52 +0,0 @@ -package dht_pb - -import ( - "encoding/json" - "fmt" -) - -type byteString string - -func (b *byteString) MarshalTo(data []byte) (int, error) { - return copy(data, *b), nil -} - -func (b *byteString) Size() int { - return len(*b) -} - -func (b *byteString) Marshal() ([]byte, error) { - if b == nil { - return nil, fmt.Errorf("empty byte string") - } - return []byte(*b), nil -} - -func (b *byteString) Unmarshal(data []byte) error { - *b = byteString(data) - return nil -} - -func (b *byteString) Equal(other *byteString) bool { - if b != nil && other != nil { - return *b == *other - } - return b == nil && other == nil -} - -func (b *byteString) MarshalJSON() ([]byte, error) { - if b == nil { - return nil, fmt.Errorf("empty byte string") - } - return json.Marshal([]byte(*b)) -} - -func (b *byteString) UnmarshalJSON(data []byte) error { - var buf []byte - err := json.Unmarshal(data, &buf) - if err != nil { - return err - } - *b = byteString(buf) - return nil -} diff --git a/v2/pb/dht.aux.go b/v2/pb/dht.aux.go index 891ff15e..ae25c093 100644 --- a/v2/pb/dht.aux.go +++ b/v2/pb/dht.aux.go @@ -1,6 +1,7 @@ package dht_pb import ( + "bytes" "fmt" mh "github.com/multiformats/go-multihash" @@ -14,9 +15,9 @@ import ( ) // FromAddrInfo constructs a Message_Peer from the given peer.AddrInfo -func FromAddrInfo(p peer.AddrInfo) Message_Peer { - mp := Message_Peer{ - Id: byteString(p.ID), +func FromAddrInfo(p peer.AddrInfo) *Message_Peer { + mp := &Message_Peer{ + Id: []byte(p.ID), Addrs: make([][]byte, len(p.Addrs)), } @@ -30,9 +31,8 @@ func FromAddrInfo(p peer.AddrInfo) Message_Peer { // ContainsCloserPeer returns true if the provided peer ID is among the // list of closer peers contained in this message. func (m *Message) ContainsCloserPeer(pid peer.ID) bool { - b := byteString(pid) for _, cp := range m.CloserPeers { - if cp.Id.Equal(&b) { + if bytes.Equal(cp.Id, []byte(pid)) { return true } } @@ -83,7 +83,7 @@ func (m *Message) CloserNodes() []kad.NodeInfo[key.Key256, ma.Multiaddr] { return ParsePeers(closerPeers) } -func PBPeerToPeerInfo(pbp Message_Peer) (*AddrInfo, error) { +func PBPeerToPeerInfo(pbp *Message_Peer) (*AddrInfo, error) { addrs := make([]ma.Multiaddr, 0, len(pbp.Addrs)) for _, a := range pbp.Addrs { addr, err := ma.NewMultiaddrBytes(a) @@ -101,7 +101,7 @@ func PBPeerToPeerInfo(pbp Message_Peer) (*AddrInfo, error) { }), nil } -func ParsePeers(pbps []Message_Peer) []kad.NodeInfo[key.Key256, ma.Multiaddr] { +func ParsePeers(pbps []*Message_Peer) []kad.NodeInfo[key.Key256, ma.Multiaddr] { peers := make([]kad.NodeInfo[key.Key256, ma.Multiaddr], 0, len(pbps)) for _, p := range pbps { pi, err := PBPeerToPeerInfo(p) diff --git a/v2/pb/dht.pb.go b/v2/pb/dht.pb.go index dd317f5e..36e7a925 100644 --- a/v2/pb/dht.pb.go +++ b/v2/pb/dht.pb.go @@ -1,32 +1,29 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 // source: dht.proto package dht_pb import ( - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" pb "github.com/libp2p/go-libp2p-record/pb" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var ( - _ = proto.Marshal - _ = fmt.Errorf - _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - +// MessageType represents the type of RPC being called. Based on the message +// type different fields of this message will be populated. The response +// of a message with a certain type will have the same type. type Message_MessageType int32 const ( @@ -38,30 +35,51 @@ const ( Message_PING Message_MessageType = 5 ) -var Message_MessageType_name = map[int32]string{ - 0: "PUT_VALUE", - 1: "GET_VALUE", - 2: "ADD_PROVIDER", - 3: "GET_PROVIDERS", - 4: "FIND_NODE", - 5: "PING", -} +// Enum value maps for Message_MessageType. +var ( + Message_MessageType_name = map[int32]string{ + 0: "PUT_VALUE", + 1: "GET_VALUE", + 2: "ADD_PROVIDER", + 3: "GET_PROVIDERS", + 4: "FIND_NODE", + 5: "PING", + } + Message_MessageType_value = map[string]int32{ + "PUT_VALUE": 0, + "GET_VALUE": 1, + "ADD_PROVIDER": 2, + "GET_PROVIDERS": 3, + "FIND_NODE": 4, + "PING": 5, + } +) -var Message_MessageType_value = map[string]int32{ - "PUT_VALUE": 0, - "GET_VALUE": 1, - "ADD_PROVIDER": 2, - "GET_PROVIDERS": 3, - "FIND_NODE": 4, - "PING": 5, +func (x Message_MessageType) Enum() *Message_MessageType { + p := new(Message_MessageType) + *p = x + return p } func (x Message_MessageType) String() string { - return proto.EnumName(Message_MessageType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Message_MessageType) Descriptor() protoreflect.EnumDescriptor { + return file_dht_proto_enumTypes[0].Descriptor() +} + +func (Message_MessageType) Type() protoreflect.EnumType { + return &file_dht_proto_enumTypes[0] } +func (x Message_MessageType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Message_MessageType.Descriptor instead. func (Message_MessageType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_616a434b24c97ff4, []int{0, 0} + return file_dht_proto_rawDescGZIP(), []int{0, 0} } type Message_ConnectionType int32 @@ -78,34 +96,63 @@ const ( Message_CANNOT_CONNECT Message_ConnectionType = 3 ) -var Message_ConnectionType_name = map[int32]string{ - 0: "NOT_CONNECTED", - 1: "CONNECTED", - 2: "CAN_CONNECT", - 3: "CANNOT_CONNECT", -} +// Enum value maps for Message_ConnectionType. 
+var ( + Message_ConnectionType_name = map[int32]string{ + 0: "NOT_CONNECTED", + 1: "CONNECTED", + 2: "CAN_CONNECT", + 3: "CANNOT_CONNECT", + } + Message_ConnectionType_value = map[string]int32{ + "NOT_CONNECTED": 0, + "CONNECTED": 1, + "CAN_CONNECT": 2, + "CANNOT_CONNECT": 3, + } +) -var Message_ConnectionType_value = map[string]int32{ - "NOT_CONNECTED": 0, - "CONNECTED": 1, - "CAN_CONNECT": 2, - "CANNOT_CONNECT": 3, +func (x Message_ConnectionType) Enum() *Message_ConnectionType { + p := new(Message_ConnectionType) + *p = x + return p } func (x Message_ConnectionType) String() string { - return proto.EnumName(Message_ConnectionType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Message_ConnectionType) Descriptor() protoreflect.EnumDescriptor { + return file_dht_proto_enumTypes[1].Descriptor() +} + +func (Message_ConnectionType) Type() protoreflect.EnumType { + return &file_dht_proto_enumTypes[1] +} + +func (x Message_ConnectionType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } +// Deprecated: Use Message_ConnectionType.Descriptor instead. func (Message_ConnectionType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_616a434b24c97ff4, []int{0, 1} + return file_dht_proto_rawDescGZIP(), []int{0, 1} } +// Message is the top-level envelope for exchanging +// information with the DHT protocol. type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // defines what type of message it is. Type Message_MessageType `protobuf:"varint,1,opt,name=type,proto3,enum=dht.pb.Message_MessageType" json:"type,omitempty"` // defines what coral cluster level this query/response belongs to. // in case we want to implement coral's cluster rings in the future. - ClusterLevelRaw int32 `protobuf:"varint,10,opt,name=clusterLevelRaw,proto3" json:"clusterLevelRaw,omitempty"` + // + // Deprecated: Marked as deprecated in dht.proto. + ClusterLevelRaw int32 `protobuf:"varint,10,opt,name=cluster_level_raw,json=clusterLevelRaw,proto3" json:"cluster_level_raw,omitempty"` // Used to specify the key associated with this message. 
// PUT_VALUE, GET_VALUE, ADD_PROVIDER, GET_PROVIDERS Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` @@ -114,863 +161,283 @@ type Message struct { Record *pb.Record `protobuf:"bytes,3,opt,name=record,proto3" json:"record,omitempty"` // Used to return peers closer to a key in a query // GET_VALUE, GET_PROVIDERS, FIND_NODE - CloserPeers []Message_Peer `protobuf:"bytes,8,rep,name=closerPeers,proto3" json:"closerPeers"` + CloserPeers []*Message_Peer `protobuf:"bytes,8,rep,name=closer_peers,json=closerPeers,proto3" json:"closer_peers,omitempty"` // Used to return Providers // GET_VALUE, ADD_PROVIDER, GET_PROVIDERS - ProviderPeers []Message_Peer `protobuf:"bytes,9,rep,name=providerPeers,proto3" json:"providerPeers"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ProviderPeers []*Message_Peer `protobuf:"bytes,9,rep,name=provider_peers,json=providerPeers,proto3" json:"provider_peers,omitempty"` } -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} -func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_616a434b24c97ff4, []int{0} +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_dht_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Message) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Message.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_dht_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *Message) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message.Merge(m, src) -} - -func (m *Message) XXX_Size() int { - return m.Size() -} - -func (m *Message) XXX_DiscardUnknown() { - xxx_messageInfo_Message.DiscardUnknown(m) +// Deprecated: Use Message.ProtoReflect.Descriptor instead. +func (*Message) Descriptor() ([]byte, []int) { + return file_dht_proto_rawDescGZIP(), []int{0} } -var xxx_messageInfo_Message proto.InternalMessageInfo - -func (m *Message) GetType() Message_MessageType { - if m != nil { - return m.Type +func (x *Message) GetType() Message_MessageType { + if x != nil { + return x.Type } return Message_PUT_VALUE } -func (m *Message) GetClusterLevelRaw() int32 { - if m != nil { - return m.ClusterLevelRaw +// Deprecated: Marked as deprecated in dht.proto. 
+func (x *Message) GetClusterLevelRaw() int32 { + if x != nil { + return x.ClusterLevelRaw } return 0 } -func (m *Message) GetKey() []byte { - if m != nil { - return m.Key +func (x *Message) GetKey() []byte { + if x != nil { + return x.Key } return nil } -func (m *Message) GetRecord() *pb.Record { - if m != nil { - return m.Record +func (x *Message) GetRecord() *pb.Record { + if x != nil { + return x.Record } return nil } -func (m *Message) GetCloserPeers() []Message_Peer { - if m != nil { - return m.CloserPeers +func (x *Message) GetCloserPeers() []*Message_Peer { + if x != nil { + return x.CloserPeers } return nil } -func (m *Message) GetProviderPeers() []Message_Peer { - if m != nil { - return m.ProviderPeers +func (x *Message) GetProviderPeers() []*Message_Peer { + if x != nil { + return x.ProviderPeers } return nil } type Message_Peer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // ID of a given peer. - Id byteString `protobuf:"bytes,1,opt,name=id,proto3,customtype=byteString" json:"id"` + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // multiaddrs for a given peer Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs,proto3" json:"addrs,omitempty"` // used to signal the sender's connection capabilities to the peer - Connection Message_ConnectionType `protobuf:"varint,3,opt,name=connection,proto3,enum=dht.pb.Message_ConnectionType" json:"connection,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Connection Message_ConnectionType `protobuf:"varint,3,opt,name=connection,proto3,enum=dht.pb.Message_ConnectionType" json:"connection,omitempty"` } -func (m *Message_Peer) Reset() { *m = Message_Peer{} } -func (m *Message_Peer) String() string { return proto.CompactTextString(m) } -func (*Message_Peer) ProtoMessage() {} -func (*Message_Peer) Descriptor() ([]byte, []int) { - return fileDescriptor_616a434b24c97ff4, []int{0, 0} -} - -func (m *Message_Peer) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} - -func (m *Message_Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Message_Peer.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Message_Peer) Reset() { + *x = Message_Peer{} + if protoimpl.UnsafeEnabled { + mi := &file_dht_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Message_Peer) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message_Peer.Merge(m, src) +func (x *Message_Peer) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Message_Peer) XXX_Size() int { - return m.Size() -} - -func (m *Message_Peer) XXX_DiscardUnknown() { - xxx_messageInfo_Message_Peer.DiscardUnknown(m) -} +func (*Message_Peer) ProtoMessage() {} -var xxx_messageInfo_Message_Peer proto.InternalMessageInfo - -func (m *Message_Peer) GetAddrs() [][]byte { - if m != nil { - return m.Addrs - } - return nil -} - -func (m *Message_Peer) GetConnection() Message_ConnectionType { - if m != nil { - return m.Connection - } - return Message_NOT_CONNECTED -} - -func init() { - proto.RegisterEnum("dht.pb.Message_MessageType", Message_MessageType_name, Message_MessageType_value) - proto.RegisterEnum("dht.pb.Message_ConnectionType", Message_ConnectionType_name, Message_ConnectionType_value) - 
proto.RegisterType((*Message)(nil), "dht.pb.Message") - proto.RegisterType((*Message_Peer)(nil), "dht.pb.Message.Peer") -} - -func init() { proto.RegisterFile("dht.proto", fileDescriptor_616a434b24c97ff4) } - -var fileDescriptor_616a434b24c97ff4 = []byte{ - // 469 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xb1, 0x6f, 0x9b, 0x40, - 0x18, 0xc5, 0x73, 0x80, 0xdd, 0xf8, 0x03, 0x3b, 0xe4, 0x94, 0x01, 0xb9, 0x92, 0x83, 0x3c, 0xd1, - 0xc1, 0x20, 0xd1, 0xb5, 0xaa, 0x6a, 0x03, 0x8d, 0x2c, 0xa5, 0xd8, 0xba, 0x38, 0xe9, 0x68, 0x19, - 0xb8, 0x12, 0x54, 0xd7, 0x87, 0x00, 0xa7, 0xf2, 0xd6, 0x3f, 0x2f, 0x63, 0xe7, 0x0e, 0x51, 0xe5, - 0xa9, 0x7f, 0x46, 0xc5, 0x11, 0x5a, 0xec, 0x25, 0x13, 0xef, 0x7d, 0xf7, 0x7e, 0xe2, 0xdd, 0xa7, - 0x83, 0x4e, 0x74, 0x5f, 0x98, 0x69, 0xc6, 0x0a, 0x86, 0xdb, 0x5c, 0x06, 0x7d, 0x3b, 0x4e, 0x8a, - 0xfb, 0x6d, 0x60, 0x86, 0xec, 0x9b, 0xb5, 0x4e, 0x82, 0xd4, 0x4e, 0xad, 0x98, 0x8d, 0x2a, 0x35, - 0xca, 0x68, 0xc8, 0xb2, 0xc8, 0x4a, 0x03, 0xab, 0x52, 0x15, 0xdb, 0x1f, 0x35, 0x98, 0x98, 0xc5, - 0xcc, 0xe2, 0xe3, 0x60, 0xfb, 0x85, 0x3b, 0x6e, 0xb8, 0xaa, 0xe2, 0xc3, 0x3f, 0x12, 0xbc, 0xfa, - 0x44, 0xf3, 0x7c, 0x15, 0x53, 0x6c, 0x81, 0x54, 0xec, 0x52, 0xaa, 0x21, 0x1d, 0x19, 0x3d, 0xfb, - 0xb5, 0x59, 0xb5, 0x30, 0x9f, 0x8f, 0xeb, 0xef, 0x62, 0x97, 0x52, 0xc2, 0x83, 0xd8, 0x80, 0xb3, - 0x70, 0xbd, 0xcd, 0x0b, 0x9a, 0x5d, 0xd3, 0x07, 0xba, 0x26, 0xab, 0xef, 0x1a, 0xe8, 0xc8, 0x68, - 0x91, 0xe3, 0x31, 0x56, 0x41, 0xfc, 0x4a, 0x77, 0x9a, 0xa0, 0x23, 0x43, 0x21, 0xa5, 0xc4, 0x6f, - 0xa0, 0x5d, 0xf5, 0xd6, 0x44, 0x1d, 0x19, 0xb2, 0x7d, 0x6e, 0xd6, 0xd7, 0x08, 0x4c, 0xc2, 0x15, - 0x79, 0x0e, 0xe0, 0x77, 0x20, 0x87, 0x6b, 0x96, 0xd3, 0x6c, 0x4e, 0x69, 0x96, 0x6b, 0xa7, 0xba, - 0x68, 0xc8, 0xf6, 0xc5, 0x71, 0xbd, 0xf2, 0x70, 0x22, 0x3d, 0x3e, 0x5d, 0x9e, 0x90, 0x66, 0x1c, - 0x7f, 0x80, 0x6e, 0x9a, 0xb1, 0x87, 0x24, 0xaa, 0xf9, 0xce, 0x8b, 0xfc, 0x21, 0xd0, 0xff, 0x81, - 0x40, 0x2a, 0x15, 0x1e, 0x82, 0x90, 0x44, 0x7c, 0x3d, 0xca, 0x04, 0x97, 0xc9, 0x5f, 0x4f, 0x97, - 0x10, 0xec, 0x0a, 0x7a, 0x53, 0x64, 0xc9, 0x26, 0x26, 0x42, 0x12, 0xe1, 0x0b, 0x68, 0xad, 0xa2, - 0x28, 0xcb, 0x35, 0x41, 0x17, 0x0d, 0x85, 0x54, 0x06, 0xbf, 0x07, 0x08, 0xd9, 0x66, 0x43, 0xc3, - 0x22, 0x61, 0x1b, 0x7e, 0xe3, 0x9e, 0x3d, 0x38, 0x6e, 0xe0, 0xfc, 0x4b, 0xf0, 0x1d, 0x37, 0x88, - 0x61, 0x02, 0x72, 0x63, 0xfd, 0xb8, 0x0b, 0x9d, 0xf9, 0xed, 0x62, 0x79, 0x37, 0xbe, 0xbe, 0xf5, - 0xd4, 0x93, 0xd2, 0x5e, 0x79, 0xb5, 0x45, 0x58, 0x05, 0x65, 0xec, 0xba, 0xcb, 0x39, 0x99, 0xdd, - 0x4d, 0x5d, 0x8f, 0xa8, 0x02, 0x3e, 0x87, 0x6e, 0x19, 0xa8, 0x27, 0x37, 0xaa, 0x58, 0x32, 0x1f, - 0xa7, 0xbe, 0xbb, 0xf4, 0x67, 0xae, 0xa7, 0x4a, 0xf8, 0x14, 0xa4, 0xf9, 0xd4, 0xbf, 0x52, 0x5b, - 0xc3, 0xcf, 0xd0, 0x3b, 0x2c, 0x52, 0xd2, 0xfe, 0x6c, 0xb1, 0x74, 0x66, 0xbe, 0xef, 0x39, 0x0b, - 0xcf, 0xad, 0xfe, 0xf8, 0xdf, 0x22, 0x7c, 0x06, 0xb2, 0x33, 0xf6, 0xeb, 0x84, 0x2a, 0x60, 0x0c, - 0x3d, 0x67, 0xec, 0x37, 0x28, 0x55, 0x9c, 0x28, 0x8f, 0xfb, 0x01, 0xfa, 0xb9, 0x1f, 0xa0, 0xdf, - 0xfb, 0x01, 0x0a, 0xda, 0xfc, 0xfd, 0xbd, 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x1a, 0xa1, - 0xbe, 0xf7, 0x02, 0x00, 0x00, -} - -func (m *Message) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Message) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Message) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.ClusterLevelRaw != 0 { - i = encodeVarintDht(dAtA, i, uint64(m.ClusterLevelRaw)) - i-- - dAtA[i] = 0x50 - } - if len(m.ProviderPeers) > 0 { - for iNdEx := len(m.ProviderPeers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ProviderPeers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDht(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - } - if len(m.CloserPeers) > 0 { - for iNdEx := len(m.CloserPeers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.CloserPeers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDht(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 +func (x *Message_Peer) ProtoReflect() protoreflect.Message { + mi := &file_dht_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - if m.Record != nil { - { - size, err := m.Record.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDht(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintDht(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if m.Type != 0 { - i = encodeVarintDht(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil + return mi.MessageOf(x) } -func (m *Message_Peer) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Message_Peer) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use Message_Peer.ProtoReflect.Descriptor instead. 
+func (*Message_Peer) Descriptor() ([]byte, []int) { + return file_dht_proto_rawDescGZIP(), []int{0, 0} } -func (m *Message_Peer) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Connection != 0 { - i = encodeVarintDht(dAtA, i, uint64(m.Connection)) - i-- - dAtA[i] = 0x18 +func (x *Message_Peer) GetId() []byte { + if x != nil { + return x.Id } - if len(m.Addrs) > 0 { - for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Addrs[iNdEx]) - copy(dAtA[i:], m.Addrs[iNdEx]) - i = encodeVarintDht(dAtA, i, uint64(len(m.Addrs[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - { - size := m.Id.Size() - i -= size - if _, err := m.Id.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintDht(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintDht(dAtA []byte, offset int, v uint64) int { - offset -= sovDht(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base + return nil } -func (m *Message) Size() (n int) { - if m == nil { - return 0 +func (x *Message_Peer) GetAddrs() [][]byte { + if x != nil { + return x.Addrs } - var l int - _ = l - if m.Type != 0 { - n += 1 + sovDht(uint64(m.Type)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovDht(uint64(l)) - } - if m.Record != nil { - l = m.Record.Size() - n += 1 + l + sovDht(uint64(l)) - } - if len(m.CloserPeers) > 0 { - for _, e := range m.CloserPeers { - l = e.Size() - n += 1 + l + sovDht(uint64(l)) - } - } - if len(m.ProviderPeers) > 0 { - for _, e := range m.ProviderPeers { - l = e.Size() - n += 1 + l + sovDht(uint64(l)) - } - } - if m.ClusterLevelRaw != 0 { - n += 1 + sovDht(uint64(m.ClusterLevelRaw)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return nil } -func (m *Message_Peer) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Id.Size() - n += 1 + l + sovDht(uint64(l)) - if len(m.Addrs) > 0 { - for _, b := range m.Addrs { - l = len(b) - n += 1 + l + sovDht(uint64(l)) - } - } - if m.Connection != 0 { - n += 1 + sovDht(uint64(m.Connection)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) +func (x *Message_Peer) GetConnection() Message_ConnectionType { + if x != nil { + return x.Connection } - return n -} - -func sovDht(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + return Message_NOT_CONNECTED } -func sozDht(x uint64) (n int) { - return sovDht(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +var File_dht_proto protoreflect.FileDescriptor + +var file_dht_proto_rawDesc = []byte{ + 0x0a, 0x09, 0x64, 0x68, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x64, 0x68, 0x74, + 0x2e, 0x70, 0x62, 0x1a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x6c, 0x69, 0x62, 0x70, 0x32, 0x70, 0x2f, 0x67, 0x6f, 0x2d, 0x6c, 0x69, 0x62, 0x70, 0x32, 0x70, + 0x2d, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x72, + 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcf, 0x04, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x1b, 0x2e, 0x64, 0x68, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 
+ 0x74, 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x11, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x72, 0x61, 0x77, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x0f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x52, 0x61, 0x77, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, + 0x70, 0x62, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, + 0x64, 0x12, 0x37, 0x0a, 0x0c, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x65, 0x72, + 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x64, 0x68, 0x74, 0x2e, 0x70, 0x62, + 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x0b, 0x63, + 0x6c, 0x6f, 0x73, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x3b, 0x0a, 0x0e, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x64, 0x68, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x73, 0x1a, 0x6c, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, + 0x61, 0x64, 0x64, 0x72, 0x73, 0x12, 0x3e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x64, 0x68, 0x74, 0x2e, + 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x69, 0x0a, 0x0b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x50, 0x55, 0x54, 0x5f, 0x56, 0x41, 0x4c, 0x55, + 0x45, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x47, 0x45, 0x54, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, + 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x44, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, + 0x45, 0x52, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x54, 0x5f, 0x50, 0x52, 0x4f, 0x56, + 0x49, 0x44, 0x45, 0x52, 0x53, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x49, 0x4e, 0x44, 0x5f, + 0x4e, 0x4f, 0x44, 0x45, 0x10, 0x04, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, + 0x22, 0x57, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, + 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, + 0x45, 0x44, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x41, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, + 0x45, 0x43, 0x54, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x5f, + 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x03, 0x42, 0x0b, 0x5a, 0x09, 0x2e, 0x2f, 0x3b, + 0x64, 0x68, 0x74, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func (m *Message) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire 
uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Message: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= Message_MessageType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthDht - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthDht - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDht - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDht - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Record == nil { - m.Record = &pb.Record{} - } - if err := m.Record.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CloserPeers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDht - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDht - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CloserPeers = append(m.CloserPeers, Message_Peer{}) - if err := m.CloserPeers[len(m.CloserPeers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProviderPeers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDht - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDht - } - if postIndex > l { 
- return io.ErrUnexpectedEOF - } - m.ProviderPeers = append(m.ProviderPeers, Message_Peer{}) - if err := m.ProviderPeers[len(m.ProviderPeers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterLevelRaw", wireType) - } - m.ClusterLevelRaw = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ClusterLevelRaw |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipDht(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDht - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} - -func (m *Message_Peer) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Peer: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Peer: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthDht - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthDht - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthDht - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthDht - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addrs = append(m.Addrs, make([]byte, postIndex-iNdEx)) - copy(m.Addrs[len(m.Addrs)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Connection", wireType) - } - m.Connection = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDht - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Connection |= Message_ConnectionType(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipDht(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) 
|| (iNdEx+skippy) < 0 { - return ErrInvalidLengthDht - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} +var ( + file_dht_proto_rawDescOnce sync.Once + file_dht_proto_rawDescData = file_dht_proto_rawDesc +) -func skipDht(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDht - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break +func file_dht_proto_rawDescGZIP() []byte { + file_dht_proto_rawDescOnce.Do(func() { + file_dht_proto_rawDescData = protoimpl.X.CompressGZIP(file_dht_proto_rawDescData) + }) + return file_dht_proto_rawDescData +} + +var file_dht_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_dht_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_dht_proto_goTypes = []interface{}{ + (Message_MessageType)(0), // 0: dht.pb.Message.MessageType + (Message_ConnectionType)(0), // 1: dht.pb.Message.ConnectionType + (*Message)(nil), // 2: dht.pb.Message + (*Message_Peer)(nil), // 3: dht.pb.Message.Peer + (*pb.Record)(nil), // 4: record.pb.Record +} +var file_dht_proto_depIdxs = []int32{ + 0, // 0: dht.pb.Message.type:type_name -> dht.pb.Message.MessageType + 4, // 1: dht.pb.Message.record:type_name -> record.pb.Record + 3, // 2: dht.pb.Message.closer_peers:type_name -> dht.pb.Message.Peer + 3, // 3: dht.pb.Message.provider_peers:type_name -> dht.pb.Message.Peer + 1, // 4: dht.pb.Message.Peer.connection:type_name -> dht.pb.Message.ConnectionType + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_dht_proto_init() } +func file_dht_proto_init() { + if File_dht_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_dht_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDht - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDht - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthDht - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupDht + file_dht_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message_Peer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, 
ErrInvalidLengthDht - } - if depth == 0 { - return iNdEx, nil } } - return 0, io.ErrUnexpectedEOF + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_dht_proto_rawDesc, + NumEnums: 2, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_dht_proto_goTypes, + DependencyIndexes: file_dht_proto_depIdxs, + EnumInfos: file_dht_proto_enumTypes, + MessageInfos: file_dht_proto_msgTypes, + }.Build() + File_dht_proto = out.File + file_dht_proto_rawDesc = nil + file_dht_proto_goTypes = nil + file_dht_proto_depIdxs = nil } - -var ( - ErrInvalidLengthDht = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowDht = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupDht = fmt.Errorf("proto: unexpected end of group") -) diff --git a/v2/pb/dht.proto b/v2/pb/dht.proto index 18bfd741..659230fd 100644 --- a/v2/pb/dht.proto +++ b/v2/pb/dht.proto @@ -1,17 +1,17 @@ -// In order to re-generate the golang packages for `Message` you will need... -// 1. Protobuf binary (tested with protoc 3.0.0). - https://github.com/gogo/protobuf/releases -// 2. Gogo Protobuf (tested with gogo 0.3). - https://github.com/gogo/protobuf -// 3. To have cloned `libp2p/go-libp2p-{record,kad-dht}` under the same directory. -// Now from `libp2p/go-libp2p-kad-dht/pb` you can run... -// `protoc --gogo_out=. --proto_path=../../go-libp2p-record/pb/ --proto_path=./ dht.proto` - syntax = "proto3"; package dht.pb; +option go_package = "./;dht_pb"; + import "github.com/libp2p/go-libp2p-record/pb/record.proto"; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +// Message is the top-level envelope for exchanging +// information with the DHT protocol. message Message { + + // MessageType represents the type of RPC being called. Based on the message + // type different fields of this message will be populated. The response + // of a message with a certain type will have the same type. enum MessageType { PUT_VALUE = 0; GET_VALUE = 1; @@ -38,7 +38,7 @@ message Message { message Peer { // ID of a given peer. - bytes id = 1 [(gogoproto.customtype) = "byteString", (gogoproto.nullable) = false]; + bytes id = 1; // multiaddrs for a given peer repeated bytes addrs = 2; @@ -52,7 +52,7 @@ message Message { // defines what coral cluster level this query/response belongs to. // in case we want to implement coral's cluster rings in the future. - int32 clusterLevelRaw = 10; + int32 cluster_level_raw = 10 [deprecated = true]; // Used to specify the key associated with this message. 
// PUT_VALUE, GET_VALUE, ADD_PROVIDER, GET_PROVIDERS @@ -64,9 +64,9 @@ message Message { // Used to return peers closer to a key in a query // GET_VALUE, GET_PROVIDERS, FIND_NODE - repeated Peer closerPeers = 8 [(gogoproto.nullable) = false]; + repeated Peer closer_peers = 8; // Used to return Providers // GET_VALUE, ADD_PROVIDER, GET_PROVIDERS - repeated Peer providerPeers = 9 [(gogoproto.nullable) = false]; + repeated Peer provider_peers = 9; } diff --git a/v2/router.go b/v2/router.go index 2fe469f4..eb6d97bb 100644 --- a/v2/router.go +++ b/v2/router.go @@ -97,7 +97,7 @@ func (r *Router) SendMessage(ctx context.Context, to kad.NodeInfo[key.Key256, ma return nil, fmt.Errorf("read message: %w", err) } protoResp := pb.Message{} - if err = protoResp.Unmarshal(data); err != nil { + if err = proto.Unmarshal(data, &protoResp); err != nil { return nil, err } diff --git a/v2/stream.go b/v2/stream.go index b5fb1341..2e8671bc 100644 --- a/v2/stream.go +++ b/v2/stream.go @@ -17,6 +17,7 @@ import ( "go.opencensus.io/stats" "go.opencensus.io/tag" "golang.org/x/exp/slog" + "google.golang.org/protobuf/proto" "github.com/libp2p/go-libp2p-kad-dht/v2/metrics" pb "github.com/libp2p/go-libp2p-kad-dht/v2/pb" @@ -77,6 +78,8 @@ func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error { return fmt.Errorf("set initial stream deadline: %w", err) } + // not using pbio because it doesn't support a pooled reader that optimizes + // memory allocations. reader := msgio.NewVarintReaderSize(s, network.MessageSizeMax) for { // 1. read message from stream @@ -187,7 +190,7 @@ func (d *DHT) streamUnmarshalMsg(ctx context.Context, slogger *slog.Logger, data defer span.End() var req pb.Message - if err := req.Unmarshal(data); err != nil { + if err := proto.Unmarshal(data, &req); err != nil { slogger.LogAttrs(ctx, slog.LevelDebug, "error unmarshalling message", slog.String("err", err.Error())) _ = stats.RecordWithTags(ctx, diff --git a/v2/stream_test.go b/v2/stream_test.go index c10d48e1..b23e05d6 100644 --- a/v2/stream_test.go +++ b/v2/stream_test.go @@ -14,6 +14,7 @@ import ( "github.com/libp2p/go-msgio" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" ) type testReadWriter struct { @@ -35,12 +36,11 @@ func (trw testReadWriter) ReadMsg() (*pb.Message, error) { } resp := &pb.Message{} - err = resp.Unmarshal(msg) - return resp, err + return resp, proto.Unmarshal(msg, resp) } func (trw testReadWriter) WriteMsg(msg *pb.Message) error { - data, err := msg.Marshal() + data, err := proto.Marshal(msg) if err != nil { return err } From d31e9c993d1e8a26b8cc0ac24f4097ce104dcb93 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Wed, 30 Aug 2023 14:08:15 +0200 Subject: [PATCH 03/26] remove gogo protobuf dependency --- v2/go.mod | 6 +++--- v2/handlers_test.go | 3 +-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/v2/go.mod b/v2/go.mod index 32888ee7..96a5ce32 100644 --- a/v2/go.mod +++ b/v2/go.mod @@ -4,7 +4,6 @@ go 1.20 require ( github.com/benbjohnson/clock v1.3.5 - github.com/gogo/protobuf v1.3.2 github.com/hashicorp/golang-lru/v2 v2.0.5 github.com/iand/zikade v0.0.0-20230824143824-d11f2132b4fb github.com/ipfs/boxo v0.12.0 @@ -17,6 +16,7 @@ require ( github.com/libp2p/go-msgio v0.3.0 github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-multiaddr v0.11.0 + github.com/multiformats/go-multihash v0.2.3 github.com/plprobelab/go-kademlia v0.0.0-20230823114513-9b9e606066c9 github.com/stretchr/testify v1.8.4 go.opencensus.io 
v0.24.0 @@ -24,6 +24,7 @@ require ( go.opentelemetry.io/otel/trace v1.16.0 go.uber.org/zap/exp v0.1.0 golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 + google.golang.org/protobuf v1.31.0 ) require ( @@ -42,6 +43,7 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect @@ -78,7 +80,6 @@ require ( github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-multibase v0.2.0 // indirect github.com/multiformats/go-multicodec v0.9.0 // indirect - github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-multistream v0.4.1 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/nxadm/tail v1.4.8 // indirect @@ -111,7 +112,6 @@ require ( golang.org/x/sys v0.11.0 // indirect golang.org/x/text v0.12.0 // indirect golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect - google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect ) diff --git a/v2/handlers_test.go b/v2/handlers_test.go index a169072d..bc7c336a 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -12,8 +12,6 @@ import ( "testing" "time" - "google.golang.org/protobuf/proto" - "github.com/ipfs/boxo/ipns" "github.com/ipfs/boxo/path" ds "github.com/ipfs/go-datastore" @@ -26,6 +24,7 @@ import ( ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" ) var rng = rand.New(rand.NewSource(1337)) From d2cfda6c9af29769de7aff08c757b550bc5d55ae Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Wed, 30 Aug 2023 14:30:23 +0200 Subject: [PATCH 04/26] WIP --- v2/router.go | 29 ++++++++++++++++++++++++++--- v2/router_test.go | 5 +++++ v2/routing.go | 2 +- 3 files changed, 32 insertions(+), 4 deletions(-) diff --git a/v2/router.go b/v2/router.go index eb6d97bb..249f1822 100644 --- a/v2/router.go +++ b/v2/router.go @@ -65,7 +65,19 @@ func (r *Router) SendMessage(ctx context.Context, to kad.NodeInfo[key.Key256, ma return nil, fmt.Errorf("aaah ProtoKadMessage") } - p := peer.ID(to.ID().(nodeID)) + var p peer.ID + nid, ok := to.ID().(nodeID) + if !ok { + ai, ok := to.(*pb.AddrInfo) + if !ok { + naddr := to.(*kademlia.NodeAddr[key.Key256, ma.Multiaddr]) + p = naddr.ID().(*pb.PeerID).ID + } else { + p = ai.AddrInfo.ID + } + } else { + p = peer.ID(nid) + } if len(r.host.Peerstore().Addrs(p)) == 0 { return nil, fmt.Errorf("aaah ProtoKadMessage") @@ -111,8 +123,19 @@ func (r *Router) SendMessage(ctx context.Context, to kad.NodeInfo[key.Key256, ma } func (r *Router) AddNodeInfo(ctx context.Context, info kad.NodeInfo[key.Key256, ma.Multiaddr], ttl time.Duration) error { - p := peer.ID(info.ID().(nodeID)) - + var p peer.ID + nid, ok := info.ID().(nodeID) + if !ok { + ai, ok := info.(*pb.AddrInfo) + if !ok { + naddr := info.(*kademlia.NodeAddr[key.Key256, ma.Multiaddr]) + p = naddr.ID().(*pb.PeerID).ID + } else { + p = ai.AddrInfo.ID + } + } else { + p = peer.ID(nid) + } ai := peer.AddrInfo{ ID: p, Addrs: info.Addresses(), diff --git a/v2/router_test.go b/v2/router_test.go index 010a1b1f..0add0ed7 100644 --- a/v2/router_test.go +++ b/v2/router_test.go @@ -3,6 +3,7 @@ package dht 
import ( "context" "testing" + "time" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" @@ -35,10 +36,14 @@ func TestRouter(t *testing.T) { target, err := peer.Decode("12D3KooWGWcyxn3JfihYiu2HspbE5XHzfgZiLwihVCeyXQQU8yC1") require.NoError(t, err) + // Error -> delay between AddNodes and added to routing table err = d.kad.AddNodes(ctx, []kad.NodeInfo[key.Key256, multiaddr.Multiaddr]{ nodeInfo{info: friendInfo}, }) require.NoError(t, err) + time.Sleep(100 * time.Millisecond) + + d.rt.AddNode(nodeID(friendInfo.ID)) targetInfo, err := d.FindPeer(ctx, target) require.NoError(t, err) diff --git a/v2/routing.go b/v2/routing.go index 490d583b..7f21b840 100644 --- a/v2/routing.go +++ b/v2/routing.go @@ -54,7 +54,7 @@ func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { return nil } - _, err := core.Query[key.Key256, ma.Multiaddr](ctx, d.kad, target.Key(), fn) + _, err := d.kad.Query(ctx, target.Key(), fn) if err != nil { return peer.AddrInfo{}, fmt.Errorf("failed to run query: %w", err) } From f2f22a3b8e0376c5c33b20c8715eaee275cfc609 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Wed, 30 Aug 2023 17:44:40 +0200 Subject: [PATCH 05/26] add kadt package --- v2/{pb => }/.gitignore | 0 v2/dht.go | 10 +- v2/handlers.go | 6 +- v2/handlers_test.go | 15 +-- v2/kad.go | 59 --------- v2/kadt/kadt.go | 61 +++++++++ v2/pb/Makefile | 12 +- v2/pb/{dht.aux.go => msg.aux.go} | 140 +++++---------------- v2/pb/{dht.aux_test.go => msg.aux_test.go} | 2 +- v2/pb/{dht.pb.go => msg.pb.go} | 110 ++++++++-------- v2/pb/{dht.proto => msg.proto} | 2 +- 11 files changed, 181 insertions(+), 236 deletions(-) rename v2/{pb => }/.gitignore (100%) delete mode 100644 v2/kad.go create mode 100644 v2/kadt/kadt.go rename v2/pb/{dht.aux.go => msg.aux.go} (51%) rename v2/pb/{dht.aux_test.go => msg.aux_test.go} (93%) rename v2/pb/{dht.pb.go => msg.pb.go} (85%) rename v2/pb/{dht.proto => msg.proto} (98%) diff --git a/v2/pb/.gitignore b/v2/.gitignore similarity index 100% rename from v2/pb/.gitignore rename to v2/.gitignore diff --git a/v2/dht.go b/v2/dht.go index 1883a580..46f045c4 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -1,12 +1,14 @@ package dht import ( + "crypto/sha256" "fmt" "io" "sync" "github.com/iand/zikade/kademlia" "github.com/ipfs/go-datastore/trace" + kadt "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" @@ -67,7 +69,7 @@ func New(h host.Host, cfg *Config) (*DHT, error) { log: cfg.Logger, } - nid := nodeID(d.host.ID()) + nid := kadt.PeerID(d.host.ID()) // Use the configured routing table if it was provided if cfg.RoutingTable != nil { @@ -255,3 +257,9 @@ func (d *DHT) logErr(err error, msg string) { d.log.Warn(msg, "err", err.Error()) } + +// newSHA256Key SHA256 hashes the given bytes and returns a new 256-bit key. 
+func newSHA256Key(data []byte) key.Key256 { + h := sha256.Sum256(data) + return key.NewKey256(h[:]) +} diff --git a/v2/handlers.go b/v2/handlers.go index 88fcebbc..7c135659 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -6,6 +6,8 @@ import ( "errors" "fmt" + kadt "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + ds "github.com/ipfs/go-datastore" record "github.com/libp2p/go-libp2p-record" recpb "github.com/libp2p/go-libp2p-record/pb" @@ -41,7 +43,7 @@ func (d *DHT) handleFindPeer(ctx context.Context, remote peer.ID, req *pb.Messag } // gather closer peers that we know - resp.CloserPeers = d.closerPeers(ctx, remote, nodeID(target).Key()) + resp.CloserPeers = d.closerPeers(ctx, remote, kadt.PeerID(target).Key()) // if we happen to know the target peers addresses (e.g., although we are // far away in the keyspace), we add the peer to the result set. This means @@ -242,7 +244,7 @@ func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256 // pre-allocated the result set slice. filtered := make([]*pb.Message_Peer, 0, len(peers)) for _, p := range peers { - pid := peer.ID(p.(nodeID)) // TODO: type cast + pid := peer.ID(p.(kadt.PeerID)) // TODO: type cast // check for own peer ID if pid == d.host.ID() { diff --git a/v2/handlers_test.go b/v2/handlers_test.go index bc7c336a..113b8691 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -16,6 +16,7 @@ import ( "github.com/ipfs/boxo/path" ds "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" pb "github.com/libp2p/go-libp2p-kad-dht/v2/pb" record "github.com/libp2p/go-libp2p-record" recpb "github.com/libp2p/go-libp2p-record/pb" @@ -81,7 +82,7 @@ func fillRoutingTable(t testing.TB, d *DHT) { pid := newPeerID(t) // add peer to routing table - d.rt.AddNode(nodeID(pid)) + d.rt.AddNode(kadt.PeerID(pid)) // craft network address for peer a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i)) @@ -122,7 +123,7 @@ func BenchmarkDHT_handleFindPeer(b *testing.B) { pid := newPeerID(b) // add peer to routing table - d.rt.AddNode(nodeID(pid)) + d.rt.AddNode(kadt.PeerID(pid)) // keep track of peer peers = append(peers, pid) @@ -174,7 +175,7 @@ func TestDHT_handleFindPeer_happy_path(t *testing.T) { // closer peers. This means we can't assert for exactly 20 closer peers // below. 
if i > 0 { - d.rt.AddNode(nodeID(pid)) + d.rt.AddNode(kadt.PeerID(pid)) } // keep track of peer @@ -208,7 +209,7 @@ func TestDHT_handleFindPeer_self_in_routing_table(t *testing.T) { // a case that shouldn't happen d := newTestDHT(t) - d.rt.AddNode(nodeID(d.host.ID())) + d.rt.AddNode(kadt.PeerID(d.host.ID())) req := &pb.Message{ Type: pb.Message_FIND_NODE, @@ -253,7 +254,7 @@ func TestDHT_handleFindPeer_unknown_addresses_but_in_routing_table(t *testing.T) pid := newPeerID(t) // add peer to routing table - d.rt.AddNode(nodeID(pid)) + d.rt.AddNode(kadt.PeerID(pid)) // keep track of peer peers[i] = pid @@ -322,7 +323,7 @@ func TestDHT_handleFindPeer_request_for_self(t *testing.T) { pid := newPeerID(t) // add peer to routing table - d.rt.AddNode(nodeID(pid)) + d.rt.AddNode(kadt.PeerID(pid)) // keep track of peer peers[i] = pid @@ -378,7 +379,7 @@ func TestDHT_handleFindPeer_request_for_known_but_far_peer(t *testing.T) { // don't add first peer to routing table -> the one we're asking for // don't add second peer -> the one that's requesting if i > 1 { - d.rt.AddNode(nodeID(pid)) + d.rt.AddNode(kadt.PeerID(pid)) } } diff --git a/v2/kad.go b/v2/kad.go deleted file mode 100644 index 77d15cb7..00000000 --- a/v2/kad.go +++ /dev/null @@ -1,59 +0,0 @@ -package dht - -import ( - "crypto/sha256" - - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" - "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/key" -) - -// nodeID is a type alias for peer.ID that implements the kad.NodeID interface. -// This means we can use nodeID for any operation that interfaces with -// go-kademlia. -type nodeID peer.ID - -// assertion that nodeID implements the kad.NodeID interface -var _ kad.NodeID[key.Key256] = nodeID("") - -// Key returns the Kademlia key of nodeID. The amino DHT operates on SHA256 -// hashes of, in this case, peer.IDs. This means this Key method takes -// the peer.ID, hashes it and constructs a 256-bit key. -func (p nodeID) Key() key.Key256 { - return newSHA256Key([]byte(p)) -} - -// String calls String on the underlying peer.ID and returns a string like -// QmFoo or 12D3KooBar. -func (p nodeID) String() string { - return peer.ID(p).String() -} - -// nodeInfo is a type that wraps peer.AddrInfo and implements the kad.NodeInfo -// interface. This means we can use nodeInfo for any operation that interfaces -// with go-kademlia. -type nodeInfo struct { - info peer.AddrInfo -} - -// assertion that nodeInfo implements the kad.NodeInfo interface -var _ kad.NodeInfo[key.Key256, ma.Multiaddr] = (*nodeInfo)(nil) - -// ID returns the kad.NodeID of this peer's information struct. -func (ai nodeInfo) ID() kad.NodeID[key.Key256] { - return nodeID(ai.info.ID) -} - -// Addresses returns all Multiaddresses of this peer. -func (ai nodeInfo) Addresses() []ma.Multiaddr { - addrs := make([]ma.Multiaddr, len(ai.info.Addrs)) - copy(addrs, ai.info.Addrs) - return addrs -} - -// newSHA256Key SHA256 hashes the given bytes and returns a new 256-bit key. -func newSHA256Key(data []byte) key.Key256 { - h := sha256.Sum256(data) - return key.NewKey256(h[:]) -} diff --git a/v2/kadt/kadt.go b/v2/kadt/kadt.go new file mode 100644 index 00000000..b2d586f9 --- /dev/null +++ b/v2/kadt/kadt.go @@ -0,0 +1,61 @@ +// Package kadt contains the kademlia types for interacting with go-kademlia. +// It would be nicer to have these types in the top-level DHT package, however +// we also need these types in, e.g., the dht_pb package to let the Message +// type conform to certain interfaces. 
+package kadt + +import ( + "crypto/sha256" + + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" +) + +// PeerID is a type alias for peer.ID that implements the kad.NodeID interface. +// This means we can use PeerID for any operation that interfaces with +// go-kademlia. +type PeerID peer.ID + +// assertion that PeerID implements the kad.NodeID interface +var _ kad.NodeID[key.Key256] = PeerID("") + +// Key returns the Kademlia key of PeerID. The amino DHT operates on SHA256 +// hashes of, in this case, peer.IDs. This means this Key method takes +// the peer.ID, hashes it and constructs a 256-bit key. +func (p PeerID) Key() key.Key256 { + h := sha256.Sum256([]byte(p)) + return key.NewKey256(h[:]) +} + +// String calls String on the underlying peer.ID and returns a string like +// QmFoo or 12D3KooBar. +func (p PeerID) String() string { + return peer.ID(p).String() +} + +// AddrInfo is a type that wraps peer.AddrInfo and implements the kad.NodeInfo +// interface. This means we can use AddrInfo for any operation that interfaces +// with go-kademlia. +// +// A more accurate name would be PeerInfo or NodeInfo. However, for consistency +// and coherence with [peer.AddrInfo] we also name it AddrInfo. +type AddrInfo struct { + Info peer.AddrInfo +} + +// assertion that AddrInfo implements the kad.NodeInfo interface +var _ kad.NodeInfo[key.Key256, ma.Multiaddr] = (*AddrInfo)(nil) + +// ID returns the kad.NodeID of this peer's information struct. +func (ai AddrInfo) ID() kad.NodeID[key.Key256] { + return PeerID(ai.Info.ID) +} + +// Addresses returns all Multiaddresses of this peer. +func (ai AddrInfo) Addresses() []ma.Multiaddr { + addrs := make([]ma.Multiaddr, len(ai.Info.Addrs)) + copy(addrs, ai.Info.Addrs) + return addrs +} diff --git a/v2/pb/Makefile b/v2/pb/Makefile index 99ff32d1..0d5b6d67 100644 --- a/v2/pb/Makefile +++ b/v2/pb/Makefile @@ -1,11 +1,15 @@ -all: clone reset - protoc --go_out=./ --go_opt=Mgithub.com/libp2p/go-libp2p-record/pb/record.proto=github.com/libp2p/go-libp2p-record/pb ./dht.proto +all: clone reset build clean + +build: + protoc --go_out=./ --go_opt=Mgithub.com/libp2p/go-libp2p-record/pb/record.proto=github.com/libp2p/go-libp2p-record/pb ./msg.proto clone: - git clone --depth=1 git@github.com:libp2p/go-libp2p-record.git github.com/libp2p/go-libp2p-record || true + git clone --depth 1 --branch v0.2.0 git@github.com:libp2p/go-libp2p-record.git github.com/libp2p/go-libp2p-record || true reset: - rm dht.pb.go + rm msg.pb.go || true clean: rm -rf github.com + +.PHONY: all build clone reset clean \ No newline at end of file diff --git a/v2/pb/dht.aux.go b/v2/pb/msg.aux.go similarity index 51% rename from v2/pb/dht.aux.go rename to v2/pb/msg.aux.go index ae25c093..14b7f6d0 100644 --- a/v2/pb/dht.aux.go +++ b/v2/pb/msg.aux.go @@ -1,20 +1,35 @@ -package dht_pb +package pb import ( "bytes" - "fmt" + "crypto/sha256" - mh "github.com/multiformats/go-multihash" - mhreg "github.com/multiformats/go-multihash/core" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "golang.org/x/exp/slog" - - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" ) -// FromAddrInfo constructs a Message_Peer from the given peer.AddrInfo +// this file contains auxiliary methods to augment the protobuf 
generated types. +// It is used to let these types conform to interfaces or add convenience methods. + +var _ kad.Request[key.Key256, ma.Multiaddr] = (*Message)(nil) + +func (m *Message) Target() key.Key256 { + b := sha256.Sum256(m.Key) + return key.NewKey256(b[:]) +} + +func (m *Message) EmptyResponse() kad.Response[key.Key256, ma.Multiaddr] { + return &Message{ + Type: m.Type, + Key: m.Key, + } +} + +// FromAddrInfo constructs a [Message_Peer] from the given [peer.AddrInfo]. func FromAddrInfo(p peer.AddrInfo) *Message_Peer { mp := &Message_Peer{ Id: []byte(p.ID), @@ -76,40 +91,19 @@ func (m *Message) CloserPeersAddrInfos() []peer.AddrInfo { } func (m *Message) CloserNodes() []kad.NodeInfo[key.Key256, ma.Multiaddr] { - closerPeers := m.GetCloserPeers() - if closerPeers == nil { - return []kad.NodeInfo[key.Key256, ma.Multiaddr]{} + if m == nil { + return nil } - return ParsePeers(closerPeers) -} -func PBPeerToPeerInfo(pbp *Message_Peer) (*AddrInfo, error) { - addrs := make([]ma.Multiaddr, 0, len(pbp.Addrs)) - for _, a := range pbp.Addrs { - addr, err := ma.NewMultiaddrBytes(a) - if err == nil { - addrs = append(addrs, addr) - } - } - if len(addrs) == 0 { - return nil, fmt.Errorf("asdfsdf") + infos := make([]kad.NodeInfo[key.Key256, ma.Multiaddr], 0, len(m.CloserPeers)) + for _, p := range m.CloserPeers { + infos = append(infos, &kadt.AddrInfo{Info: peer.AddrInfo{ + ID: peer.ID(p.Id), + Addrs: p.Addresses(), + }}) } - return NewAddrInfo(peer.AddrInfo{ - ID: peer.ID(pbp.Id), - Addrs: addrs, - }), nil -} - -func ParsePeers(pbps []*Message_Peer) []kad.NodeInfo[key.Key256, ma.Multiaddr] { - peers := make([]kad.NodeInfo[key.Key256, ma.Multiaddr], 0, len(pbps)) - for _, p := range pbps { - pi, err := PBPeerToPeerInfo(p) - if err == nil { - peers = append(peers, pi) - } - } - return peers + return infos } // Addresses returns the Multiaddresses associated with the Message_Peer entry @@ -131,73 +125,3 @@ func (m *Message_Peer) Addresses() []ma.Multiaddr { return maddrs } - -type KadKey = key.Key256 - -type AddrInfo struct { - peer.AddrInfo - id *PeerID -} - -var _ kad.NodeInfo[KadKey, ma.Multiaddr] = (*AddrInfo)(nil) - -func NewAddrInfo(ai peer.AddrInfo) *AddrInfo { - return &AddrInfo{ - AddrInfo: ai, - id: NewPeerID(ai.ID), - } -} - -func (ai AddrInfo) Key() KadKey { - return ai.id.Key() -} - -func (ai AddrInfo) String() string { - return ai.id.String() -} - -func (ai AddrInfo) PeerID() *PeerID { - return ai.id -} - -func (ai AddrInfo) ID() kad.NodeID[KadKey] { - return ai.id -} - -func (ai AddrInfo) Addresses() []ma.Multiaddr { - addrs := make([]ma.Multiaddr, len(ai.Addrs)) - copy(addrs, ai.Addrs) - return addrs -} - -type PeerID struct { - peer.ID -} - -var _ kad.NodeID[KadKey] = (*PeerID)(nil) - -func NewPeerID(p peer.ID) *PeerID { - return &PeerID{p} -} - -func (id PeerID) Key() KadKey { - hasher, _ := mhreg.GetHasher(mh.SHA2_256) - hasher.Write([]byte(id.ID)) - return key.NewKey256(hasher.Sum(nil)) -} - -func (id PeerID) NodeID() kad.NodeID[KadKey] { - return &id -} - -func (m *Message) Protocol() string { - return "/test/1.0.0" -} - -func (m *Message) Target() key.Key256 { - return key.NewKey256(m.Key) -} - -func (m *Message) EmptyResponse() kad.Response[key.Key256, ma.Multiaddr] { - return &Message{} -} diff --git a/v2/pb/dht.aux_test.go b/v2/pb/msg.aux_test.go similarity index 93% rename from v2/pb/dht.aux_test.go rename to v2/pb/msg.aux_test.go index f092e8fd..dc3bd016 100644 --- a/v2/pb/dht.aux_test.go +++ b/v2/pb/msg.aux_test.go @@ -1,4 +1,4 @@ -package dht_pb +package pb import ( 
"testing" diff --git a/v2/pb/dht.pb.go b/v2/pb/msg.pb.go similarity index 85% rename from v2/pb/dht.pb.go rename to v2/pb/msg.pb.go index 36e7a925..0acb59b6 100644 --- a/v2/pb/dht.pb.go +++ b/v2/pb/msg.pb.go @@ -2,16 +2,17 @@ // versions: // protoc-gen-go v1.30.0 // protoc v3.21.12 -// source: dht.proto +// source: msg.proto -package dht_pb +package pb import ( + reflect "reflect" + sync "sync" + pb "github.com/libp2p/go-libp2p-record/pb" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" ) const ( @@ -66,11 +67,11 @@ func (x Message_MessageType) String() string { } func (Message_MessageType) Descriptor() protoreflect.EnumDescriptor { - return file_dht_proto_enumTypes[0].Descriptor() + return file_msg_proto_enumTypes[0].Descriptor() } func (Message_MessageType) Type() protoreflect.EnumType { - return &file_dht_proto_enumTypes[0] + return &file_msg_proto_enumTypes[0] } func (x Message_MessageType) Number() protoreflect.EnumNumber { @@ -79,7 +80,7 @@ func (x Message_MessageType) Number() protoreflect.EnumNumber { // Deprecated: Use Message_MessageType.Descriptor instead. func (Message_MessageType) EnumDescriptor() ([]byte, []int) { - return file_dht_proto_rawDescGZIP(), []int{0, 0} + return file_msg_proto_rawDescGZIP(), []int{0, 0} } type Message_ConnectionType int32 @@ -123,11 +124,11 @@ func (x Message_ConnectionType) String() string { } func (Message_ConnectionType) Descriptor() protoreflect.EnumDescriptor { - return file_dht_proto_enumTypes[1].Descriptor() + return file_msg_proto_enumTypes[1].Descriptor() } func (Message_ConnectionType) Type() protoreflect.EnumType { - return &file_dht_proto_enumTypes[1] + return &file_msg_proto_enumTypes[1] } func (x Message_ConnectionType) Number() protoreflect.EnumNumber { @@ -136,7 +137,7 @@ func (x Message_ConnectionType) Number() protoreflect.EnumNumber { // Deprecated: Use Message_ConnectionType.Descriptor instead. func (Message_ConnectionType) EnumDescriptor() ([]byte, []int) { - return file_dht_proto_rawDescGZIP(), []int{0, 1} + return file_msg_proto_rawDescGZIP(), []int{0, 1} } // Message is the top-level envelope for exchanging @@ -151,7 +152,7 @@ type Message struct { // defines what coral cluster level this query/response belongs to. // in case we want to implement coral's cluster rings in the future. // - // Deprecated: Marked as deprecated in dht.proto. + // Deprecated: Marked as deprecated in msg.proto. ClusterLevelRaw int32 `protobuf:"varint,10,opt,name=cluster_level_raw,json=clusterLevelRaw,proto3" json:"cluster_level_raw,omitempty"` // Used to specify the key associated with this message. // PUT_VALUE, GET_VALUE, ADD_PROVIDER, GET_PROVIDERS @@ -170,7 +171,7 @@ type Message struct { func (x *Message) Reset() { *x = Message{} if protoimpl.UnsafeEnabled { - mi := &file_dht_proto_msgTypes[0] + mi := &file_msg_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -183,7 +184,7 @@ func (x *Message) String() string { func (*Message) ProtoMessage() {} func (x *Message) ProtoReflect() protoreflect.Message { - mi := &file_dht_proto_msgTypes[0] + mi := &file_msg_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -196,7 +197,7 @@ func (x *Message) ProtoReflect() protoreflect.Message { // Deprecated: Use Message.ProtoReflect.Descriptor instead. 
func (*Message) Descriptor() ([]byte, []int) { - return file_dht_proto_rawDescGZIP(), []int{0} + return file_msg_proto_rawDescGZIP(), []int{0} } func (x *Message) GetType() Message_MessageType { @@ -206,7 +207,7 @@ func (x *Message) GetType() Message_MessageType { return Message_PUT_VALUE } -// Deprecated: Marked as deprecated in dht.proto. +// Deprecated: Marked as deprecated in msg.proto. func (x *Message) GetClusterLevelRaw() int32 { if x != nil { return x.ClusterLevelRaw @@ -258,7 +259,7 @@ type Message_Peer struct { func (x *Message_Peer) Reset() { *x = Message_Peer{} if protoimpl.UnsafeEnabled { - mi := &file_dht_proto_msgTypes[1] + mi := &file_msg_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -271,7 +272,7 @@ func (x *Message_Peer) String() string { func (*Message_Peer) ProtoMessage() {} func (x *Message_Peer) ProtoReflect() protoreflect.Message { - mi := &file_dht_proto_msgTypes[1] + mi := &file_msg_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -284,7 +285,7 @@ func (x *Message_Peer) ProtoReflect() protoreflect.Message { // Deprecated: Use Message_Peer.ProtoReflect.Descriptor instead. func (*Message_Peer) Descriptor() ([]byte, []int) { - return file_dht_proto_rawDescGZIP(), []int{0, 0} + return file_msg_proto_rawDescGZIP(), []int{0, 0} } func (x *Message_Peer) GetId() []byte { @@ -308,10 +309,10 @@ func (x *Message_Peer) GetConnection() Message_ConnectionType { return Message_NOT_CONNECTED } -var File_dht_proto protoreflect.FileDescriptor +var File_msg_proto protoreflect.FileDescriptor -var file_dht_proto_rawDesc = []byte{ - 0x0a, 0x09, 0x64, 0x68, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x64, 0x68, 0x74, +var file_msg_proto_rawDesc = []byte{ + 0x0a, 0x09, 0x6d, 0x73, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x64, 0x68, 0x74, 0x2e, 0x70, 0x62, 0x1a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x69, 0x62, 0x70, 0x32, 0x70, 0x2f, 0x67, 0x6f, 0x2d, 0x6c, 0x69, 0x62, 0x70, 0x32, 0x70, 0x2d, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x72, @@ -352,32 +353,35 @@ var file_dht_proto_rawDesc = []byte{ 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x41, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x5f, - 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x03, 0x42, 0x0b, 0x5a, 0x09, 0x2e, 0x2f, 0x3b, - 0x64, 0x68, 0x74, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x03, 0x42, 0x08, 0x5a, 0x06, 0x2e, 0x2f, 0x3b, + 0x64, 0x68, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - file_dht_proto_rawDescOnce sync.Once - file_dht_proto_rawDescData = file_dht_proto_rawDesc + file_msg_proto_rawDescOnce sync.Once + file_msg_proto_rawDescData = file_msg_proto_rawDesc ) -func file_dht_proto_rawDescGZIP() []byte { - file_dht_proto_rawDescOnce.Do(func() { - file_dht_proto_rawDescData = protoimpl.X.CompressGZIP(file_dht_proto_rawDescData) +func file_msg_proto_rawDescGZIP() []byte { + file_msg_proto_rawDescOnce.Do(func() { + file_msg_proto_rawDescData = protoimpl.X.CompressGZIP(file_msg_proto_rawDescData) }) - return file_dht_proto_rawDescData + return file_msg_proto_rawDescData } -var 
file_dht_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_dht_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_dht_proto_goTypes = []interface{}{ - (Message_MessageType)(0), // 0: dht.pb.Message.MessageType - (Message_ConnectionType)(0), // 1: dht.pb.Message.ConnectionType - (*Message)(nil), // 2: dht.pb.Message - (*Message_Peer)(nil), // 3: dht.pb.Message.Peer - (*pb.Record)(nil), // 4: record.pb.Record -} -var file_dht_proto_depIdxs = []int32{ +var ( + file_msg_proto_enumTypes = make([]protoimpl.EnumInfo, 2) + file_msg_proto_msgTypes = make([]protoimpl.MessageInfo, 2) + file_msg_proto_goTypes = []interface{}{ + (Message_MessageType)(0), // 0: dht.pb.Message.MessageType + (Message_ConnectionType)(0), // 1: dht.pb.Message.ConnectionType + (*Message)(nil), // 2: dht.pb.Message + (*Message_Peer)(nil), // 3: dht.pb.Message.Peer + (*pb.Record)(nil), // 4: record.pb.Record + } +) + +var file_msg_proto_depIdxs = []int32{ 0, // 0: dht.pb.Message.type:type_name -> dht.pb.Message.MessageType 4, // 1: dht.pb.Message.record:type_name -> record.pb.Record 3, // 2: dht.pb.Message.closer_peers:type_name -> dht.pb.Message.Peer @@ -390,13 +394,13 @@ var file_dht_proto_depIdxs = []int32{ 0, // [0:5] is the sub-list for field type_name } -func init() { file_dht_proto_init() } -func file_dht_proto_init() { - if File_dht_proto != nil { +func init() { file_msg_proto_init() } +func file_msg_proto_init() { + if File_msg_proto != nil { return } if !protoimpl.UnsafeEnabled { - file_dht_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_msg_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Message); i { case 0: return &v.state @@ -408,7 +412,7 @@ func file_dht_proto_init() { return nil } } - file_dht_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_msg_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Message_Peer); i { case 0: return &v.state @@ -425,19 +429,19 @@ func file_dht_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_dht_proto_rawDesc, + RawDescriptor: file_msg_proto_rawDesc, NumEnums: 2, NumMessages: 2, NumExtensions: 0, NumServices: 0, }, - GoTypes: file_dht_proto_goTypes, - DependencyIndexes: file_dht_proto_depIdxs, - EnumInfos: file_dht_proto_enumTypes, - MessageInfos: file_dht_proto_msgTypes, + GoTypes: file_msg_proto_goTypes, + DependencyIndexes: file_msg_proto_depIdxs, + EnumInfos: file_msg_proto_enumTypes, + MessageInfos: file_msg_proto_msgTypes, }.Build() - File_dht_proto = out.File - file_dht_proto_rawDesc = nil - file_dht_proto_goTypes = nil - file_dht_proto_depIdxs = nil + File_msg_proto = out.File + file_msg_proto_rawDesc = nil + file_msg_proto_goTypes = nil + file_msg_proto_depIdxs = nil } diff --git a/v2/pb/dht.proto b/v2/pb/msg.proto similarity index 98% rename from v2/pb/dht.proto rename to v2/pb/msg.proto index 659230fd..08d249bb 100644 --- a/v2/pb/dht.proto +++ b/v2/pb/msg.proto @@ -1,7 +1,7 @@ syntax = "proto3"; package dht.pb; -option go_package = "./;dht_pb"; +option go_package = "./;dht"; import "github.com/libp2p/go-libp2p-record/pb/record.proto"; From 7e8f1dfea9334c377fa08e14a1727b1010114b62 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Wed, 30 Aug 2023 17:44:50 +0200 Subject: [PATCH 06/26] Add routing test --- v2/router.go | 34 ++++++++++++------------------- v2/router_test.go | 51 ---------------------------------------------- 
v2/routing.go | 10 +++++++-- v2/routing_test.go | 51 ++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 72 insertions(+), 74 deletions(-) diff --git a/v2/router.go b/v2/router.go index 249f1822..bd08ab97 100644 --- a/v2/router.go +++ b/v2/router.go @@ -5,10 +5,12 @@ import ( "fmt" "time" - "github.com/libp2p/go-msgio" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" pb "github.com/libp2p/go-libp2p-kad-dht/v2/pb" + "github.com/libp2p/go-msgio" + "github.com/libp2p/go-libp2p/core/peer" "google.golang.org/protobuf/proto" @@ -66,15 +68,10 @@ func (r *Router) SendMessage(ctx context.Context, to kad.NodeInfo[key.Key256, ma } var p peer.ID - nid, ok := to.ID().(nodeID) + nid, ok := to.ID().(kadt.PeerID) if !ok { - ai, ok := to.(*pb.AddrInfo) - if !ok { - naddr := to.(*kademlia.NodeAddr[key.Key256, ma.Multiaddr]) - p = naddr.ID().(*pb.PeerID).ID - } else { - p = ai.AddrInfo.ID - } + naddr := to.(*kademlia.NodeAddr[key.Key256, ma.Multiaddr]) + p = peer.ID(naddr.ID().(kadt.PeerID)) } else { p = peer.ID(nid) } @@ -114,8 +111,8 @@ func (r *Router) SendMessage(ctx context.Context, to kad.NodeInfo[key.Key256, ma } for _, info := range protoResp.CloserPeersAddrInfos() { - _ = r.AddNodeInfo(ctx, nodeInfo{ - info: info, + _ = r.AddNodeInfo(ctx, kadt.AddrInfo{ + Info: info, }, time.Hour) } @@ -124,15 +121,10 @@ func (r *Router) SendMessage(ctx context.Context, to kad.NodeInfo[key.Key256, ma func (r *Router) AddNodeInfo(ctx context.Context, info kad.NodeInfo[key.Key256, ma.Multiaddr], ttl time.Duration) error { var p peer.ID - nid, ok := info.ID().(nodeID) + nid, ok := info.ID().(kadt.PeerID) if !ok { - ai, ok := info.(*pb.AddrInfo) - if !ok { - naddr := info.(*kademlia.NodeAddr[key.Key256, ma.Multiaddr]) - p = naddr.ID().(*pb.PeerID).ID - } else { - p = ai.AddrInfo.ID - } + naddr := info.(*kademlia.NodeAddr[key.Key256, ma.Multiaddr]) + p = peer.ID(naddr.ID().(kadt.PeerID)) } else { p = peer.ID(nid) } @@ -151,8 +143,8 @@ func (r *Router) AddNodeInfo(ctx context.Context, info kad.NodeInfo[key.Key256, } func (r *Router) GetNodeInfo(ctx context.Context, id kad.NodeID[key.Key256]) (kad.NodeInfo[key.Key256, ma.Multiaddr], error) { - // TODO implement me - panic("implement me") + pid := peer.ID(id.(kadt.PeerID)) + return kadt.AddrInfo{Info: r.host.Peerstore().PeerInfo(pid)}, nil } func (r *Router) GetClosestNodes(ctx context.Context, to kad.NodeInfo[key.Key256, ma.Multiaddr], target key.Key256) ([]kad.NodeInfo[key.Key256, ma.Multiaddr], error) { diff --git a/v2/router_test.go b/v2/router_test.go index 0add0ed7..745a87dc 100644 --- a/v2/router_test.go +++ b/v2/router_test.go @@ -1,52 +1 @@ package dht - -import ( - "context" - "testing" - "time" - - "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/key" - - "github.com/libp2p/go-libp2p/core/peer" - - "github.com/multiformats/go-multiaddr" - "github.com/stretchr/testify/require" -) - -func TestRouter(t *testing.T) { - d := newTestDHT(t) - ctx := context.Background() - - // friend is the first peer we know in the IPFS DHT network (bootstrap node) - friendID, err := peer.Decode("12D3KooWGjgvfDkpuVAoNhd7PRRvMTEG4ZgzHBFURqDe1mqEzAMS") - require.NoError(t, err) - - // multiaddress of friend - friendAddr, err := multiaddr.NewMultiaddr("/ip4/45.32.75.236/tcp/4001") - require.NoError(t, err) - - t.Log("connecting...") - friendInfo := peer.AddrInfo{ID: friendID, Addrs: []multiaddr.Multiaddr{friendAddr}} - err = d.host.Connect(ctx, friendInfo) - require.NoError(t, err) - t.Log("connected") - - // target is the peer we want to find - 
target, err := peer.Decode("12D3KooWGWcyxn3JfihYiu2HspbE5XHzfgZiLwihVCeyXQQU8yC1") - require.NoError(t, err) - - // Error -> delay between AddNodes and added to routing table - err = d.kad.AddNodes(ctx, []kad.NodeInfo[key.Key256, multiaddr.Multiaddr]{ - nodeInfo{info: friendInfo}, - }) - require.NoError(t, err) - time.Sleep(100 * time.Millisecond) - - d.rt.AddNode(nodeID(friendInfo.ID)) - - targetInfo, err := d.FindPeer(ctx, target) - require.NoError(t, err) - t.Log(targetInfo.ID) - t.Log(targetInfo.Addrs) -} diff --git a/v2/routing.go b/v2/routing.go index 7f21b840..c2b53abf 100644 --- a/v2/routing.go +++ b/v2/routing.go @@ -6,6 +6,8 @@ import ( "fmt" "time" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "golang.org/x/exp/slog" "github.com/iand/zikade/core" @@ -42,7 +44,7 @@ func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { // we're } - target := nodeID(id) + target := kadt.PeerID(id) var foundNode core.Node[key.Key256, ma.Multiaddr] fn := func(ctx context.Context, node core.Node[key.Key256, ma.Multiaddr], stats core.QueryStats) error { @@ -59,8 +61,12 @@ func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { return peer.AddrInfo{}, fmt.Errorf("failed to run query: %w", err) } + if foundNode == nil { + return peer.AddrInfo{}, fmt.Errorf("peer record not found") + } + return peer.AddrInfo{ - ID: peer.ID(foundNode.ID().(nodeID)), + ID: peer.ID(foundNode.ID().(kadt.PeerID)), Addrs: foundNode.Addresses(), }, nil } diff --git a/v2/routing_test.go b/v2/routing_test.go index 745a87dc..7406c401 100644 --- a/v2/routing_test.go +++ b/v2/routing_test.go @@ -1 +1,52 @@ package dht + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/multiformats/go-multiaddr" + "github.com/stretchr/testify/require" +) + +func TestRouting_FindPeer(t *testing.T) { + d := newTestDHT(t) + ctx := context.Background() + + // friend is the first peer we know in the IPFS DHT network (bootstrap node) + friendID, err := peer.Decode("12D3KooWGjgvfDkpuVAoNhd7PRRvMTEG4ZgzHBFURqDe1mqEzAMS") + require.NoError(t, err) + + // multiaddress of friend + friendAddr, err := multiaddr.NewMultiaddr("/ip4/45.32.75.236/tcp/4001") + require.NoError(t, err) + + t.Log("connecting...") + friendInfo := peer.AddrInfo{ID: friendID, Addrs: []multiaddr.Multiaddr{friendAddr}} + err = d.host.Connect(ctx, friendInfo) + require.NoError(t, err) + t.Log("connected") + + // target is the peer we want to find + target, err := peer.Decode("12D3KooWGWcyxn3JfihYiu2HspbE5XHzfgZiLwihVCeyXQQU8yC1") + require.NoError(t, err) + + // Error -> delay between AddNodes and added to routing table + //err = d.kad.AddNodes(ctx, []kad.NodeInfo[key.Key256, multiaddr.Multiaddr]{ + // kadt.AddrInfo{Info: friendInfo}, + //}) + //require.NoError(t, err) + //time.Sleep(100 * time.Millisecond) + + d.rt.AddNode(kadt.PeerID(friendInfo.ID)) + + targetInfo, err := d.FindPeer(ctx, target) + require.NoError(t, err) + t.Log(targetInfo.ID) + t.Log(targetInfo.Addrs) + + assert.Greater(t, len(targetInfo.Addrs), 0) +} From 95520acb6f7212dd19ce5b6861b8ec479dfc39c5 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Wed, 30 Aug 2023 17:46:20 +0200 Subject: [PATCH 07/26] add custom zikade dependency --- v2/go.mod | 4 +++- v2/go.sum | 4 ++-- v2/routing.go | 11 ++++------- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/v2/go.mod b/v2/go.mod index 96a5ce32..d951ff3d 100644 --- a/v2/go.mod +++ 
b/v2/go.mod @@ -16,7 +16,6 @@ require ( github.com/libp2p/go-msgio v0.3.0 github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-multiaddr v0.11.0 - github.com/multiformats/go-multihash v0.2.3 github.com/plprobelab/go-kademlia v0.0.0-20230823114513-9b9e606066c9 github.com/stretchr/testify v1.8.4 go.opencensus.io v0.24.0 @@ -80,6 +79,7 @@ require ( github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-multibase v0.2.0 // indirect github.com/multiformats/go-multicodec v0.9.0 // indirect + github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-multistream v0.4.1 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/nxadm/tail v1.4.8 // indirect @@ -117,3 +117,5 @@ require ( ) replace github.com/ipfs/go-datastore v0.6.0 => github.com/dennis-tra/go-datastore v0.0.0-20230823171431-12ac85aaa42a + +replace github.com/iand/zikade v0.0.0-20230824143824-d11f2132b4fb => github.com/dennis-tra/zikade v0.0.0-20230830153809-e6af3ad31acd diff --git a/v2/go.sum b/v2/go.sum index 24ae5122..6f0a62b5 100644 --- a/v2/go.sum +++ b/v2/go.sum @@ -43,6 +43,8 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etly github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/dennis-tra/go-datastore v0.0.0-20230823171431-12ac85aaa42a h1:YnrW4Kcy7kTIJRfL3Xg7+fIMS17izs0WWH2GdYwYhNs= github.com/dennis-tra/go-datastore v0.0.0-20230823171431-12ac85aaa42a/go.mod h1:3Et7HSjOA8tPu9OjYuDZxLAgBLfvlNMD4r8BIuri9eo= +github.com/dennis-tra/zikade v0.0.0-20230830153809-e6af3ad31acd h1:EGfJ0TEVP3z99zFuMOztW7wTb/60nHpEzLcAmS+eknA= +github.com/dennis-tra/zikade v0.0.0-20230830153809-e6af3ad31acd/go.mod h1:k5AXGe5qXg7d/pUBDNQvtmTvsnXRehzGNf1XC04+qBM= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= @@ -141,8 +143,6 @@ github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyf github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= -github.com/iand/zikade v0.0.0-20230824143824-d11f2132b4fb h1:L0sxl/vHUf/wdEX6+QJGC0cQsnn2AglFL0qbJvv8+64= -github.com/iand/zikade v0.0.0-20230824143824-d11f2132b4fb/go.mod h1:9BszmzAjw3qghO/oCaTvIhQUHb3h+F0EAHecClvcUnA= github.com/ipfs/boxo v0.12.0 h1:AXHg/1ONZdRQHQLgG5JHsSC3XoE4DjCAMgK+asZvUcQ= github.com/ipfs/boxo v0.12.0/go.mod h1:xAnfiU6PtxWCnRqu7dcXQ10bB5/kvI1kXRotuGqGBhg= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= diff --git a/v2/routing.go b/v2/routing.go index c2b53abf..c2e84aa3 100644 --- a/v2/routing.go +++ b/v2/routing.go @@ -6,23 +6,20 @@ import ( "fmt" "time" - "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" - - "golang.org/x/exp/slog" - "github.com/iand/zikade/core" - ma "github.com/multiformats/go-multiaddr" - "github.com/plprobelab/go-kademlia/key" - "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" record "github.com/libp2p/go-libp2p-record" recpb "github.com/libp2p/go-libp2p-record/pb" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/routing" + 
ma "github.com/multiformats/go-multiaddr" + "github.com/plprobelab/go-kademlia/key" "go.opentelemetry.io/otel/attribute" otel "go.opentelemetry.io/otel/trace" + "golang.org/x/exp/slog" ) var _ routing.Routing = (*DHT)(nil) From d0c67d3d46eb3c06accaab2b046d1dadbebc1430 Mon Sep 17 00:00:00 2001 From: Ian Davis <18375+iand@users.noreply.github.com> Date: Fri, 1 Sep 2023 16:19:11 +0100 Subject: [PATCH 08/26] Import zikade code --- v2/coord/behaviour.go | 150 ++++++++ v2/coord/behaviour_test.go | 54 +++ v2/coord/coretypes.go | 81 +++++ v2/coord/dht.go | 416 ++++++++++++++++++++++ v2/coord/dht_test.go | 286 +++++++++++++++ v2/coord/event.go | 133 +++++++ v2/coord/internal/kadtest/bench.go | 11 + v2/coord/internal/kadtest/bench_pre120.go | 10 + v2/coord/internal/kadtest/context.go | 30 ++ v2/coord/internal/kadtest/kadtypes.go | 107 ++++++ v2/coord/internal/kadtest/message.go | 81 +++++ v2/coord/internal/kadtest/rand.go | 45 +++ v2/coord/internal/nettest/layouts.go | 55 +++ v2/coord/internal/nettest/routing.go | 144 ++++++++ v2/coord/internal/nettest/topology.go | 145 ++++++++ v2/coord/network.go | 263 ++++++++++++++ v2/coord/network_test.go | 34 ++ v2/coord/query.go | 179 ++++++++++ v2/coord/routing.go | 356 ++++++++++++++++++ v2/coord/routing_test.go | 300 ++++++++++++++++ 20 files changed, 2880 insertions(+) create mode 100644 v2/coord/behaviour.go create mode 100644 v2/coord/behaviour_test.go create mode 100644 v2/coord/coretypes.go create mode 100644 v2/coord/dht.go create mode 100644 v2/coord/dht_test.go create mode 100644 v2/coord/event.go create mode 100644 v2/coord/internal/kadtest/bench.go create mode 100644 v2/coord/internal/kadtest/bench_pre120.go create mode 100644 v2/coord/internal/kadtest/context.go create mode 100644 v2/coord/internal/kadtest/kadtypes.go create mode 100644 v2/coord/internal/kadtest/message.go create mode 100644 v2/coord/internal/kadtest/rand.go create mode 100644 v2/coord/internal/nettest/layouts.go create mode 100644 v2/coord/internal/nettest/routing.go create mode 100644 v2/coord/internal/nettest/topology.go create mode 100644 v2/coord/network.go create mode 100644 v2/coord/network_test.go create mode 100644 v2/coord/query.go create mode 100644 v2/coord/routing.go create mode 100644 v2/coord/routing_test.go diff --git a/v2/coord/behaviour.go b/v2/coord/behaviour.go new file mode 100644 index 00000000..b8cdc40c --- /dev/null +++ b/v2/coord/behaviour.go @@ -0,0 +1,150 @@ +package kademlia + +import ( + "context" + "sync" + "sync/atomic" +) + +type Notify[C DhtEvent] interface { + Notify(ctx context.Context, ev C) +} + +type NotifyCloser[C DhtEvent] interface { + Notify[C] + Close() +} + +type NotifyFunc[C DhtEvent] func(ctx context.Context, ev C) + +func (f NotifyFunc[C]) Notify(ctx context.Context, ev C) { + f(ctx, ev) +} + +type Behaviour[I DhtEvent, O DhtEvent] interface { + // Ready returns a channel that signals when the behaviour is ready to perform work. + Ready() <-chan struct{} + + // Notify informs the behaviour of an event. The behaviour may perform the event + // immediately and queue the result, causing the behaviour to become ready. + // It is safe to call Notify from the Perform method. + Notify(ctx context.Context, ev I) + + // Perform gives the behaviour the opportunity to perform work or to return a queued + // result as an event. 
+ Perform(ctx context.Context) (O, bool) +} + +type SM[E any, S any] interface { + Advance(context.Context, E) S +} + +type WorkQueueFunc[E DhtEvent] func(context.Context, E) bool + +// WorkQueue is buffered queue of work to be performed. +// The queue automatically drains the queue sequentially by calling a +// WorkQueueFunc for each work item, passing the original context +// and event. +type WorkQueue[E DhtEvent] struct { + pending chan pendingEvent[E] + fn WorkQueueFunc[E] + done atomic.Bool + once sync.Once +} + +func NewWorkQueue[E DhtEvent](fn WorkQueueFunc[E]) *WorkQueue[E] { + w := &WorkQueue[E]{ + pending: make(chan pendingEvent[E], 16), + fn: fn, + } + return w +} + +type pendingEvent[E any] struct { + Ctx context.Context + Event E +} + +// Enqueue queues work to be perfomed. It will block if the +// queue has reached its maximum capacity for pending work. While +// blocking it will return a context cancellation error if the work +// item's context is cancelled. +func (w *WorkQueue[E]) Enqueue(ctx context.Context, cmd E) error { + if w.done.Load() { + return nil + } + w.once.Do(func() { + go func() { + defer w.done.Store(true) + for cc := range w.pending { + if cc.Ctx.Err() != nil { + return + } + if done := w.fn(cc.Ctx, cc.Event); done { + w.done.Store(true) + return + } + } + }() + }) + + select { + case <-ctx.Done(): // this is the context for the work item + return ctx.Err() + case w.pending <- pendingEvent[E]{ + Ctx: ctx, + Event: cmd, + }: + return nil + + } +} + +// A Waiter is a Notifiee whose Notify method forwards the +// notified event to a channel which a client can wait on. +type Waiter[E DhtEvent] struct { + pending chan WaiterEvent[E] + done atomic.Bool +} + +var _ Notify[DhtEvent] = (*Waiter[DhtEvent])(nil) + +func NewWaiter[E DhtEvent]() *Waiter[E] { + w := &Waiter[E]{ + pending: make(chan WaiterEvent[E], 16), + } + return w +} + +type WaiterEvent[E DhtEvent] struct { + Ctx context.Context + Event E +} + +func (w *Waiter[E]) Notify(ctx context.Context, ev E) { + if w.done.Load() { + return + } + select { + case <-ctx.Done(): // this is the context for the work item + return + case w.pending <- WaiterEvent[E]{ + Ctx: ctx, + Event: ev, + }: + return + + } +} + +// Close signals that the waiter should not forward and further calls to Notify. +// It closes the waiter channel so a client selecting on it will receive the close +// operation. +func (w *Waiter[E]) Close() { + w.done.Store(true) + close(w.pending) +} + +func (w *Waiter[E]) Chan() <-chan WaiterEvent[E] { + return w.pending +} diff --git a/v2/coord/behaviour_test.go b/v2/coord/behaviour_test.go new file mode 100644 index 00000000..0efd04a7 --- /dev/null +++ b/v2/coord/behaviour_test.go @@ -0,0 +1,54 @@ +package kademlia + +import ( + "context" + "fmt" + "reflect" + "testing" +) + +type NullSM[E any, S any] struct{} + +func (NullSM[E, S]) Advance(context.Context, E) S { + var v S + return v +} + +type RecordingSM[E any, S any] struct { + State S + Received E +} + +func NewRecordingSM[E any, S any](response S) *RecordingSM[E, S] { + return &RecordingSM[E, S]{ + State: response, + } +} + +func (r *RecordingSM[E, S]) Advance(ctx context.Context, e E) S { + r.Received = e + return r.State +} + +// expectBehaviourEvent selects on a behaviour's ready channel until it becomes ready and then checks the perform +// mehtod for the expected event type. Unexpected events are ignored and selecting resumes. +// The function returns when an event matching the type of expected is received or when the context is cancelled. 
+func expectBehaviourEvent[I DhtEvent, O DhtEvent](t *testing.T, ctx context.Context, b Behaviour[I, O], expected O) (O, error) { + t.Helper() + for { + select { + case <-b.Ready(): + ev, ok := b.Perform(ctx) + if !ok { + continue + } + t.Logf("saw event: %T\n", ev) + if reflect.TypeOf(ev) == reflect.TypeOf(expected) { + return ev, nil + } + case <-ctx.Done(): + var v O + return v, fmt.Errorf("test deadline exceeded") + } + } +} diff --git a/v2/coord/coretypes.go b/v2/coord/coretypes.go new file mode 100644 index 00000000..6e18f0f4 --- /dev/null +++ b/v2/coord/coretypes.go @@ -0,0 +1,81 @@ +package kademlia + +import ( + "context" + "errors" + "time" + + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/network/address" +) + +// Value is a value that may be stored in the DHT. +type Value[K kad.Key[K]] interface { + Key() K + MarshalBinary() ([]byte, error) +} + +// Node represent a remote node, a participant in the DHT. +type Node[K kad.Key[K], A kad.Address[A]] interface { + kad.NodeInfo[K, A] + + // GetClosestNodes requests the n closest nodes to the key from the node's local routing table. + // The node may return fewer nodes than requested. + GetClosestNodes(ctx context.Context, key K, n int) ([]Node[K, A], error) + + // GetValue requests that the node return any value associated with the supplied key. + // If the node does not have a value for the key it returns ErrValueNotFound. + GetValue(ctx context.Context, key K) (Value[K], error) + + // PutValue requests that the node stores a value to be associated with the supplied key. + // If the node cannot or chooses not to store the value for the key it returns ErrValueNotAccepted. + PutValue(ctx context.Context, r Value[K], q int) error +} + +var ( + ErrNodeNotFound = errors.New("node not found") + ErrValueNotFound = errors.New("value not found") + ErrValueNotAccepted = errors.New("value not accepted") +) + +// QueryFunc is the type of the function called by Query to visit each node. +// +// The error result returned by the function controls how Query proceeds. If the function returns the special value +// SkipNode, Query skips fetching closer nodes from the current node. If the function returns the special value +// SkipRemaining, Query skips all visiting all remaining nodes. Otherwise, if the function returns a non-nil error, +// Query stops entirely and returns that error. +// +// The stats argument contains statistics on the progress of the query so far. +type QueryFunc[K kad.Key[K], A kad.Address[A]] func(ctx context.Context, node Node[K, A], stats QueryStats) error + +type QueryStats struct { + Start time.Time // Start is the time the query began executing. + End time.Time // End is the time the query stopped executing. + Requests int // Requests is a count of the number of requests made by the query. + Success int // Success is a count of the number of nodes the query succesfully contacted. + Failure int // Failure is a count of the number of nodes the query received an error response from. + Exhausted bool // Exhausted is true if the query ended after visiting every node it could. +} + +var ( + // SkipNode is used as a return value from a QueryFunc to indicate that the node is to be skipped. + SkipNode = errors.New("skip node") + + // SkipRemaining is used as a return value a QueryFunc to indicate that all remaining nodes are to be skipped. 
+ SkipRemaining = errors.New("skip remaining nodes") +) + +// Router its a work in progress +// TODO figure out the role of protocol identifiers +type Router[K kad.Key[K], A kad.Address[A]] interface { + // SendMessage attempts to send a request to another node. The Router will absorb the addresses in to into its + // internal nodestore. This method blocks until a response is received or an error is encountered. + SendMessage(ctx context.Context, to kad.NodeInfo[K, A], protoID address.ProtocolID, req kad.Request[K, A]) (kad.Response[K, A], error) + + AddNodeInfo(ctx context.Context, info kad.NodeInfo[K, A], ttl time.Duration) error + GetNodeInfo(ctx context.Context, id kad.NodeID[K]) (kad.NodeInfo[K, A], error) + + // GetClosestNodes attempts to send a request to another node asking it for nodes that it considers to be + // closest to the target key. + GetClosestNodes(ctx context.Context, to kad.NodeInfo[K, A], target K) ([]kad.NodeInfo[K, A], error) +} diff --git a/v2/coord/dht.go b/v2/coord/dht.go new file mode 100644 index 00000000..50f62563 --- /dev/null +++ b/v2/coord/dht.go @@ -0,0 +1,416 @@ +package kademlia + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/benbjohnson/clock" + logging "github.com/ipfs/go-log/v2" + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/kaderr" + "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-kademlia/network/address" + "github.com/plprobelab/go-kademlia/query" + "github.com/plprobelab/go-kademlia/routing" + "github.com/plprobelab/go-kademlia/util" + "go.uber.org/zap/exp/zapslog" + "golang.org/x/exp/slog" +) + +// A Coordinator coordinates the state machines that comprise a Kademlia DHT +// It is only one possible configuration of the DHT components, others are possible. +type Coordinator[K kad.Key[K], A kad.Address[A]] struct { + // self is the node id of the system the dht is running on + self kad.NodeID[K] + + // cfg is a copy of the optional configuration supplied to the dht + cfg Config + + // rt is the routing table used to look up nodes by distance + rt kad.RoutingTable[K, kad.NodeID[K]] + + // rtr is the message router used to send messages + rtr Router[K, A] + + routingNotifications chan RoutingNotification + + // networkBehaviour is the behaviour responsible for communicating with the network + networkBehaviour *NetworkBehaviour[K, A] + + // routingBehaviour is the behaviour responsible for maintaining the routing table + routingBehaviour Behaviour[DhtEvent, DhtEvent] + + // queryBehaviour is the behaviour responsible for running user-submitted queries + queryBehaviour Behaviour[DhtEvent, DhtEvent] +} + +const DefaultChanqueueCapacity = 1024 + +type Config struct { + PeerstoreTTL time.Duration // duration for which a peer is kept in the peerstore + + Clock clock.Clock // a clock that may replaced by a mock when testing + + QueryConcurrency int // the maximum number of queries that may be waiting for message responses at any one time + QueryTimeout time.Duration // the time to wait before terminating a query that is not making progress + + RequestConcurrency int // the maximum number of concurrent requests that each query may have in flight + RequestTimeout time.Duration // the timeout queries should use for contacting a single node + + Logger *slog.Logger // a structured logger that should be used when logging. +} + +// Validate checks the configuration options and returns an error if any have invalid values. 
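A usage sketch, using only names defined in this file and in the kadt package from this series (the wrapper function name and the overridden values are illustrative, and imports follow the rest of the file): a caller starts from DefaultConfig, defined further down, overrides individual fields, and relies on Validate, and on NewDht itself, to reject invalid combinations.

func exampleNewCoordinator(self kadt.PeerID, rtr Router[key.Key256, ma.Multiaddr], rt routing.RoutingTableCpl[key.Key256, kad.NodeID[key.Key256]]) (*Coordinator[key.Key256, ma.Multiaddr], error) {
	cfg := DefaultConfig()
	cfg.QueryConcurrency = 6              // illustrative override of the default of 3
	cfg.RequestTimeout = 30 * time.Second // give up on a single request sooner than the default minute

	// NewDht validates the configuration as well; checking here only surfaces
	// configuration errors before any other wiring happens.
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("invalid coordinator config: %w", err)
	}

	return NewDht[key.Key256, ma.Multiaddr](self, rtr, rt, cfg)
}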
+func (cfg *Config) Validate() error { + if cfg.Clock == nil { + return &kaderr.ConfigurationError{ + Component: "DhtConfig", + Err: fmt.Errorf("clock must not be nil"), + } + } + + if cfg.QueryConcurrency < 1 { + return &kaderr.ConfigurationError{ + Component: "DhtConfig", + Err: fmt.Errorf("query concurrency must be greater than zero"), + } + } + if cfg.QueryTimeout < 1 { + return &kaderr.ConfigurationError{ + Component: "DhtConfig", + Err: fmt.Errorf("query timeout must be greater than zero"), + } + } + + if cfg.RequestConcurrency < 1 { + return &kaderr.ConfigurationError{ + Component: "DhtConfig", + Err: fmt.Errorf("request concurrency must be greater than zero"), + } + } + + if cfg.RequestTimeout < 1 { + return &kaderr.ConfigurationError{ + Component: "DhtConfig", + Err: fmt.Errorf("request timeout must be greater than zero"), + } + } + + if cfg.Logger == nil { + return &kaderr.ConfigurationError{ + Component: "DhtConfig", + Err: fmt.Errorf("logger must not be nil"), + } + } + return nil +} + +func DefaultConfig() *Config { + return &Config{ + Clock: clock.New(), // use standard time + PeerstoreTTL: 10 * time.Minute, + QueryConcurrency: 3, + QueryTimeout: 5 * time.Minute, + RequestConcurrency: 3, + RequestTimeout: time.Minute, + Logger: slog.New(zapslog.NewHandler(logging.Logger("dht").Desugar().Core())), + } +} + +func NewDht[K kad.Key[K], A kad.Address[A]](self kad.NodeID[K], rtr Router[K, A], rt routing.RoutingTableCpl[K, kad.NodeID[K]], cfg *Config) (*Coordinator[K, A], error) { + if cfg == nil { + cfg = DefaultConfig() + } else if err := cfg.Validate(); err != nil { + return nil, err + } + + qpCfg := query.DefaultPoolConfig() + qpCfg.Clock = cfg.Clock + qpCfg.Concurrency = cfg.QueryConcurrency + qpCfg.Timeout = cfg.QueryTimeout + qpCfg.QueryConcurrency = cfg.RequestConcurrency + qpCfg.RequestTimeout = cfg.RequestTimeout + + qp, err := query.NewPool[K, A](self, qpCfg) + if err != nil { + return nil, fmt.Errorf("query pool: %w", err) + } + queryBehaviour := NewPooledQueryBehaviour(qp, cfg.Logger) + + bootstrapCfg := routing.DefaultBootstrapConfig[K, A]() + bootstrapCfg.Clock = cfg.Clock + bootstrapCfg.Timeout = cfg.QueryTimeout + bootstrapCfg.RequestConcurrency = cfg.RequestConcurrency + bootstrapCfg.RequestTimeout = cfg.RequestTimeout + + bootstrap, err := routing.NewBootstrap(self, bootstrapCfg) + if err != nil { + return nil, fmt.Errorf("bootstrap: %w", err) + } + + includeCfg := routing.DefaultIncludeConfig() + includeCfg.Clock = cfg.Clock + includeCfg.Timeout = cfg.QueryTimeout + + // TODO: expose config + // includeCfg.QueueCapacity = cfg.IncludeQueueCapacity + // includeCfg.Concurrency = cfg.IncludeConcurrency + // includeCfg.Timeout = cfg.IncludeTimeout + + include, err := routing.NewInclude[K, A](rt, includeCfg) + if err != nil { + return nil, fmt.Errorf("include: %w", err) + } + + probeCfg := routing.DefaultProbeConfig() + probeCfg.Clock = cfg.Clock + probeCfg.Timeout = cfg.QueryTimeout + + // TODO: expose config + // probeCfg.Concurrency = cfg.ProbeConcurrency + probe, err := routing.NewProbe[K, A](rt, probeCfg) + if err != nil { + return nil, fmt.Errorf("include: %w", err) + } + + routingBehaviour := NewRoutingBehaviour[K, A](self, bootstrap, include, probe, cfg.Logger) + + networkBehaviour := NewNetworkBehaviour(rtr, cfg.Logger) + + d := &Coordinator[K, A]{ + self: self, + cfg: *cfg, + rtr: rtr, + rt: rt, + + networkBehaviour: networkBehaviour, + routingBehaviour: routingBehaviour, + queryBehaviour: queryBehaviour, + + routingNotifications: make(chan 
RoutingNotification, 20), + } + go d.eventLoop() + + return d, nil +} + +func (d *Coordinator[K, A]) ID() kad.NodeID[K] { + return d.self +} + +func (d *Coordinator[K, A]) Addresses() []A { + // TODO: return configured listen addresses + info, err := d.rtr.GetNodeInfo(context.TODO(), d.self) + if err != nil { + return nil + } + return info.Addresses() +} + +// RoutingNotifications returns a channel that may be read to be notified of routing updates +func (d *Coordinator[K, A]) RoutingNotifications() <-chan RoutingNotification { + return d.routingNotifications +} + +func (d *Coordinator[K, A]) eventLoop() { + ctx := context.Background() + + for { + var ev DhtEvent + var ok bool + select { + case <-d.networkBehaviour.Ready(): + ev, ok = d.networkBehaviour.Perform(ctx) + case <-d.routingBehaviour.Ready(): + ev, ok = d.routingBehaviour.Perform(ctx) + case <-d.queryBehaviour.Ready(): + ev, ok = d.queryBehaviour.Perform(ctx) + } + + if ok { + d.dispatchDhtNotice(ctx, ev) + } + } +} + +func (c *Coordinator[K, A]) dispatchDhtNotice(ctx context.Context, ev DhtEvent) { + ctx, span := util.StartSpan(ctx, "Dht.dispatchDhtNotice") + defer span.End() + + switch ev := ev.(type) { + case *EventDhtStartBootstrap[K, A]: + c.routingBehaviour.Notify(ctx, ev) + case *EventOutboundGetClosestNodes[K, A]: + c.networkBehaviour.Notify(ctx, ev) + case *EventStartQuery[K, A]: + c.queryBehaviour.Notify(ctx, ev) + case *EventStopQuery: + c.queryBehaviour.Notify(ctx, ev) + case *EventDhtAddNodeInfo[K, A]: + c.routingBehaviour.Notify(ctx, ev) + case RoutingNotification: + select { + case <-ctx.Done(): + case c.routingNotifications <- ev: + default: + } + } +} + +// GetNode retrieves the node associated with the given node id from the DHT's local routing table. +// If the node isn't found in the table, it returns ErrNodeNotFound. +func (d *Coordinator[K, A]) GetNode(ctx context.Context, id kad.NodeID[K]) (Node[K, A], error) { + if _, exists := d.rt.GetNode(id.Key()); !exists { + return nil, ErrNodeNotFound + } + + nh, err := d.networkBehaviour.getNodeHandler(ctx, id) + if err != nil { + return nil, err + } + return nh, nil +} + +// GetClosestNodes requests the n closest nodes to the key from the node's local routing table. +func (d *Coordinator[K, A]) GetClosestNodes(ctx context.Context, k K, n int) ([]Node[K, A], error) { + closest := d.rt.NearestNodes(k, n) + nodes := make([]Node[K, A], 0, len(closest)) + for _, id := range closest { + nh, err := d.networkBehaviour.getNodeHandler(ctx, id) + if err != nil { + return nil, err + } + nodes = append(nodes, nh) + } + return nodes, nil +} + +// GetValue requests that the node return any value associated with the supplied key. +// If the node does not have a value for the key it returns ErrValueNotFound. +func (d *Coordinator[K, A]) GetValue(ctx context.Context, key K) (Value[K], error) { + panic("not implemented") +} + +// PutValue requests that the node stores a value to be associated with the supplied key. +// If the node cannot or chooses not to store the value for the key it returns ErrValueNotAccepted. +func (d *Coordinator[K, A]) PutValue(ctx context.Context, r Value[K], q int) error { + panic("not implemented") +} + +// Query traverses the DHT calling fn for each node visited. 
+func (d *Coordinator[K, A]) Query(ctx context.Context, target K, fn QueryFunc[K, A]) (QueryStats, error) { + ctx, span := util.StartSpan(ctx, "Dht.Query") + defer span.End() + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + seeds, err := d.GetClosestNodes(ctx, target, 20) + if err != nil { + return QueryStats{}, err + } + + seedIDs := make([]kad.NodeID[K], 0, len(seeds)) + for _, s := range seeds { + seedIDs = append(seedIDs, s.ID()) + } + + waiter := NewWaiter[DhtEvent]() + queryID := query.QueryID("foo") + + cmd := &EventStartQuery[K, A]{ + QueryID: queryID, + Target: target, + ProtocolID: address.ProtocolID("TODO"), + Message: &fakeMessage[K, A]{key: target}, + KnownClosestNodes: seedIDs, + Notify: waiter, + } + + // queue the start of the query + d.queryBehaviour.Notify(ctx, cmd) + + var lastStats QueryStats + for { + select { + case <-ctx.Done(): + return lastStats, ctx.Err() + case wev := <-waiter.Chan(): + ctx, ev := wev.Ctx, wev.Event + switch ev := ev.(type) { + case *EventQueryProgressed[K, A]: + lastStats = QueryStats{ + Start: ev.Stats.Start, + Requests: ev.Stats.Requests, + Success: ev.Stats.Success, + Failure: ev.Stats.Failure, + } + nh, err := d.networkBehaviour.getNodeHandler(ctx, ev.NodeID) + if err != nil { + // ignore unknown node + break + } + + err = fn(ctx, nh, lastStats) + if errors.Is(err, SkipRemaining) { + // done + d.queryBehaviour.Notify(ctx, &EventStopQuery{QueryID: queryID}) + return lastStats, nil + } + if errors.Is(err, SkipNode) { + // TODO: don't add closer nodes from this node + break + } + if err != nil { + // user defined error that terminates the query + d.queryBehaviour.Notify(ctx, &EventStopQuery{QueryID: queryID}) + return lastStats, err + } + + case *EventQueryFinished: + // query is done + lastStats.Exhausted = true + return lastStats, nil + + default: + panic(fmt.Sprintf("unexpected event: %T", ev)) + } + } + } +} + +// AddNodes suggests new DHT nodes and their associated addresses to be added to the routing table. +// If the routing table is updated as a result of this operation an EventRoutingUpdated notification +// is emitted on the routing notification channel. +func (d *Coordinator[K, A]) AddNodes(ctx context.Context, infos []kad.NodeInfo[K, A]) error { + ctx, span := util.StartSpan(ctx, "Dht.AddNodes") + defer span.End() + for _, info := range infos { + if key.Equal(info.ID().Key(), d.self.Key()) { + // skip self + continue + } + + d.routingBehaviour.Notify(ctx, &EventDhtAddNodeInfo[K, A]{ + NodeInfo: info, + }) + + } + + return nil +} + +// Bootstrap instructs the dht to begin bootstrapping the routing table. 
+func (d *Coordinator[K, A]) Bootstrap(ctx context.Context, seeds []kad.NodeID[K]) error { + ctx, span := util.StartSpan(ctx, "Dht.Bootstrap") + defer span.End() + d.routingBehaviour.Notify(ctx, &EventDhtStartBootstrap[K, A]{ + // Bootstrap state machine uses the message + Message: &fakeMessage[K, A]{key: d.self.Key()}, + SeedNodes: seeds, + }) + + return nil +} diff --git a/v2/coord/dht_test.go b/v2/coord/dht_test.go new file mode 100644 index 00000000..f9dba8c1 --- /dev/null +++ b/v2/coord/dht_test.go @@ -0,0 +1,286 @@ +package kademlia + +import ( + "context" + "fmt" + "log" + "reflect" + "testing" + "time" + + "github.com/benbjohnson/clock" + "github.com/stretchr/testify/require" + + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" + + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/kadtest" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest" +) + +const peerstoreTTL = 10 * time.Minute + +// expectEventType selects on the event channel until an event of the expected type is sent. +func expectEventType(t *testing.T, ctx context.Context, events <-chan RoutingNotification, expected RoutingNotification) (RoutingNotification, error) { + t.Helper() + for { + select { + case ev := <-events: + t.Logf("saw event: %T\n", ev) + if reflect.TypeOf(ev) == reflect.TypeOf(expected) { + return ev, nil + } + case <-ctx.Done(): + return nil, fmt.Errorf("test deadline exceeded while waiting for event %T", expected) + } + } +} + +func TestConfigValidate(t *testing.T) { + t.Run("default is valid", func(t *testing.T) { + cfg := DefaultConfig() + require.NoError(t, cfg.Validate()) + }) + + t.Run("clock is not nil", func(t *testing.T) { + cfg := DefaultConfig() + cfg.Clock = nil + require.Error(t, cfg.Validate()) + }) + + t.Run("query concurrency positive", func(t *testing.T) { + cfg := DefaultConfig() + cfg.QueryConcurrency = 0 + require.Error(t, cfg.Validate()) + cfg.QueryConcurrency = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("query timeout positive", func(t *testing.T) { + cfg := DefaultConfig() + cfg.QueryTimeout = 0 + require.Error(t, cfg.Validate()) + cfg.QueryTimeout = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("request concurrency positive", func(t *testing.T) { + cfg := DefaultConfig() + cfg.RequestConcurrency = 0 + require.Error(t, cfg.Validate()) + cfg.QueryConcurrency = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("request timeout positive", func(t *testing.T) { + cfg := DefaultConfig() + cfg.RequestTimeout = 0 + require.Error(t, cfg.Validate()) + cfg.RequestTimeout = -1 + require.Error(t, cfg.Validate()) + }) + + t.Run("logger not nil", func(t *testing.T) { + cfg := DefaultConfig() + cfg.Logger = nil + require.Error(t, cfg.Validate()) + }) +} + +func TestExhaustiveQuery(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes := nettest.LinearTopology(4, clk) + ccfg := DefaultConfig() + ccfg.Clock = clk + ccfg.PeerstoreTTL = peerstoreTTL + + // A (ids[0]) is looking for D (ids[3]) + // A will first ask B, B will reply with C's address (and A's address) + // A will then ask C, C will reply with D's address (and B's address) + self := nodes[0].NodeInfo.ID() + c, err := NewDht[key.Key8, kadtest.StrAddr](self, nodes[0].Router, nodes[0].RoutingTable, ccfg) + require.NoError(t, err) + + target := nodes[3].NodeInfo.ID().Key() + + visited := make(map[string]int) + + // Record the nodes as they are visited + qfn := func(ctx context.Context, node Node[key.Key8, 
kadtest.StrAddr], stats QueryStats) error { + visited[node.ID().String()]++ + return nil + } + + // Run a query to find the value + _, err = c.Query(ctx, target, qfn) + require.NoError(t, err) + + require.Equal(t, 3, len(visited)) + require.Contains(t, visited, nodes[1].NodeInfo.ID().String()) + require.Contains(t, visited, nodes[2].NodeInfo.ID().String()) + require.Contains(t, visited, nodes[3].NodeInfo.ID().String()) +} + +func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes := nettest.LinearTopology(4, clk) + + ccfg := DefaultConfig() + ccfg.Clock = clk + ccfg.PeerstoreTTL = peerstoreTTL + + // A (ids[0]) is looking for D (ids[3]) + // A will first ask B, B will reply with C's address (and A's address) + // A will then ask C, C will reply with D's address (and B's address) + self := nodes[0].NodeInfo.ID() + c, err := NewDht[key.Key8, kadtest.StrAddr](self, nodes[0].Router, nodes[0].RoutingTable, ccfg) + if err != nil { + log.Fatalf("unexpected error creating dht: %v", err) + } + + buffer := make(chan RoutingNotification, 5) + go func() { + for { + select { + case <-ctx.Done(): + return + case ev := <-c.RoutingNotifications(): + buffer <- ev + } + } + }() + + qfn := func(ctx context.Context, node Node[key.Key8, kadtest.StrAddr], stats QueryStats) error { + return nil + } + + // Run a query to find the value + target := nodes[3].NodeInfo.ID().Key() + _, err = c.Query(ctx, target, qfn) + require.NoError(t, err) + + // the query run by the dht should have received a response from nodes[1] with closer nodes + // nodes[0] and nodes[2] which should trigger a routing table update since nodes[2] was + // not in the dht's routing table. + ev, err := expectEventType(t, ctx, buffer, &EventRoutingUpdated[key.Key8, kadtest.StrAddr]{}) + require.NoError(t, err) + + tev := ev.(*EventRoutingUpdated[key.Key8, kadtest.StrAddr]) + require.Equal(t, nodes[2].NodeInfo.ID(), tev.NodeInfo.ID()) + + // no EventRoutingUpdated is sent for the self node + + // the query continues and should have received a response from nodes[2] with closer nodes + // nodes[1] and nodes[3] which should trigger a routing table update since nodes[3] was + // not in the dht's routing table. 
+ ev, err = expectEventType(t, ctx, buffer, &EventRoutingUpdated[key.Key8, kadtest.StrAddr]{}) + require.NoError(t, err) + + tev = ev.(*EventRoutingUpdated[key.Key8, kadtest.StrAddr]) + require.Equal(t, nodes[3].NodeInfo.ID(), tev.NodeInfo.ID()) +} + +func TestBootstrap(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes := nettest.LinearTopology(4, clk) + + ccfg := DefaultConfig() + ccfg.Clock = clk + ccfg.PeerstoreTTL = peerstoreTTL + + self := nodes[0].NodeInfo.ID() + d, err := NewDht[key.Key8, kadtest.StrAddr](self, nodes[0].Router, nodes[0].RoutingTable, ccfg) + if err != nil { + log.Fatalf("unexpected error creating dht: %v", err) + } + + buffer := make(chan RoutingNotification, 5) + go func() { + for { + select { + case <-ctx.Done(): + return + case ev := <-d.RoutingNotifications(): + buffer <- ev + } + } + }() + + seeds := []kad.NodeID[key.Key8]{ + nodes[1].NodeInfo.ID(), + } + err = d.Bootstrap(ctx, seeds) + require.NoError(t, err) + + // the query run by the dht should have completed + ev, err := expectEventType(t, ctx, buffer, &EventBootstrapFinished{}) + require.NoError(t, err) + + require.IsType(t, &EventBootstrapFinished{}, ev) + tevf := ev.(*EventBootstrapFinished) + require.Equal(t, 3, tevf.Stats.Requests) + require.Equal(t, 3, tevf.Stats.Success) + require.Equal(t, 0, tevf.Stats.Failure) + + // DHT should now have node1 in its routing table + _, err = d.GetNode(ctx, nodes[1].NodeInfo.ID()) + require.NoError(t, err) + + // DHT should now have node2 in its routing table + _, err = d.GetNode(ctx, nodes[2].NodeInfo.ID()) + require.NoError(t, err) + + // DHT should now have node3 in its routing table + _, err = d.GetNode(ctx, nodes[3].NodeInfo.ID()) + require.NoError(t, err) +} + +func TestIncludeNode(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes := nettest.LinearTopology(4, clk) + + ccfg := DefaultConfig() + ccfg.Clock = clk + ccfg.PeerstoreTTL = peerstoreTTL + + candidate := nodes[len(nodes)-1].NodeInfo // not in nodes[0] routing table + + self := nodes[0].NodeInfo.ID() + d, err := NewDht[key.Key8, kadtest.StrAddr](self, nodes[0].Router, nodes[0].RoutingTable, ccfg) + if err != nil { + log.Fatalf("unexpected error creating dht: %v", err) + } + + // the routing table should not contain the node yet + _, err = d.GetNode(ctx, candidate.ID()) + require.ErrorIs(t, err, ErrNodeNotFound) + + events := d.RoutingNotifications() + + // inject a new node into the dht's includeEvents queue + err = d.AddNodes(ctx, []kad.NodeInfo[key.Key8, kadtest.StrAddr]{candidate}) + require.NoError(t, err) + + // the include state machine runs in the background and eventually should add the node to routing table + ev, err := expectEventType(t, ctx, events, &EventRoutingUpdated[key.Key8, kadtest.StrAddr]{}) + require.NoError(t, err) + + tev := ev.(*EventRoutingUpdated[key.Key8, kadtest.StrAddr]) + require.Equal(t, candidate.ID(), tev.NodeInfo.ID()) + + // the routing table should not contain the node yet + _, err = d.GetNode(ctx, candidate.ID()) + require.NoError(t, err) +} diff --git a/v2/coord/event.go b/v2/coord/event.go new file mode 100644 index 00000000..697289b2 --- /dev/null +++ b/v2/coord/event.go @@ -0,0 +1,133 @@ +package kademlia + +import ( + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/network/address" + "github.com/plprobelab/go-kademlia/query" +) + +type DhtEvent interface { + dhtEvent() +} + +type DhtCommand interface { + DhtEvent + 
dhtCommand() +} + +type NodeHandlerRequest interface { + DhtEvent + nodeHandlerRequest() +} + +type NodeHandlerResponse interface { + DhtEvent + nodeHandlerResponse() +} + +type RoutingNotification interface { + DhtEvent + routingNotificationEvent() +} + +type EventDhtStartBootstrap[K kad.Key[K], A kad.Address[A]] struct { + ProtocolID address.ProtocolID + Message kad.Request[K, A] + SeedNodes []kad.NodeID[K] +} + +func (EventDhtStartBootstrap[K, A]) dhtEvent() {} +func (EventDhtStartBootstrap[K, A]) dhtCommand() {} + +type EventOutboundGetClosestNodes[K kad.Key[K], A kad.Address[A]] struct { + QueryID query.QueryID + To kad.NodeInfo[K, A] + Target K + Notify Notify[DhtEvent] +} + +func (EventOutboundGetClosestNodes[K, A]) dhtEvent() {} +func (EventOutboundGetClosestNodes[K, A]) nodeHandlerRequest() {} + +type EventStartQuery[K kad.Key[K], A kad.Address[A]] struct { + QueryID query.QueryID + Target K + ProtocolID address.ProtocolID + Message kad.Request[K, A] + KnownClosestNodes []kad.NodeID[K] + Notify NotifyCloser[DhtEvent] +} + +func (EventStartQuery[K, A]) dhtEvent() {} +func (EventStartQuery[K, A]) dhtCommand() {} + +type EventStopQuery struct { + QueryID query.QueryID +} + +func (EventStopQuery) dhtEvent() {} +func (EventStopQuery) dhtCommand() {} + +type EventDhtAddNodeInfo[K kad.Key[K], A kad.Address[A]] struct { + NodeInfo kad.NodeInfo[K, A] +} + +func (EventDhtAddNodeInfo[K, A]) dhtEvent() {} +func (EventDhtAddNodeInfo[K, A]) dhtCommand() {} + +type EventGetClosestNodesSuccess[K kad.Key[K], A kad.Address[A]] struct { + QueryID query.QueryID + To kad.NodeInfo[K, A] + Target K + ClosestNodes []kad.NodeInfo[K, A] +} + +func (EventGetClosestNodesSuccess[K, A]) dhtEvent() {} +func (EventGetClosestNodesSuccess[K, A]) nodeHandlerResponse() {} + +type EventGetClosestNodesFailure[K kad.Key[K], A kad.Address[A]] struct { + QueryID query.QueryID + To kad.NodeInfo[K, A] + Target K + Err error +} + +func (EventGetClosestNodesFailure[K, A]) dhtEvent() {} +func (EventGetClosestNodesFailure[K, A]) nodeHandlerResponse() {} + +// EventQueryProgressed is emitted by the dht when a query has received a +// response from a node. +type EventQueryProgressed[K kad.Key[K], A kad.Address[A]] struct { + QueryID query.QueryID + NodeID kad.NodeID[K] + Response kad.Response[K, A] + Stats query.QueryStats +} + +func (*EventQueryProgressed[K, A]) dhtEvent() {} + +// EventQueryFinished is emitted by the dht when a query has finished, either through +// running to completion or by being canceled. +type EventQueryFinished struct { + QueryID query.QueryID + Stats query.QueryStats +} + +func (*EventQueryFinished) dhtEvent() {} + +// EventRoutingUpdated is emitted by the dht when a new node has been verified and added to the routing table. +type EventRoutingUpdated[K kad.Key[K], A kad.Address[A]] struct { + NodeInfo kad.NodeInfo[K, A] +} + +func (*EventRoutingUpdated[K, A]) dhtEvent() {} +func (*EventRoutingUpdated[K, A]) routingNotificationEvent() {} + +// EventBootstrapFinished is emitted by the dht when a bootstrap has finished, either through +// running to completion or by being canceled. 
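As a usage illustration for the event types above, the following sketch drains the coordinator's routing notification channel and reacts to the two notification events defined in this file. The logRoutingEvents function and its logger wiring are assumptions made for the example; it is not part of the patch.

```go
package kademlia

import (
	"context"
	"fmt"

	"github.com/plprobelab/go-kademlia/kad"
	"golang.org/x/exp/slog"
)

// logRoutingEvents is a hypothetical consumer of the coordinator's routing
// notifications; it logs each notification it recognises until ctx is done.
func logRoutingEvents[K kad.Key[K], A kad.Address[A]](ctx context.Context, c *Coordinator[K, A], logger *slog.Logger) {
	for {
		select {
		case <-ctx.Done():
			return
		case ev := <-c.RoutingNotifications():
			switch tev := ev.(type) {
			case *EventRoutingUpdated[K, A]:
				// a new node was verified and added to the routing table
				logger.Info("routing updated", "node", tev.NodeInfo.ID().String())
			case *EventBootstrapFinished:
				// the bootstrap ran to completion or was cancelled
				logger.Info("bootstrap finished", "requests", tev.Stats.Requests, "success", tev.Stats.Success)
			default:
				logger.Info("routing notification", "type", fmt.Sprintf("%T", ev))
			}
		}
	}
}
```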
+type EventBootstrapFinished struct { + Stats query.QueryStats +} + +func (*EventBootstrapFinished) dhtEvent() {} +func (*EventBootstrapFinished) routingNotificationEvent() {} diff --git a/v2/coord/internal/kadtest/bench.go b/v2/coord/internal/kadtest/bench.go new file mode 100644 index 00000000..7bdc9035 --- /dev/null +++ b/v2/coord/internal/kadtest/bench.go @@ -0,0 +1,11 @@ +//go:build go1.20 + +package kadtest + +import "testing" + +// ReportTimePerItemMetric adds a custom metric to a benchmark that reports the number of nanoseconds taken per item. +func ReportTimePerItemMetric(b *testing.B, n int, name string) { + // b.Elapsed was added in Go 1.20 + b.ReportMetric(float64(b.Elapsed().Nanoseconds())/float64(n), "ns/"+name) +} diff --git a/v2/coord/internal/kadtest/bench_pre120.go b/v2/coord/internal/kadtest/bench_pre120.go new file mode 100644 index 00000000..643dff49 --- /dev/null +++ b/v2/coord/internal/kadtest/bench_pre120.go @@ -0,0 +1,10 @@ +//go:build !go1.20 + +package kadtest + +import "testing" + +// ReportTimePerItemMetric is a no-op on versions of Go before 1.20 +func ReportTimePerItemMetric(b *testing.B, n int, name string) { + // no-op +} diff --git a/v2/coord/internal/kadtest/context.go b/v2/coord/internal/kadtest/context.go new file mode 100644 index 00000000..659d328f --- /dev/null +++ b/v2/coord/internal/kadtest/context.go @@ -0,0 +1,30 @@ +package kadtest + +import ( + "context" + "testing" + "time" +) + +// CtxShort returns a Context and a CancelFunc. The context will be +// cancelled after 10 seconds or just before the test binary deadline (as +// specified by the -timeout flag when running the test), whichever is +// sooner. The CancelFunc may be called to cancel the context earlier than +// the deadline. +func CtxShort(t *testing.T) (context.Context, context.CancelFunc) { + t.Helper() + + timeout := 10 * time.Second + goal := time.Now().Add(timeout) + + deadline, ok := t.Deadline() + if !ok { + deadline = goal + } else { + deadline = deadline.Add(-time.Second) + if deadline.After(goal) { + deadline = goal + } + } + return context.WithDeadline(context.Background(), deadline) +} diff --git a/v2/coord/internal/kadtest/kadtypes.go b/v2/coord/internal/kadtest/kadtypes.go new file mode 100644 index 00000000..6ce09dd1 --- /dev/null +++ b/v2/coord/internal/kadtest/kadtypes.go @@ -0,0 +1,107 @@ +package kadtest + +import ( + "crypto/sha256" + "net" + + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" +) + +// ID is a concrete implementation of the NodeID interface. +type ID[K kad.Key[K]] struct { + key K +} + +// interface assertion. Using the concrete key type of key.Key8 does not +// limit the validity of the assertion for other key types. +var _ kad.NodeID[key.Key8] = (*ID[key.Key8])(nil) + +// NewID returns a new Kademlia identifier that implements the NodeID interface. +// Instead of deriving the Kademlia key from a NodeID, this method directly takes +// the Kademlia key. +func NewID[K kad.Key[K]](k K) *ID[K] { + return &ID[K]{key: k} +} + +// Key returns the Kademlia key that is used by, e.g., the routing table +// implementation to group nodes into buckets. The returned key was manually +// defined in the ID constructor NewID and not derived via, e.g., hashing +// a preimage. 
+func (i ID[K]) Key() K { + return i.key +} + +func (i ID[K]) Equal(other K) bool { + return i.key.Compare(other) == 0 +} + +func (i ID[K]) String() string { + return key.HexString(i.key) +} + +type StringID string + +var _ kad.NodeID[key.Key256] = (*StringID)(nil) + +func NewStringID(s string) *StringID { + return (*StringID)(&s) +} + +func (s StringID) Key() key.Key256 { + h := sha256.New() + h.Write([]byte(s)) + return key.NewKey256(h.Sum(nil)) +} + +func (s StringID) NodeID() kad.NodeID[key.Key256] { + return &s +} + +func (s StringID) Equal(other string) bool { + return string(s) == other +} + +func (s StringID) String() string { + return string(s) +} + +type Info[K kad.Key[K], A kad.Address[A]] struct { + id *ID[K] + addrs []A +} + +var _ kad.NodeInfo[key.Key8, net.IP] = (*Info[key.Key8, net.IP])(nil) + +func NewInfo[K kad.Key[K], A kad.Address[A]](id *ID[K], addrs []A) *Info[K, A] { + return &Info[K, A]{ + id: id, + addrs: addrs, + } +} + +func (a *Info[K, A]) AddAddr(addr A) { + a.addrs = append(a.addrs, addr) +} + +func (a *Info[K, A]) RemoveAddr(addr A) { + writeIndex := 0 + // remove all occurrences of addr + for _, ad := range a.addrs { + if !ad.Equal(addr) { + a.addrs[writeIndex] = ad + writeIndex++ + } + } + a.addrs = a.addrs[:writeIndex] +} + +func (a *Info[K, A]) ID() kad.NodeID[K] { + return a.id +} + +func (a *Info[K, A]) Addresses() []A { + addresses := make([]A, len(a.addrs)) + copy(addresses, a.addrs) + return addresses +} diff --git a/v2/coord/internal/kadtest/message.go b/v2/coord/internal/kadtest/message.go new file mode 100644 index 00000000..05071cee --- /dev/null +++ b/v2/coord/internal/kadtest/message.go @@ -0,0 +1,81 @@ +package kadtest + +import ( + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" +) + +// StrAddr is a simple implementation of kad.Address that uses a string to represent the address. 
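A small fixture sketch may help show how these kadtest helpers compose. The key and address literals below are arbitrary values chosen for illustration, and the test itself is not part of the patch.

```go
package kadtest_test

import (
	"testing"

	"github.com/plprobelab/go-kademlia/key"
	"github.com/stretchr/testify/require"

	"github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/kadtest"
)

func TestFixtureSketch(t *testing.T) {
	// an 8-bit test identity with the high bit set
	id := kadtest.NewID(key.Key8(0x80))

	// a node info carrying two string addresses
	info := kadtest.NewInfo(id, []kadtest.StrAddr{"addr-a"})
	info.AddAddr("addr-b")

	require.Equal(t, id, info.ID())
	require.Len(t, info.Addresses(), 2)

	// removing an address leaves only the remaining one
	info.RemoveAddr("addr-a")
	require.Equal(t, []kadtest.StrAddr{"addr-b"}, info.Addresses())
}
```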
+type StrAddr string + +var _ kad.Address[StrAddr] = StrAddr("") + +func (a StrAddr) Equal(b StrAddr) bool { return a == b } + +type Request[K kad.Key[K]] struct { + target K + id string +} + +func NewRequest[K kad.Key[K]](id string, target K) *Request[K] { + return &Request[K]{ + target: target, + id: id, + } +} + +func (r *Request[K]) Target() K { + return r.target +} + +func (r *Request[K]) ID() string { + return r.id +} + +func (r *Request[K]) EmptyResponse() kad.Response[K, StrAddr] { + return &Response[K]{} +} + +type Response[K kad.Key[K]] struct { + id string + closer []kad.NodeInfo[K, StrAddr] +} + +func NewResponse[K kad.Key[K]](id string, closer []kad.NodeInfo[K, StrAddr]) *Response[K] { + return &Response[K]{ + id: id, + closer: closer, + } +} + +func (r *Response[K]) ID() string { + return r.id +} + +func (r *Response[K]) CloserNodes() []kad.NodeInfo[K, StrAddr] { + return r.closer +} + +type ( + // Request8 is a Request message that uses key.Key8 + Request8 = Request[key.Key8] + + // Response8 is a Response message that uses key.Key8 + Response8 = Response[key.Key8] + + // Request8 is a Request message that uses key.Key256 + Request256 = Request[key.Key256] + + // Response256 is a Response message that uses key.Key256 + Response256 = Response[key.Key256] +) + +var ( + _ kad.Request[key.Key8, StrAddr] = (*Request8)(nil) + _ kad.Response[key.Key8, StrAddr] = (*Response8)(nil) +) + +var ( + _ kad.Request[key.Key256, StrAddr] = (*Request256)(nil) + _ kad.Response[key.Key256, StrAddr] = (*Response256)(nil) +) diff --git a/v2/coord/internal/kadtest/rand.go b/v2/coord/internal/kadtest/rand.go new file mode 100644 index 00000000..7c4be431 --- /dev/null +++ b/v2/coord/internal/kadtest/rand.go @@ -0,0 +1,45 @@ +package kadtest + +import ( + "math/rand" + "strconv" + + "github.com/plprobelab/go-kademlia/key" +) + +var rng = rand.New(rand.NewSource(299792458)) + +// RandomKey returns a random 32-bit Kademlia key. +func RandomKey() key.Key32 { + return key.Key32(rng.Uint32()) +} + +// RandomKeyWithPrefix returns a 32-bit Kademlia key having a prefix equal to the bit pattern held in s and +// random following bits. A prefix of up to 32 bits is supported. +func RandomKeyWithPrefix(s string) key.Key32 { + kk := RandomKey() + if s == "" { + return kk + } + + prefixbits := len(s) + if prefixbits > 32 { + panic("RandomKeyWithPrefix: prefix too long") + } + n, err := strconv.ParseInt(s, 2, 32) + if err != nil { + panic("RandomKeyWithPrefix: " + err.Error()) + } + prefix := uint32(n) << (32 - prefixbits) + + v := uint32(kk) << prefixbits + v >>= prefixbits + + return key.Key32(v | prefix) +} + +// Key256WithLeadingBytes returns a 256-bit Kademlia key consisting of the given leading bytes padded by +// zero bytes to the end of the key. +func Key256WithLeadingBytes(in []byte) key.Key256 { + return key.NewKey256(append(in, make([]byte, 32-len(in))...)) +} diff --git a/v2/coord/internal/nettest/layouts.go b/v2/coord/internal/nettest/layouts.go new file mode 100644 index 00000000..66e53a27 --- /dev/null +++ b/v2/coord/internal/nettest/layouts.go @@ -0,0 +1,55 @@ +package nettest + +import ( + "context" + + "github.com/benbjohnson/clock" + + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-kademlia/routing/simplert" + + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/kadtest" +) + +// LinearTopology creates a network topology consisting of n nodes peered in a linear chain. 
+// The nodes are configured with routing tables that contain immediate neighbours. +// It returns the topology and the nodes ordered such that nodes[x] has nodes[x-1] and nodes[x+1] in its routing table +// The topology is not a ring: nodes[0] only has nodes[1] in its table and nodes[n-1] only has nodes[n-2] in its table. +// nodes[1] has nodes[0] and nodes[2] in its routing table. +// If n > 2 then the first and last nodes will not have one another in their routing tables. +func LinearTopology(n int, clk *clock.Mock) (*Topology[key.Key8, kadtest.StrAddr], []*Node[key.Key8, kadtest.StrAddr]) { + nodes := make([]*Node[key.Key8, kadtest.StrAddr], n) + + top := NewTopology[key.Key8, kadtest.StrAddr](clk) + + for i := range nodes { + id := kadtest.NewID(key.Key8(byte(i))) + nodes[i] = &Node[key.Key8, kadtest.StrAddr]{ + NodeInfo: kadtest.NewInfo(id, []kadtest.StrAddr{}), + Router: NewRouter[key.Key8](id, top), + RoutingTable: simplert.New[key.Key8, kad.NodeID[key.Key8]](id, 2), + } + } + + // Define the network topology, with default network links between every node + for i := 0; i < len(nodes); i++ { + for j := i + 1; j < len(nodes); j++ { + top.ConnectNodes(nodes[i], nodes[j]) + } + } + + // Connect nodes in a chain + for i := 0; i < len(nodes); i++ { + if i > 0 { + nodes[i].Router.AddNodeInfo(context.Background(), nodes[i-1].NodeInfo, 0) + nodes[i].RoutingTable.AddNode(nodes[i-1].NodeInfo.ID()) + } + if i < len(nodes)-1 { + nodes[i].Router.AddNodeInfo(context.Background(), nodes[i+1].NodeInfo, 0) + nodes[i].RoutingTable.AddNode(nodes[i+1].NodeInfo.ID()) + } + } + + return top, nodes +} diff --git a/v2/coord/internal/nettest/routing.go b/v2/coord/internal/nettest/routing.go new file mode 100644 index 00000000..87492210 --- /dev/null +++ b/v2/coord/internal/nettest/routing.go @@ -0,0 +1,144 @@ +package nettest + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-kademlia/network/address" + "github.com/plprobelab/go-kademlia/network/endpoint" + "github.com/plprobelab/go-kademlia/sim" +) + +// Link represents the route between two nodes. It allows latency and transport failures to be simulated. +type Link interface { + ConnLatency() time.Duration // the simulated time taken to return an error or successful outcome + DialLatency() time.Duration // the simulated time taken to connect to a node + DialErr() error // an error that should be returned on dial, nil if the dial is successful +} + +// DefaultLink is the default link used if none is specified. +// It has zero latency and always succeeds. 
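Because Link is an interface, a test can substitute its own implementation when wiring the topology. The flakyLink type below is a sketch, not part of the patch, of a link that simulates latency and dial failures; it would be passed to ConnectNodesWithRoute in place of the DefaultLink declared next.

```go
package nettest

import "time"

// flakyLink is a hypothetical Link that simulates a slow connection and,
// optionally, a failure to dial the remote node.
type flakyLink struct {
	connLatency time.Duration
	dialLatency time.Duration
	dialErr     error
}

var _ Link = (*flakyLink)(nil)

func (l *flakyLink) ConnLatency() time.Duration { return l.connLatency }
func (l *flakyLink) DialLatency() time.Duration { return l.dialLatency }
func (l *flakyLink) DialErr() error             { return l.dialErr }

// Example wiring, where top, a and b come from the surrounding test:
//
//	top.ConnectNodesWithRoute(a, b, &flakyLink{connLatency: 50 * time.Millisecond})
```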
+type DefaultLink struct{} + +func (l *DefaultLink) DialErr() error { return nil } +func (l *DefaultLink) ConnLatency() time.Duration { return 0 } +func (l *DefaultLink) DialLatency() time.Duration { return 0 } + +type Router[K kad.Key[K], A kad.Address[A]] struct { + self kad.NodeID[K] + top *Topology[K, A] + mu sync.Mutex // guards nodes + nodes map[string]*nodeStatus[K, A] +} + +type nodeStatus[K kad.Key[K], A kad.Address[A]] struct { + NodeInfo kad.NodeInfo[K, A] + Connectedness endpoint.Connectedness +} + +func NewRouter[K kad.Key[K], A kad.Address[A]](self kad.NodeID[K], top *Topology[K, A]) *Router[K, A] { + return &Router[K, A]{ + self: self, + top: top, + nodes: make(map[string]*nodeStatus[K, A]), + } +} + +func (r *Router[K, A]) NodeID() kad.NodeID[K] { + return r.self +} + +func (r *Router[K, A]) SendMessage(ctx context.Context, to kad.NodeInfo[K, A], protoID address.ProtocolID, req kad.Request[K, A]) (kad.Response[K, A], error) { + if err := r.AddNodeInfo(ctx, to, 0); err != nil { + return nil, fmt.Errorf("add node info: %w", err) + } + + if err := r.Dial(ctx, to); err != nil { + return nil, fmt.Errorf("dial: %w", err) + } + + return r.top.RouteMessage(ctx, r.self, to.ID(), protoID, req) +} + +func (r *Router[K, A]) HandleMessage(ctx context.Context, n kad.NodeID[K], protoID address.ProtocolID, req kad.Request[K, A]) (kad.Response[K, A], error) { + closer := make([]kad.NodeInfo[K, A], 0) + + r.mu.Lock() + for _, n := range r.nodes { + // only include self if it was the target of the request + if key.Equal(n.NodeInfo.ID().Key(), r.self.Key()) && !key.Equal(n.NodeInfo.ID().Key(), req.Target()) { + continue + } + closer = append(closer, n.NodeInfo) + } + r.mu.Unlock() + + resp := sim.NewResponse(closer) + return resp, nil +} + +func (r *Router[K, A]) Dial(ctx context.Context, to kad.NodeInfo[K, A]) error { + tkey := key.HexString(to.ID().Key()) + + r.mu.Lock() + status, ok := r.nodes[tkey] + r.mu.Unlock() + + if ok { + switch status.Connectedness { + case endpoint.Connected: + return nil + case endpoint.CanConnect: + if _, err := r.top.Dial(ctx, r.self, to.ID()); err != nil { + return err + } + + status.Connectedness = endpoint.Connected + r.mu.Lock() + r.nodes[tkey] = status + r.mu.Unlock() + return nil + } + } + return endpoint.ErrUnknownPeer +} + +func (r *Router[K, A]) AddNodeInfo(ctx context.Context, info kad.NodeInfo[K, A], ttl time.Duration) error { + key := key.HexString(info.ID().Key()) + r.mu.Lock() + defer r.mu.Unlock() + + if _, ok := r.nodes[key]; !ok { + r.nodes[key] = &nodeStatus[K, A]{ + NodeInfo: info, + Connectedness: endpoint.CanConnect, + } + } + return nil +} + +func (r *Router[K, A]) GetNodeInfo(ctx context.Context, id kad.NodeID[K]) (kad.NodeInfo[K, A], error) { + key := key.HexString(id.Key()) + r.mu.Lock() + defer r.mu.Unlock() + + status, ok := r.nodes[key] + if !ok { + return nil, fmt.Errorf("unknown node") + } + return status.NodeInfo, nil +} + +func (r *Router[K, A]) GetClosestNodes(ctx context.Context, to kad.NodeInfo[K, A], target K) ([]kad.NodeInfo[K, A], error) { + protoID := address.ProtocolID("/test/1.0.0") + + resp, err := r.SendMessage(ctx, to, protoID, sim.NewRequest[K, A](target)) + if err != nil { + return nil, err + } + return resp.CloserNodes(), nil +} diff --git a/v2/coord/internal/nettest/topology.go b/v2/coord/internal/nettest/topology.go new file mode 100644 index 00000000..eb8710f2 --- /dev/null +++ b/v2/coord/internal/nettest/topology.go @@ -0,0 +1,145 @@ +package nettest + +import ( + "context" + "fmt" + + 
"github.com/benbjohnson/clock" + + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-kademlia/network/address" + "github.com/plprobelab/go-kademlia/routing" +) + +type Node[K kad.Key[K], A kad.Address[A]] struct { + NodeInfo kad.NodeInfo[K, A] + Router *Router[K, A] + RoutingTable routing.RoutingTableCpl[K, kad.NodeID[K]] +} + +type Topology[K kad.Key[K], A kad.Address[A]] struct { + clk *clock.Mock + links map[string]Link + nodes []*Node[K, A] + nodeIndex map[string]*Node[K, A] + routers map[string]*Router[K, A] +} + +func NewTopology[K kad.Key[K], A kad.Address[A]](clk *clock.Mock) *Topology[K, A] { + return &Topology[K, A]{ + clk: clk, + links: make(map[string]Link), + nodeIndex: make(map[string]*Node[K, A]), + routers: make(map[string]*Router[K, A]), + } +} + +func (t *Topology[K, A]) Nodes() []*Node[K, A] { + return t.nodes +} + +func (t *Topology[K, A]) ConnectNodes(a *Node[K, A], b *Node[K, A]) { + t.ConnectNodesWithRoute(a, b, &DefaultLink{}) +} + +func (t *Topology[K, A]) ConnectNodesWithRoute(a *Node[K, A], b *Node[K, A], l Link) { + akey := key.HexString(a.NodeInfo.ID().Key()) + if _, exists := t.nodeIndex[akey]; !exists { + t.nodeIndex[akey] = a + t.nodes = append(t.nodes, a) + t.routers[akey] = a.Router + } + + bkey := key.HexString(b.NodeInfo.ID().Key()) + if _, exists := t.nodeIndex[bkey]; !exists { + t.nodeIndex[bkey] = b + t.nodes = append(t.nodes, b) + t.routers[bkey] = b.Router + } + + atob := fmt.Sprintf("%s->%s", akey, bkey) + t.links[atob] = l + + // symmetrical routing assumed + btoa := fmt.Sprintf("%s->%s", bkey, akey) + t.links[btoa] = l +} + +func (t *Topology[K, A]) findRoute(ctx context.Context, from kad.NodeID[K], to kad.NodeID[K]) (Link, error) { + fkey := key.HexString(from.Key()) + tkey := key.HexString(to.Key()) + + key := fmt.Sprintf("%s->%s", fkey, tkey) + + route, ok := t.links[key] + if !ok { + return nil, fmt.Errorf("no route to node") + } + + return route, nil +} + +func (t *Topology[K, A]) Dial(ctx context.Context, from kad.NodeID[K], to kad.NodeID[K]) (kad.NodeInfo[K, A], error) { + if key.Equal(from.Key(), to.Key()) { + tkey := key.HexString(to.Key()) + node, ok := t.nodeIndex[tkey] + if !ok { + return nil, fmt.Errorf("unknown node") + } + + return node.NodeInfo, nil + } + + route, err := t.findRoute(ctx, from, to) + if err != nil { + return nil, fmt.Errorf("find route: %w", err) + } + + latency := route.DialLatency() + if latency > 0 { + t.clk.Sleep(latency) + } + + if err := route.DialErr(); err != nil { + return nil, err + } + + tkey := key.HexString(to.Key()) + node, ok := t.nodeIndex[tkey] + if !ok { + return nil, fmt.Errorf("unknown node") + } + + return node.NodeInfo, nil +} + +func (t *Topology[K, A]) RouteMessage(ctx context.Context, from kad.NodeID[K], to kad.NodeID[K], protoID address.ProtocolID, req kad.Request[K, A]) (kad.Response[K, A], error) { + if key.Equal(from.Key(), to.Key()) { + tkey := key.HexString(to.Key()) + node, ok := t.nodeIndex[tkey] + if !ok { + return nil, fmt.Errorf("unknown node") + } + + return node.Router.HandleMessage(ctx, from, protoID, req) + } + + route, err := t.findRoute(ctx, from, to) + if err != nil { + return nil, fmt.Errorf("find route: %w", err) + } + + latency := route.ConnLatency() + if latency > 0 { + t.clk.Sleep(latency) + } + + tkey := key.HexString(to.Key()) + node, ok := t.nodeIndex[tkey] + if !ok { + return nil, fmt.Errorf("no route to node") + } + + return node.Router.HandleMessage(ctx, from, protoID, req) +} diff --git 
a/v2/coord/network.go b/v2/coord/network.go new file mode 100644 index 00000000..e641819a --- /dev/null +++ b/v2/coord/network.go @@ -0,0 +1,263 @@ +package kademlia + +import ( + "context" + "fmt" + "sync" + + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-kademlia/query" + "golang.org/x/exp/slog" +) + +type NetworkBehaviour[K kad.Key[K], A kad.Address[A]] struct { + // rtr is the message router used to send messages + rtr Router[K, A] + + nodeHandlersMu sync.Mutex + nodeHandlers map[string]*NodeHandler[K, A] // TODO: garbage collect node handlers + + pendingMu sync.Mutex + pending []DhtEvent + ready chan struct{} + + logger *slog.Logger +} + +func NewNetworkBehaviour[K kad.Key[K], A kad.Address[A]](rtr Router[K, A], logger *slog.Logger) *NetworkBehaviour[K, A] { + b := &NetworkBehaviour[K, A]{ + rtr: rtr, + nodeHandlers: make(map[string]*NodeHandler[K, A]), + ready: make(chan struct{}, 1), + logger: logger, + } + + return b +} + +func (b *NetworkBehaviour[K, A]) Notify(ctx context.Context, ev DhtEvent) { + b.pendingMu.Lock() + defer b.pendingMu.Unlock() + + switch ev := ev.(type) { + case *EventOutboundGetClosestNodes[K, A]: + nodeKey := key.HexString(ev.To.ID().Key()) + b.nodeHandlersMu.Lock() + nh, ok := b.nodeHandlers[nodeKey] + if !ok { + nh = NewNodeHandler(ev.To, b.rtr, b.logger) + b.nodeHandlers[nodeKey] = nh + } + b.nodeHandlersMu.Unlock() + nh.Notify(ctx, ev) + default: + panic(fmt.Sprintf("unexpected dht event: %T", ev)) + } + + if len(b.pending) > 0 { + select { + case b.ready <- struct{}{}: + default: + } + } +} + +func (b *NetworkBehaviour[K, A]) Ready() <-chan struct{} { + return b.ready +} + +func (b *NetworkBehaviour[K, A]) Perform(ctx context.Context) (DhtEvent, bool) { + // No inbound work can be done until Perform is complete + b.pendingMu.Lock() + defer b.pendingMu.Unlock() + + // drain queued events. 
+ if len(b.pending) > 0 { + var ev DhtEvent + ev, b.pending = b.pending[0], b.pending[1:] + + if len(b.pending) > 0 { + select { + case b.ready <- struct{}{}: + default: + } + } + return ev, true + } + + return nil, false +} + +func (b *NetworkBehaviour[K, A]) getNodeHandler(ctx context.Context, id kad.NodeID[K]) (*NodeHandler[K, A], error) { + nodeKey := key.HexString(id.Key()) + b.nodeHandlersMu.Lock() + nh, ok := b.nodeHandlers[nodeKey] + if !ok || len(nh.Addresses()) == 0 { + info, err := b.rtr.GetNodeInfo(ctx, id) + if err != nil { + return nil, err + } + nh = NewNodeHandler(info, b.rtr, b.logger) + b.nodeHandlers[nodeKey] = nh + } + b.nodeHandlersMu.Unlock() + return nh, nil +} + +type NodeHandler[K kad.Key[K], A kad.Address[A]] struct { + self kad.NodeInfo[K, A] + rtr Router[K, A] + queue *WorkQueue[NodeHandlerRequest] + logger *slog.Logger +} + +func NewNodeHandler[K kad.Key[K], A kad.Address[A]](self kad.NodeInfo[K, A], rtr Router[K, A], logger *slog.Logger) *NodeHandler[K, A] { + h := &NodeHandler[K, A]{ + self: self, + rtr: rtr, + logger: logger, + } + + h.queue = NewWorkQueue(h.send) + + return h +} + +func (h *NodeHandler[K, A]) Notify(ctx context.Context, ev NodeHandlerRequest) { + h.queue.Enqueue(ctx, ev) +} + +func (h *NodeHandler[K, A]) send(ctx context.Context, ev NodeHandlerRequest) bool { + switch cmd := ev.(type) { + case *EventOutboundGetClosestNodes[K, A]: + if cmd.Notify == nil { + break + } + nodes, err := h.rtr.GetClosestNodes(ctx, h.self, cmd.Target) + if err != nil { + cmd.Notify.Notify(ctx, &EventGetClosestNodesFailure[K, A]{ + QueryID: cmd.QueryID, + To: h.self, + Target: cmd.Target, + Err: fmt.Errorf("send: %w", err), + }) + return false + } + + cmd.Notify.Notify(ctx, &EventGetClosestNodesSuccess[K, A]{ + QueryID: cmd.QueryID, + To: h.self, + Target: cmd.Target, + ClosestNodes: nodes, + }) + default: + panic(fmt.Sprintf("unexpected command type: %T", cmd)) + } + + return false +} + +func (h *NodeHandler[K, A]) ID() kad.NodeID[K] { + return h.self.ID() +} + +func (h *NodeHandler[K, A]) Addresses() []A { + return h.self.Addresses() +} + +// GetClosestNodes requests the n closest nodes to the key from the node's local routing table. +// The node may return fewer nodes than requested. +func (h *NodeHandler[K, A]) GetClosestNodes(ctx context.Context, k K, n int) ([]Node[K, A], error) { + w := NewWaiter[DhtEvent]() + + ev := &EventOutboundGetClosestNodes[K, A]{ + QueryID: query.QueryID(key.HexString(k)), + To: h.self, + Target: k, + Notify: w, + } + + h.queue.Enqueue(ctx, ev) + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case we := <-w.Chan(): + + switch res := we.Event.(type) { + case *EventGetClosestNodesSuccess[K, A]: + nodes := make([]Node[K, A], 0, len(res.ClosestNodes)) + for _, info := range res.ClosestNodes { + // TODO use a global registry of node handlers + nodes = append(nodes, NewNodeHandler(info, h.rtr, h.logger)) + n-- + if n == 0 { + break + } + } + return nodes, nil + + case *EventGetClosestNodesFailure[K, A]: + return nil, res.Err + default: + panic(fmt.Sprintf("unexpected node handler event: %T", ev)) + } + } +} + +// GetValue requests that the node return any value associated with the supplied key. +// If the node does not have a value for the key it returns ErrValueNotFound. +func (h *NodeHandler[K, A]) GetValue(ctx context.Context, key K) (Value[K], error) { + panic("not implemented") +} + +// PutValue requests that the node stores a value to be associated with the supplied key. 
+// If the node cannot or chooses not to store the value for the key it returns ErrValueNotAccepted. +func (h *NodeHandler[K, A]) PutValue(ctx context.Context, r Value[K], q int) error { + panic("not implemented") +} + +type NodeAddr[K kad.Key[K], A kad.Address[A]] struct { + id kad.NodeID[K] + addresses []A +} + +func NewNodeAddr[K kad.Key[K], A kad.Address[A]](id kad.NodeID[K], addresses []A) *NodeAddr[K, A] { + return &NodeAddr[K, A]{ + id: id, + addresses: addresses, + } +} + +func (n *NodeAddr[K, A]) ID() kad.NodeID[K] { + return n.id +} + +func (n *NodeAddr[K, A]) Addresses() []A { + return n.addresses +} + +func ClosestNodesFakeResponse[K kad.Key[K], A kad.Address[A]](key K, nodes []kad.NodeInfo[K, A]) kad.Response[K, A] { + return &fakeMessage[K, A]{ + key: key, + nodes: nodes, + } +} + +type fakeMessage[K kad.Key[K], A kad.Address[A]] struct { + key K + nodes []kad.NodeInfo[K, A] +} + +func (r fakeMessage[K, A]) Target() K { + return r.key +} + +func (r fakeMessage[K, A]) CloserNodes() []kad.NodeInfo[K, A] { + return r.nodes +} + +func (r fakeMessage[K, A]) EmptyResponse() kad.Response[K, A] { + return &fakeMessage[K, A]{} +} diff --git a/v2/coord/network_test.go b/v2/coord/network_test.go new file mode 100644 index 00000000..714c2a35 --- /dev/null +++ b/v2/coord/network_test.go @@ -0,0 +1,34 @@ +package kademlia + +import ( + "testing" + + "github.com/benbjohnson/clock" + "github.com/plprobelab/go-kademlia/key" + "github.com/stretchr/testify/require" + "golang.org/x/exp/slog" + + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/kadtest" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest" +) + +var _ Router[key.Key8, kadtest.StrAddr] = (*nettest.Router[key.Key8, kadtest.StrAddr])(nil) + +// TODO: this is just a basic is-it-working test that needs to be improved +func TestGetClosestNodes(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes := nettest.LinearTopology(4, clk) + + h := NewNodeHandler[key.Key8, kadtest.StrAddr](nodes[1].NodeInfo, nodes[1].Router, slog.Default()) + + // node 1 has node 2 in its routing table so it will return it along with node 0 + found, err := h.GetClosestNodes(ctx, nodes[2].NodeInfo.ID().Key(), 2) + require.NoError(t, err) + for _, f := range found { + t.Logf("found node %v", f.ID()) + } + require.Equal(t, 2, len(found)) +} diff --git a/v2/coord/query.go b/v2/coord/query.go new file mode 100644 index 00000000..a4f5f268 --- /dev/null +++ b/v2/coord/query.go @@ -0,0 +1,179 @@ +package kademlia + +import ( + "context" + "fmt" + "sync" + + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/query" + "github.com/plprobelab/go-kademlia/util" + "golang.org/x/exp/slog" +) + +type PooledQueryBehaviour[K kad.Key[K], A kad.Address[A]] struct { + pool *query.Pool[K, A] + waiters map[query.QueryID]NotifyCloser[DhtEvent] + + pendingMu sync.Mutex + pending []DhtEvent + ready chan struct{} + + logger *slog.Logger +} + +func NewPooledQueryBehaviour[K kad.Key[K], A kad.Address[A]](pool *query.Pool[K, A], logger *slog.Logger) *PooledQueryBehaviour[K, A] { + h := &PooledQueryBehaviour[K, A]{ + pool: pool, + waiters: make(map[query.QueryID]NotifyCloser[DhtEvent]), + ready: make(chan struct{}, 1), + logger: logger, + } + return h +} + +func (r *PooledQueryBehaviour[K, A]) Notify(ctx context.Context, ev DhtEvent) { + ctx, span := util.StartSpan(ctx, "PooledQueryBehaviour.Notify") + defer span.End() + + r.pendingMu.Lock() + defer r.pendingMu.Unlock() + + var cmd 
query.PoolEvent + switch ev := ev.(type) { + case *EventStartQuery[K, A]: + cmd = &query.EventPoolAddQuery[K, A]{ + QueryID: ev.QueryID, + Target: ev.Target, + ProtocolID: ev.ProtocolID, + Message: ev.Message, + KnownClosestNodes: ev.KnownClosestNodes, + } + if ev.Notify != nil { + r.waiters[ev.QueryID] = ev.Notify + } + + case *EventStopQuery: + cmd = &query.EventPoolStopQuery{ + QueryID: ev.QueryID, + } + + case *EventGetClosestNodesSuccess[K, A]: + for _, info := range ev.ClosestNodes { + // TODO: do this after advancing pool + r.pending = append(r.pending, &EventDhtAddNodeInfo[K, A]{ + NodeInfo: info, + }) + } + waiter, ok := r.waiters[ev.QueryID] + if ok { + waiter.Notify(ctx, &EventQueryProgressed[K, A]{ + NodeID: ev.To.ID(), + QueryID: ev.QueryID, + Response: ClosestNodesFakeResponse(ev.Target, ev.ClosestNodes), + // Stats: stats, + }) + } + cmd = &query.EventPoolMessageResponse[K, A]{ + NodeID: ev.To.ID(), + QueryID: ev.QueryID, + Response: ClosestNodesFakeResponse(ev.Target, ev.ClosestNodes), + } + case *EventGetClosestNodesFailure[K, A]: + cmd = &query.EventPoolMessageFailure[K]{ + NodeID: ev.To.ID(), + QueryID: ev.QueryID, + Error: ev.Err, + } + default: + panic(fmt.Sprintf("unexpected dht event: %T", ev)) + } + + // attempt to advance the query pool + ev, ok := r.advancePool(ctx, cmd) + if ok { + r.pending = append(r.pending, ev) + } + if len(r.pending) > 0 { + select { + case r.ready <- struct{}{}: + default: + } + } +} + +func (r *PooledQueryBehaviour[K, A]) Ready() <-chan struct{} { + return r.ready +} + +func (r *PooledQueryBehaviour[K, A]) Perform(ctx context.Context) (DhtEvent, bool) { + ctx, span := util.StartSpan(ctx, "PooledQueryBehaviour.Perform") + defer span.End() + + // No inbound work can be done until Perform is complete + r.pendingMu.Lock() + defer r.pendingMu.Unlock() + + for { + // drain queued events first. 
+ if len(r.pending) > 0 { + var ev DhtEvent + ev, r.pending = r.pending[0], r.pending[1:] + + if len(r.pending) > 0 { + select { + case r.ready <- struct{}{}: + default: + } + } + return ev, true + } + + // attempt to advance the query pool + ev, ok := r.advancePool(ctx, &query.EventPoolPoll{}) + if ok { + return ev, true + } + + if len(r.pending) == 0 { + return nil, false + } + } +} + +func (r *PooledQueryBehaviour[K, A]) advancePool(ctx context.Context, ev query.PoolEvent) (DhtEvent, bool) { + ctx, span := util.StartSpan(ctx, "PooledQueryBehaviour.advancePool") + defer span.End() + + pstate := r.pool.Advance(ctx, ev) + switch st := pstate.(type) { + case *query.StatePoolQueryMessage[K, A]: + return &EventOutboundGetClosestNodes[K, A]{ + QueryID: st.QueryID, + To: NewNodeAddr[K, A](st.NodeID, nil), + Target: st.Message.Target(), + Notify: r, + }, true + case *query.StatePoolWaitingAtCapacity: + // nothing to do except wait for message response or timeout + case *query.StatePoolWaitingWithCapacity: + // nothing to do except wait for message response or timeout + case *query.StatePoolQueryFinished: + waiter, ok := r.waiters[st.QueryID] + if ok { + waiter.Notify(ctx, &EventQueryFinished{ + QueryID: st.QueryID, + Stats: st.Stats, + }) + waiter.Close() + } + case *query.StatePoolQueryTimeout: + // TODO + case *query.StatePoolIdle: + // nothing to do + default: + panic(fmt.Sprintf("unexpected pool state: %T", st)) + } + + return nil, false +} diff --git a/v2/coord/routing.go b/v2/coord/routing.go new file mode 100644 index 00000000..545ca594 --- /dev/null +++ b/v2/coord/routing.go @@ -0,0 +1,356 @@ +package kademlia + +import ( + "context" + "fmt" + "sync" + + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-kademlia/routing" + "github.com/plprobelab/go-kademlia/util" + "go.opentelemetry.io/otel/attribute" + "golang.org/x/exp/slog" +) + +type RoutingBehaviour[K kad.Key[K], A kad.Address[A]] struct { + // self is the node id of the system the dht is running on + self kad.NodeID[K] + // bootstrap is the bootstrap state machine, responsible for bootstrapping the routing table + bootstrap SM[routing.BootstrapEvent, routing.BootstrapState] + + // include is the inclusion state machine, responsible for vetting nodes before including them in the routing table + include SM[routing.IncludeEvent, routing.IncludeState] + + // probe is the node probing state machine, responsible for periodically checking connectivity of nodes in the routing table + probe SM[routing.ProbeEvent, routing.ProbeState] + + pendingMu sync.Mutex + pending []DhtEvent + ready chan struct{} + + logger *slog.Logger +} + +func NewRoutingBehaviour[K kad.Key[K], A kad.Address[A]](self kad.NodeID[K], bootstrap SM[routing.BootstrapEvent, routing.BootstrapState], include SM[routing.IncludeEvent, routing.IncludeState], probe SM[routing.ProbeEvent, routing.ProbeState], logger *slog.Logger) *RoutingBehaviour[K, A] { + r := &RoutingBehaviour[K, A]{ + self: self, + bootstrap: bootstrap, + include: include, + probe: probe, + ready: make(chan struct{}, 1), + logger: logger, + } + return r +} + +func (r *RoutingBehaviour[K, A]) Notify(ctx context.Context, ev DhtEvent) { + ctx, span := util.StartSpan(ctx, "RoutingBehaviour.Notify") + defer span.End() + + r.pendingMu.Lock() + defer r.pendingMu.Unlock() + r.notify(ctx, ev) +} + +// notify must only be called while r.pendingMu is held +func (r *RoutingBehaviour[K, A]) notify(ctx context.Context, ev DhtEvent) { + ctx, span := 
util.StartSpan(ctx, "RoutingBehaviour.notify") + defer span.End() + switch ev := ev.(type) { + case *EventDhtStartBootstrap[K, A]: + span.SetAttributes(attribute.String("event", "EventDhtStartBootstrap")) + cmd := &routing.EventBootstrapStart[K, A]{ + ProtocolID: ev.ProtocolID, + Message: ev.Message, + KnownClosestNodes: ev.SeedNodes, + } + // attempt to advance the bootstrap + next, ok := r.advanceBootstrap(ctx, cmd) + if ok { + r.pending = append(r.pending, next) + } + + case *EventDhtAddNodeInfo[K, A]: + span.SetAttributes(attribute.String("event", "EventDhtAddNodeInfo")) + // Ignore self + if key.Equal(ev.NodeInfo.ID().Key(), r.self.Key()) { + break + } + cmd := &routing.EventIncludeAddCandidate[K, A]{ + NodeInfo: ev.NodeInfo, + } + // attempt to advance the include + next, ok := r.advanceInclude(ctx, cmd) + if ok { + r.pending = append(r.pending, next) + } + + case *EventRoutingUpdated[K, A]: + span.SetAttributes(attribute.String("event", "EventRoutingUpdated")) + cmd := &routing.EventProbeAdd[K]{ + NodeID: ev.NodeInfo.ID(), + } + // attempt to advance the probe state machine + next, ok := r.advanceProbe(ctx, cmd) + if ok { + r.pending = append(r.pending, next) + } + + case *EventGetClosestNodesSuccess[K, A]: + span.SetAttributes(attribute.String("event", "EventGetClosestNodesFailure"), attribute.String("queryid", string(ev.QueryID)), attribute.String("nodeid", string(ev.To.ID().String()))) + switch ev.QueryID { + case "bootstrap": + for _, info := range ev.ClosestNodes { + // TODO: do this after advancing bootstrap + r.pending = append(r.pending, &EventDhtAddNodeInfo[K, A]{ + NodeInfo: info, + }) + } + cmd := &routing.EventBootstrapMessageResponse[K, A]{ + NodeID: ev.To.ID(), + Response: ClosestNodesFakeResponse(ev.Target, ev.ClosestNodes), + } + // attempt to advance the bootstrap + next, ok := r.advanceBootstrap(ctx, cmd) + if ok { + r.pending = append(r.pending, next) + } + + case "include": + cmd := &routing.EventIncludeMessageResponse[K, A]{ + NodeInfo: ev.To, + Response: ClosestNodesFakeResponse(ev.Target, ev.ClosestNodes), + } + // attempt to advance the include + next, ok := r.advanceInclude(ctx, cmd) + if ok { + r.pending = append(r.pending, next) + } + + case "probe": + cmd := &routing.EventProbeMessageResponse[K, A]{ + NodeInfo: ev.To, + Response: ClosestNodesFakeResponse(ev.Target, ev.ClosestNodes), + } + // attempt to advance the probe state machine + next, ok := r.advanceProbe(ctx, cmd) + if ok { + r.pending = append(r.pending, next) + } + + default: + panic(fmt.Sprintf("unexpected query id: %s", ev.QueryID)) + } + case *EventGetClosestNodesFailure[K, A]: + span.SetAttributes(attribute.String("event", "EventGetClosestNodesFailure"), attribute.String("queryid", string(ev.QueryID)), attribute.String("nodeid", string(ev.To.ID().String()))) + span.RecordError(ev.Err) + switch ev.QueryID { + case "bootstrap": + cmd := &routing.EventBootstrapMessageFailure[K]{ + NodeID: ev.To.ID(), + Error: ev.Err, + } + // attempt to advance the bootstrap + next, ok := r.advanceBootstrap(ctx, cmd) + if ok { + r.pending = append(r.pending, next) + } + case "include": + cmd := &routing.EventIncludeMessageFailure[K, A]{ + NodeInfo: ev.To, + Error: ev.Err, + } + // attempt to advance the include state machine + next, ok := r.advanceInclude(ctx, cmd) + if ok { + r.pending = append(r.pending, next) + } + case "probe": + cmd := &routing.EventProbeMessageFailure[K, A]{ + NodeInfo: ev.To, + Error: ev.Err, + } + // attempt to advance the probe state machine + next, ok := r.advanceProbe(ctx, cmd) + if 
ok { + r.pending = append(r.pending, next) + } + + default: + panic(fmt.Sprintf("unexpected query id: %s", ev.QueryID)) + } + default: + panic(fmt.Sprintf("unexpected dht event: %T", ev)) + } + + if len(r.pending) > 0 { + select { + case r.ready <- struct{}{}: + default: + } + } +} + +func (r *RoutingBehaviour[K, A]) Ready() <-chan struct{} { + return r.ready +} + +func (r *RoutingBehaviour[K, A]) Perform(ctx context.Context) (DhtEvent, bool) { + ctx, span := util.StartSpan(ctx, "RoutingBehaviour.Perform") + defer span.End() + + // No inbound work can be done until Perform is complete + r.pendingMu.Lock() + defer r.pendingMu.Unlock() + + for { + // drain queued events first. + if len(r.pending) > 0 { + var ev DhtEvent + ev, r.pending = r.pending[0], r.pending[1:] + + if len(r.pending) > 0 { + select { + case r.ready <- struct{}{}: + default: + } + } + return ev, true + } + + // poll the child state machines in priority order to give each an opportunity to perform work + + ev, ok := r.advanceBootstrap(ctx, &routing.EventBootstrapPoll{}) + if ok { + return ev, true + } + + ev, ok = r.advanceInclude(ctx, &routing.EventIncludePoll{}) + if ok { + return ev, true + } + + ev, ok = r.advanceProbe(ctx, &routing.EventProbePoll{}) + if ok { + return ev, true + } + + // finally check if any pending events were accumulated in the meantime + if len(r.pending) == 0 { + return nil, false + } + } +} + +func (r *RoutingBehaviour[K, A]) advanceBootstrap(ctx context.Context, ev routing.BootstrapEvent) (DhtEvent, bool) { + ctx, span := util.StartSpan(ctx, "RoutingBehaviour.advanceBootstrap") + defer span.End() + bstate := r.bootstrap.Advance(ctx, ev) + switch st := bstate.(type) { + + case *routing.StateBootstrapMessage[K, A]: + return &EventOutboundGetClosestNodes[K, A]{ + QueryID: "bootstrap", + To: NewNodeAddr[K, A](st.NodeID, nil), + Target: st.Message.Target(), + Notify: r, + }, true + + case *routing.StateBootstrapWaiting: + // bootstrap waiting for a message response, nothing to do + case *routing.StateBootstrapFinished: + return &EventBootstrapFinished{ + Stats: st.Stats, + }, true + case *routing.StateBootstrapIdle: + // bootstrap not running, nothing to do + default: + panic(fmt.Sprintf("unexpected bootstrap state: %T", st)) + } + + return nil, false +} + +func (r *RoutingBehaviour[K, A]) advanceInclude(ctx context.Context, ev routing.IncludeEvent) (DhtEvent, bool) { + ctx, span := util.StartSpan(ctx, "RoutingBehaviour.advanceInclude") + defer span.End() + istate := r.include.Advance(ctx, ev) + switch st := istate.(type) { + case *routing.StateIncludeFindNodeMessage[K, A]: + // include wants to send a find node message to a node + return &EventOutboundGetClosestNodes[K, A]{ + QueryID: "include", + To: st.NodeInfo, + Target: st.NodeInfo.ID().Key(), + Notify: r, + }, true + + case *routing.StateIncludeRoutingUpdated[K, A]: + // a node has been included in the routing table + + // notify other routing state machines that there is a new node in the routing table + r.notify(ctx, &EventRoutingUpdated[K, A]{ + NodeInfo: st.NodeInfo, + }) + + // return the event to notify outwards too + return &EventRoutingUpdated[K, A]{ + NodeInfo: st.NodeInfo, + }, true + case *routing.StateIncludeWaitingAtCapacity: + // nothing to do except wait for message response or timeout + case *routing.StateIncludeWaitingWithCapacity: + // nothing to do except wait for message response or timeout + case *routing.StateIncludeWaitingFull: + // nothing to do except wait for message response or timeout + case *routing.StateIncludeIdle: 
+ // nothing to do except wait for message response or timeout + default: + panic(fmt.Sprintf("unexpected include state: %T", st)) + } + + return nil, false +} + +func (r *RoutingBehaviour[K, A]) advanceProbe(ctx context.Context, ev routing.ProbeEvent) (DhtEvent, bool) { + ctx, span := util.StartSpan(ctx, "RoutingBehaviour.advanceProbe") + defer span.End() + st := r.probe.Advance(ctx, ev) + switch st := st.(type) { + case *routing.StateProbeConnectivityCheck[K]: + // include wants to send a find node message to a node + return &EventOutboundGetClosestNodes[K, A]{ + QueryID: "probe", + To: unaddressedNodeInfo[K, A]{NodeID: st.NodeID}, + Target: st.NodeID.Key(), + Notify: r, + }, true + case *routing.StateProbeNodeFailure[K]: + // a node has failed a connectivity check been removed from the routing table and the probe list + // add the node to the inclusion list for a second chance + r.notify(ctx, &EventDhtAddNodeInfo[K, A]{ + NodeInfo: unaddressedNodeInfo[K, A]{NodeID: st.NodeID}, + }) + case *routing.StateProbeWaitingAtCapacity: + // the probe state machine is waiting for responses for checks and the maximum number of concurrent checks has been reached. + // nothing to do except wait for message response or timeout + case *routing.StateProbeWaitingWithCapacity: + // the probe state machine is waiting for responses for checks but has capacity to perform more + // nothing to do except wait for message response or timeout + case *routing.StateProbeIdle: + // the probe state machine is not running any checks. + // nothing to do except wait for message response or timeout + default: + panic(fmt.Sprintf("unexpected include state: %T", st)) + } + + return nil, false +} + +type unaddressedNodeInfo[K kad.Key[K], A kad.Address[A]] struct { + NodeID kad.NodeID[K] +} + +func (u unaddressedNodeInfo[K, A]) ID() kad.NodeID[K] { return u.NodeID } +func (u unaddressedNodeInfo[K, A]) Addresses() []A { return nil } diff --git a/v2/coord/routing_test.go b/v2/coord/routing_test.go new file mode 100644 index 00000000..19cb12ea --- /dev/null +++ b/v2/coord/routing_test.go @@ -0,0 +1,300 @@ +package kademlia + +import ( + "errors" + "testing" + "time" + + "github.com/benbjohnson/clock" + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-kademlia/network/address" + "github.com/plprobelab/go-kademlia/query" + "github.com/plprobelab/go-kademlia/routing" + "github.com/stretchr/testify/require" + "golang.org/x/exp/slog" + + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/kadtest" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest" +) + +func TestRoutingStartBootstrapSendsEvent(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes := nettest.LinearTopology(4, clk) + + self := nodes[0].NodeInfo.ID() + + // records the event passed to bootstrap + bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) + include := new(NullSM[routing.IncludeEvent, routing.IncludeState]) + probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) + + routingBehaviour := NewRoutingBehaviour[key.Key8, kadtest.StrAddr](self, bootstrap, include, probe, slog.Default()) + + ev := &EventDhtStartBootstrap[key.Key8, kadtest.StrAddr]{ + ProtocolID: address.ProtocolID("test"), + Message: kadtest.NewRequest("1", self.Key()), + SeedNodes: []kad.NodeID[key.Key8]{nodes[1].NodeInfo.ID()}, + } + + routingBehaviour.Notify(ctx, ev) + + // the event that should be passed to 
the bootstrap state machine + expected := &routing.EventBootstrapStart[key.Key8, kadtest.StrAddr]{ + ProtocolID: ev.ProtocolID, + Message: ev.Message, + KnownClosestNodes: ev.SeedNodes, + } + require.Equal(t, expected, bootstrap.Received) +} + +func TestRoutingBootstrapGetClosestNodesSuccess(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes := nettest.LinearTopology(4, clk) + + self := nodes[0].NodeInfo.ID() + + // records the event passed to bootstrap + bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) + include := new(NullSM[routing.IncludeEvent, routing.IncludeState]) + probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) + + routingBehaviour := NewRoutingBehaviour[key.Key8, kadtest.StrAddr](self, bootstrap, include, probe, slog.Default()) + + ev := &EventGetClosestNodesSuccess[key.Key8, kadtest.StrAddr]{ + QueryID: query.QueryID("bootstrap"), + To: nodes[1].NodeInfo, + Target: nodes[0].NodeInfo.ID().Key(), + ClosestNodes: []kad.NodeInfo[key.Key8, kadtest.StrAddr]{nodes[2].NodeInfo}, + } + + routingBehaviour.Notify(ctx, ev) + + // bootstrap should receive message response event + require.IsType(t, &routing.EventBootstrapMessageResponse[key.Key8, kadtest.StrAddr]{}, bootstrap.Received) + + rev := bootstrap.Received.(*routing.EventBootstrapMessageResponse[key.Key8, kadtest.StrAddr]) + require.Equal(t, nodes[1].NodeInfo.ID(), rev.NodeID) + require.Equal(t, ev.ClosestNodes, rev.Response.CloserNodes()) +} + +func TestRoutingBootstrapGetClosestNodesFailure(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes := nettest.LinearTopology(4, clk) + + self := nodes[0].NodeInfo.ID() + + // records the event passed to bootstrap + bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) + include := new(NullSM[routing.IncludeEvent, routing.IncludeState]) + probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) + + routingBehaviour := NewRoutingBehaviour[key.Key8, kadtest.StrAddr](self, bootstrap, include, probe, slog.Default()) + + failure := errors.New("failed") + ev := &EventGetClosestNodesFailure[key.Key8, kadtest.StrAddr]{ + QueryID: query.QueryID("bootstrap"), + To: nodes[1].NodeInfo, + Target: nodes[0].NodeInfo.ID().Key(), + Err: failure, + } + + routingBehaviour.Notify(ctx, ev) + + // bootstrap should receive message response event + require.IsType(t, &routing.EventBootstrapMessageFailure[key.Key8]{}, bootstrap.Received) + + rev := bootstrap.Received.(*routing.EventBootstrapMessageFailure[key.Key8]) + require.Equal(t, nodes[1].NodeInfo.ID(), rev.NodeID) + require.Equal(t, failure, rev.Error) +} + +func TestRoutingAddNodeInfoSendsEvent(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes := nettest.LinearTopology(4, clk) + + self := nodes[0].NodeInfo.ID() + + // records the event passed to include + include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) + + bootstrap := new(NullSM[routing.BootstrapEvent, routing.BootstrapState]) + probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) + + routingBehaviour := NewRoutingBehaviour[key.Key8, kadtest.StrAddr](self, bootstrap, include, probe, slog.Default()) + + ev := &EventDhtAddNodeInfo[key.Key8, kadtest.StrAddr]{ + NodeInfo: nodes[2].NodeInfo, + } + + routingBehaviour.Notify(ctx, ev) + + // the event that should be 
passed to the include state machine + expected := &routing.EventIncludeAddCandidate[key.Key8, kadtest.StrAddr]{ + NodeInfo: ev.NodeInfo, + } + require.Equal(t, expected, include.Received) +} + +func TestRoutingIncludeGetClosestNodesSuccess(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes := nettest.LinearTopology(4, clk) + + self := nodes[0].NodeInfo.ID() + + // records the event passed to include + include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) + + bootstrap := new(NullSM[routing.BootstrapEvent, routing.BootstrapState]) + probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) + + routingBehaviour := NewRoutingBehaviour[key.Key8, kadtest.StrAddr](self, bootstrap, include, probe, slog.Default()) + + ev := &EventGetClosestNodesSuccess[key.Key8, kadtest.StrAddr]{ + QueryID: query.QueryID("include"), + To: nodes[1].NodeInfo, + Target: nodes[0].NodeInfo.ID().Key(), + ClosestNodes: []kad.NodeInfo[key.Key8, kadtest.StrAddr]{nodes[2].NodeInfo}, + } + + routingBehaviour.Notify(ctx, ev) + + // include should receive message response event + require.IsType(t, &routing.EventIncludeMessageResponse[key.Key8, kadtest.StrAddr]{}, include.Received) + + rev := include.Received.(*routing.EventIncludeMessageResponse[key.Key8, kadtest.StrAddr]) + require.Equal(t, nodes[1].NodeInfo, rev.NodeInfo) + require.Equal(t, ev.ClosestNodes, rev.Response.CloserNodes()) +} + +func TestRoutingIncludeGetClosestNodesFailure(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes := nettest.LinearTopology(4, clk) + + self := nodes[0].NodeInfo.ID() + + // records the event passed to include + include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) + + bootstrap := new(NullSM[routing.BootstrapEvent, routing.BootstrapState]) + probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) + + routingBehaviour := NewRoutingBehaviour[key.Key8, kadtest.StrAddr](self, bootstrap, include, probe, slog.Default()) + + failure := errors.New("failed") + ev := &EventGetClosestNodesFailure[key.Key8, kadtest.StrAddr]{ + QueryID: query.QueryID("include"), + To: nodes[1].NodeInfo, + Target: nodes[0].NodeInfo.ID().Key(), + Err: failure, + } + + routingBehaviour.Notify(ctx, ev) + + // include should receive message response event + require.IsType(t, &routing.EventIncludeMessageFailure[key.Key8, kadtest.StrAddr]{}, include.Received) + + rev := include.Received.(*routing.EventIncludeMessageFailure[key.Key8, kadtest.StrAddr]) + require.Equal(t, nodes[1].NodeInfo, rev.NodeInfo) + require.Equal(t, failure, rev.Error) +} + +func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + clk := clock.NewMock() + _, nodes := nettest.LinearTopology(4, clk) + + self := nodes[0].NodeInfo.ID() + rt := nodes[0].RoutingTable + + includeCfg := routing.DefaultIncludeConfig() + includeCfg.Clock = clk + include, err := routing.NewInclude[key.Key8, kadtest.StrAddr](rt, includeCfg) + require.NoError(t, err) + + probeCfg := routing.DefaultProbeConfig() + probeCfg.Clock = clk + probeCfg.CheckInterval = 5 * time.Minute + probe, err := routing.NewProbe[key.Key8, kadtest.StrAddr](rt, probeCfg) + require.NoError(t, err) + + // ensure bootstrap is always idle + bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) + + routingBehaviour := 
NewRoutingBehaviour[key.Key8, kadtest.StrAddr](self, bootstrap, include, probe, slog.Default()) + + // a new node to be included + candidate := nodes[len(nodes)-1].NodeInfo + + // the routing table should not contain the node yet + _, intable := rt.GetNode(candidate.ID().Key()) + require.False(t, intable) + + // notify that there is a new node to be included + routingBehaviour.Notify(ctx, &EventDhtAddNodeInfo[key.Key8, kadtest.StrAddr]{ + NodeInfo: candidate, + }) + + // collect the result of the notify + dev, ok := routingBehaviour.Perform(ctx) + require.True(t, ok) + + // include should be asking to send a message to the node + require.IsType(t, &EventOutboundGetClosestNodes[key.Key8, kadtest.StrAddr]{}, dev) + + oev := dev.(*EventOutboundGetClosestNodes[key.Key8, kadtest.StrAddr]) + + // advance time a little + clk.Add(time.Second) + + // notify a successful response back (best to use the notify included in the event even though it will be the behaviour's Notify method) + oev.Notify.Notify(ctx, &EventGetClosestNodesSuccess[key.Key8, kadtest.StrAddr]{ + QueryID: oev.QueryID, + To: oev.To, + Target: oev.Target, + ClosestNodes: []kad.NodeInfo[key.Key8, kadtest.StrAddr]{nodes[1].NodeInfo}, // must include one for include check to pass + }) + + // the routing table should now contain the node + _, intable = rt.GetNode(candidate.ID().Key()) + require.True(t, intable) + + // routing update event should be emitted from the include state machine + dev, ok = routingBehaviour.Perform(ctx) + require.True(t, ok) + require.IsType(t, &EventRoutingUpdated[key.Key8, kadtest.StrAddr]{}, dev) + + // advance time past the probe check interval + clk.Add(probeCfg.CheckInterval) + + // routing update event should be emitted from the include state machine + dev, ok = routingBehaviour.Perform(ctx) + require.True(t, ok) + require.IsType(t, &EventOutboundGetClosestNodes[key.Key8, kadtest.StrAddr]{}, dev) + + // confirm that the message is for the correct node + oev = dev.(*EventOutboundGetClosestNodes[key.Key8, kadtest.StrAddr]) + require.Equal(t, query.QueryID("probe"), oev.QueryID) + require.Equal(t, candidate.ID(), oev.To.ID()) + require.Equal(t, candidate.ID().Key(), oev.Target) +} From 55fac36b30d17adc1954b7ef9d789916a5e1373d Mon Sep 17 00:00:00 2001 From: Ian Davis <18375+iand@users.noreply.github.com> Date: Mon, 4 Sep 2023 13:59:09 +0100 Subject: [PATCH 09/26] Remove generics from zikade imported code --- v2/coord/behaviour.go | 2 +- v2/coord/behaviour_test.go | 28 +--- v2/coord/conversion.go | 58 +++++++ v2/coord/{dht.go => coordinator.go} | 98 +++++------ v2/coord/{dht_test.go => coordinator_test.go} | 83 +++++----- v2/coord/coretypes.go | 36 +++-- v2/coord/event.go | 85 +++++----- v2/coord/internal/kadtest/bench.go | 11 -- v2/coord/internal/kadtest/bench_pre120.go | 10 -- v2/coord/internal/kadtest/kadtypes.go | 107 ------------ v2/coord/internal/kadtest/message.go | 81 ---------- v2/coord/internal/kadtest/rand.go | 45 ------ v2/coord/internal/nettest/layouts.go | 38 +++-- v2/coord/internal/nettest/routing.go | 102 +++++++----- v2/coord/internal/nettest/topology.go | 73 ++++----- v2/coord/network.go | 143 ++++++++-------- v2/coord/network_test.go | 13 +- v2/coord/query.go | 58 +++---- v2/coord/routing.go | 126 +++++++-------- v2/coord/routing_test.go | 152 ++++++++++-------- v2/kadt/kadt.go | 5 + 21 files changed, 593 insertions(+), 761 deletions(-) create mode 100644 v2/coord/conversion.go rename v2/coord/{dht.go => coordinator.go} (77%) rename v2/coord/{dht_test.go => coordinator_test.go} (74%) delete 
mode 100644 v2/coord/internal/kadtest/bench.go delete mode 100644 v2/coord/internal/kadtest/bench_pre120.go delete mode 100644 v2/coord/internal/kadtest/kadtypes.go delete mode 100644 v2/coord/internal/kadtest/message.go delete mode 100644 v2/coord/internal/kadtest/rand.go diff --git a/v2/coord/behaviour.go b/v2/coord/behaviour.go index b8cdc40c..c2574517 100644 --- a/v2/coord/behaviour.go +++ b/v2/coord/behaviour.go @@ -1,4 +1,4 @@ -package kademlia +package coord import ( "context" diff --git a/v2/coord/behaviour_test.go b/v2/coord/behaviour_test.go index 0efd04a7..20464c30 100644 --- a/v2/coord/behaviour_test.go +++ b/v2/coord/behaviour_test.go @@ -1,10 +1,7 @@ -package kademlia +package coord import ( "context" - "fmt" - "reflect" - "testing" ) type NullSM[E any, S any] struct{} @@ -29,26 +26,3 @@ func (r *RecordingSM[E, S]) Advance(ctx context.Context, e E) S { r.Received = e return r.State } - -// expectBehaviourEvent selects on a behaviour's ready channel until it becomes ready and then checks the perform -// mehtod for the expected event type. Unexpected events are ignored and selecting resumes. -// The function returns when an event matching the type of expected is received or when the context is cancelled. -func expectBehaviourEvent[I DhtEvent, O DhtEvent](t *testing.T, ctx context.Context, b Behaviour[I, O], expected O) (O, error) { - t.Helper() - for { - select { - case <-b.Ready(): - ev, ok := b.Perform(ctx) - if !ok { - continue - } - t.Logf("saw event: %T\n", ev) - if reflect.TypeOf(ev) == reflect.TypeOf(expected) { - return ev, nil - } - case <-ctx.Done(): - var v O - return v, fmt.Errorf("test deadline exceeded") - } - } -} diff --git a/v2/coord/conversion.go b/v2/coord/conversion.go new file mode 100644 index 00000000..19fec751 --- /dev/null +++ b/v2/coord/conversion.go @@ -0,0 +1,58 @@ +package coord + +import ( + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" + "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" + + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" +) + +// NodeInfoToAddrInfo converts a kad.NodeInfo to a peer.AddrInfo. +// This function will panic if info.ID() does not return a kadt.PeerID +func NodeInfoToAddrInfo(info kad.NodeInfo[key.Key256, ma.Multiaddr]) peer.AddrInfo { + peerID := info.ID().(kadt.PeerID) + return peer.AddrInfo{ + ID: peer.ID(peerID), + Addrs: info.Addresses(), + } +} + +// NodeIDToAddrInfo converts a kad.NodeID to a peer.AddrInfo with no addresses. +// This function will panic if id's underlying type is not kadt.PeerID +func NodeIDToAddrInfo(id kad.NodeID[key.Key256]) peer.AddrInfo { + peerID := id.(kadt.PeerID) + return peer.AddrInfo{ + ID: peer.ID(peerID), + } +} + +// SliceOfNodeInfoToSliceOfAddrInfo converts a kad.NodeInfo to a peer.AddrInfo. 
+// This function will panic if any info.ID() does not return a kadt.PeerID +func SliceOfNodeInfoToSliceOfAddrInfo(infos []kad.NodeInfo[key.Key256, ma.Multiaddr]) []peer.AddrInfo { + peers := make([]peer.AddrInfo, len(infos)) + for i := range infos { + peerID := infos[i].ID().(kadt.PeerID) + peers[i] = peer.AddrInfo{ + ID: peer.ID(peerID), + Addrs: infos[i].Addresses(), + } + } + return peers +} + +// SliceOfPeerIDToSliceOfNodeID converts a slice peer.ID to a slice of kad.NodeID +func SliceOfPeerIDToSliceOfNodeID(peers []peer.ID) []kad.NodeID[key.Key256] { + nodes := make([]kad.NodeID[key.Key256], len(peers)) + for i := range peers { + nodes[i] = kadt.PeerID(peers[i]) + } + return nodes +} + +// NodeIDToPeerID converts a kad.NodeID to a peer.ID. +// This function will panic if id's underlying type is not kadt.PeerID +func NodeIDToPeerID(id kad.NodeID[key.Key256]) peer.ID { + return peer.ID(id.(kadt.PeerID)) +} diff --git a/v2/coord/dht.go b/v2/coord/coordinator.go similarity index 77% rename from v2/coord/dht.go rename to v2/coord/coordinator.go index 50f62563..4ceab9f0 100644 --- a/v2/coord/dht.go +++ b/v2/coord/coordinator.go @@ -1,4 +1,4 @@ -package kademlia +package coord import ( "context" @@ -8,6 +8,8 @@ import ( "github.com/benbjohnson/clock" logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/kaderr" "github.com/plprobelab/go-kademlia/key" @@ -17,27 +19,29 @@ import ( "github.com/plprobelab/go-kademlia/util" "go.uber.org/zap/exp/zapslog" "golang.org/x/exp/slog" + + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) // A Coordinator coordinates the state machines that comprise a Kademlia DHT // It is only one possible configuration of the DHT components, others are possible. 
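A minimal construction sketch may help make the generics removal concrete: the coordinator is now instantiated with plain peer.ID, key.Key256 and ma.Multiaddr types rather than type parameters. The helper below is illustrative only; it assumes the NewCoordinator signature introduced in this commit, and the rtr/rt arguments stand in for whatever concrete Router and routing table implementations the caller supplies.

package example

import (
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/plprobelab/go-kademlia/kad"
	"github.com/plprobelab/go-kademlia/key"
	"github.com/plprobelab/go-kademlia/routing"

	"github.com/libp2p/go-libp2p-kad-dht/v2/coord"
)

// newCoordinatorSketch shows the de-generified call site: keys are fixed to
// key.Key256 and addresses to ma.Multiaddr, so no type parameters remain.
func newCoordinatorSketch(self peer.ID, rtr coord.Router, rt routing.RoutingTableCpl[key.Key256, kad.NodeID[key.Key256]]) (*coord.Coordinator, error) {
	cfg := coord.DefaultConfig() // clock, timeouts and concurrency limits can be tuned here
	return coord.NewCoordinator(self, rtr, rt, cfg)
}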
-type Coordinator[K kad.Key[K], A kad.Address[A]] struct { +type Coordinator struct { // self is the node id of the system the dht is running on - self kad.NodeID[K] + self peer.ID // cfg is a copy of the optional configuration supplied to the dht cfg Config // rt is the routing table used to look up nodes by distance - rt kad.RoutingTable[K, kad.NodeID[K]] + rt kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]] // rtr is the message router used to send messages - rtr Router[K, A] + rtr Router routingNotifications chan RoutingNotification // networkBehaviour is the behaviour responsible for communicating with the network - networkBehaviour *NetworkBehaviour[K, A] + networkBehaviour *NetworkBehaviour // routingBehaviour is the behaviour responsible for maintaining the routing table routingBehaviour Behaviour[DhtEvent, DhtEvent] @@ -119,7 +123,7 @@ func DefaultConfig() *Config { } } -func NewDht[K kad.Key[K], A kad.Address[A]](self kad.NodeID[K], rtr Router[K, A], rt routing.RoutingTableCpl[K, kad.NodeID[K]], cfg *Config) (*Coordinator[K, A], error) { +func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[key.Key256, kad.NodeID[key.Key256]], cfg *Config) (*Coordinator, error) { if cfg == nil { cfg = DefaultConfig() } else if err := cfg.Validate(); err != nil { @@ -133,19 +137,19 @@ func NewDht[K kad.Key[K], A kad.Address[A]](self kad.NodeID[K], rtr Router[K, A] qpCfg.QueryConcurrency = cfg.RequestConcurrency qpCfg.RequestTimeout = cfg.RequestTimeout - qp, err := query.NewPool[K, A](self, qpCfg) + qp, err := query.NewPool[key.Key256, ma.Multiaddr](kadt.PeerID(self), qpCfg) if err != nil { return nil, fmt.Errorf("query pool: %w", err) } queryBehaviour := NewPooledQueryBehaviour(qp, cfg.Logger) - bootstrapCfg := routing.DefaultBootstrapConfig[K, A]() + bootstrapCfg := routing.DefaultBootstrapConfig[key.Key256, ma.Multiaddr]() bootstrapCfg.Clock = cfg.Clock bootstrapCfg.Timeout = cfg.QueryTimeout bootstrapCfg.RequestConcurrency = cfg.RequestConcurrency bootstrapCfg.RequestTimeout = cfg.RequestTimeout - bootstrap, err := routing.NewBootstrap(self, bootstrapCfg) + bootstrap, err := routing.NewBootstrap[key.Key256, ma.Multiaddr](kadt.PeerID(self), bootstrapCfg) if err != nil { return nil, fmt.Errorf("bootstrap: %w", err) } @@ -159,7 +163,7 @@ func NewDht[K kad.Key[K], A kad.Address[A]](self kad.NodeID[K], rtr Router[K, A] // includeCfg.Concurrency = cfg.IncludeConcurrency // includeCfg.Timeout = cfg.IncludeTimeout - include, err := routing.NewInclude[K, A](rt, includeCfg) + include, err := routing.NewInclude[key.Key256, ma.Multiaddr](rt, includeCfg) if err != nil { return nil, fmt.Errorf("include: %w", err) } @@ -170,16 +174,16 @@ func NewDht[K kad.Key[K], A kad.Address[A]](self kad.NodeID[K], rtr Router[K, A] // TODO: expose config // probeCfg.Concurrency = cfg.ProbeConcurrency - probe, err := routing.NewProbe[K, A](rt, probeCfg) + probe, err := routing.NewProbe[key.Key256, ma.Multiaddr](rt, probeCfg) if err != nil { - return nil, fmt.Errorf("include: %w", err) + return nil, fmt.Errorf("probe: %w", err) } - routingBehaviour := NewRoutingBehaviour[K, A](self, bootstrap, include, probe, cfg.Logger) + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, cfg.Logger) networkBehaviour := NewNetworkBehaviour(rtr, cfg.Logger) - d := &Coordinator[K, A]{ + d := &Coordinator{ self: self, cfg: *cfg, rtr: rtr, @@ -196,25 +200,25 @@ func NewDht[K kad.Key[K], A kad.Address[A]](self kad.NodeID[K], rtr Router[K, A] return d, nil } -func (d *Coordinator[K, A]) ID() 
kad.NodeID[K] { +func (d *Coordinator) ID() peer.ID { return d.self } -func (d *Coordinator[K, A]) Addresses() []A { +func (d *Coordinator) Addresses() []ma.Multiaddr { // TODO: return configured listen addresses info, err := d.rtr.GetNodeInfo(context.TODO(), d.self) if err != nil { return nil } - return info.Addresses() + return info.Addrs } // RoutingNotifications returns a channel that may be read to be notified of routing updates -func (d *Coordinator[K, A]) RoutingNotifications() <-chan RoutingNotification { +func (d *Coordinator) RoutingNotifications() <-chan RoutingNotification { return d.routingNotifications } -func (d *Coordinator[K, A]) eventLoop() { +func (d *Coordinator) eventLoop() { ctx := context.Background() for { @@ -235,20 +239,20 @@ func (d *Coordinator[K, A]) eventLoop() { } } -func (c *Coordinator[K, A]) dispatchDhtNotice(ctx context.Context, ev DhtEvent) { - ctx, span := util.StartSpan(ctx, "Dht.dispatchDhtNotice") +func (c *Coordinator) dispatchDhtNotice(ctx context.Context, ev DhtEvent) { + ctx, span := util.StartSpan(ctx, "Coordinator.dispatchDhtNotice") defer span.End() switch ev := ev.(type) { - case *EventDhtStartBootstrap[K, A]: + case *EventDhtStartBootstrap: c.routingBehaviour.Notify(ctx, ev) - case *EventOutboundGetClosestNodes[K, A]: + case *EventOutboundGetClosestNodes: c.networkBehaviour.Notify(ctx, ev) - case *EventStartQuery[K, A]: + case *EventStartQuery: c.queryBehaviour.Notify(ctx, ev) case *EventStopQuery: c.queryBehaviour.Notify(ctx, ev) - case *EventDhtAddNodeInfo[K, A]: + case *EventDhtAddNodeInfo: c.routingBehaviour.Notify(ctx, ev) case RoutingNotification: select { @@ -261,8 +265,8 @@ func (c *Coordinator[K, A]) dispatchDhtNotice(ctx context.Context, ev DhtEvent) // GetNode retrieves the node associated with the given node id from the DHT's local routing table. // If the node isn't found in the table, it returns ErrNodeNotFound. -func (d *Coordinator[K, A]) GetNode(ctx context.Context, id kad.NodeID[K]) (Node[K, A], error) { - if _, exists := d.rt.GetNode(id.Key()); !exists { +func (d *Coordinator) GetNode(ctx context.Context, id peer.ID) (Node, error) { + if _, exists := d.rt.GetNode(kadt.PeerID(id).Key()); !exists { return nil, ErrNodeNotFound } @@ -274,11 +278,11 @@ func (d *Coordinator[K, A]) GetNode(ctx context.Context, id kad.NodeID[K]) (Node } // GetClosestNodes requests the n closest nodes to the key from the node's local routing table. -func (d *Coordinator[K, A]) GetClosestNodes(ctx context.Context, k K, n int) ([]Node[K, A], error) { +func (d *Coordinator) GetClosestNodes(ctx context.Context, k key.Key256, n int) ([]Node, error) { closest := d.rt.NearestNodes(k, n) - nodes := make([]Node[K, A], 0, len(closest)) + nodes := make([]Node, 0, len(closest)) for _, id := range closest { - nh, err := d.networkBehaviour.getNodeHandler(ctx, id) + nh, err := d.networkBehaviour.getNodeHandler(ctx, NodeIDToPeerID(id)) if err != nil { return nil, err } @@ -289,19 +293,19 @@ func (d *Coordinator[K, A]) GetClosestNodes(ctx context.Context, k K, n int) ([] // GetValue requests that the node return any value associated with the supplied key. // If the node does not have a value for the key it returns ErrValueNotFound. -func (d *Coordinator[K, A]) GetValue(ctx context.Context, key K) (Value[K], error) { +func (d *Coordinator) GetValue(ctx context.Context, k key.Key256) (Value, error) { panic("not implemented") } // PutValue requests that the node stores a value to be associated with the supplied key. 
// If the node cannot or chooses not to store the value for the key it returns ErrValueNotAccepted. -func (d *Coordinator[K, A]) PutValue(ctx context.Context, r Value[K], q int) error { +func (d *Coordinator) PutValue(ctx context.Context, r Value, q int) error { panic("not implemented") } // Query traverses the DHT calling fn for each node visited. -func (d *Coordinator[K, A]) Query(ctx context.Context, target K, fn QueryFunc[K, A]) (QueryStats, error) { - ctx, span := util.StartSpan(ctx, "Dht.Query") +func (d *Coordinator) Query(ctx context.Context, target key.Key256, fn QueryFunc) (QueryStats, error) { + ctx, span := util.StartSpan(ctx, "Coordinator.Query") defer span.End() ctx, cancel := context.WithCancel(ctx) @@ -312,7 +316,7 @@ func (d *Coordinator[K, A]) Query(ctx context.Context, target K, fn QueryFunc[K, return QueryStats{}, err } - seedIDs := make([]kad.NodeID[K], 0, len(seeds)) + seedIDs := make([]peer.ID, 0, len(seeds)) for _, s := range seeds { seedIDs = append(seedIDs, s.ID()) } @@ -320,11 +324,11 @@ func (d *Coordinator[K, A]) Query(ctx context.Context, target K, fn QueryFunc[K, waiter := NewWaiter[DhtEvent]() queryID := query.QueryID("foo") - cmd := &EventStartQuery[K, A]{ + cmd := &EventStartQuery{ QueryID: queryID, Target: target, ProtocolID: address.ProtocolID("TODO"), - Message: &fakeMessage[K, A]{key: target}, + Message: &fakeMessage{key: target}, KnownClosestNodes: seedIDs, Notify: waiter, } @@ -340,7 +344,7 @@ func (d *Coordinator[K, A]) Query(ctx context.Context, target K, fn QueryFunc[K, case wev := <-waiter.Chan(): ctx, ev := wev.Ctx, wev.Event switch ev := ev.(type) { - case *EventQueryProgressed[K, A]: + case *EventQueryProgressed: lastStats = QueryStats{ Start: ev.Stats.Start, Requests: ev.Stats.Requests, @@ -384,16 +388,16 @@ func (d *Coordinator[K, A]) Query(ctx context.Context, target K, fn QueryFunc[K, // AddNodes suggests new DHT nodes and their associated addresses to be added to the routing table. // If the routing table is updated as a result of this operation an EventRoutingUpdated notification // is emitted on the routing notification channel. -func (d *Coordinator[K, A]) AddNodes(ctx context.Context, infos []kad.NodeInfo[K, A]) error { - ctx, span := util.StartSpan(ctx, "Dht.AddNodes") +func (d *Coordinator) AddNodes(ctx context.Context, infos []peer.AddrInfo) error { + ctx, span := util.StartSpan(ctx, "Coordinator.AddNodes") defer span.End() for _, info := range infos { - if key.Equal(info.ID().Key(), d.self.Key()) { + if info.ID == d.self { // skip self continue } - d.routingBehaviour.Notify(ctx, &EventDhtAddNodeInfo[K, A]{ + d.routingBehaviour.Notify(ctx, &EventDhtAddNodeInfo{ NodeInfo: info, }) @@ -403,12 +407,12 @@ func (d *Coordinator[K, A]) AddNodes(ctx context.Context, infos []kad.NodeInfo[K } // Bootstrap instructs the dht to begin bootstrapping the routing table. 
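Taken together, AddNodes, Bootstrap and Query give callers the whole lifecycle: seed the table, bootstrap it, then traverse. The sketch below strings them together much like the tests in coordinator_test.go do; it assumes only the exported API shown in this file and keeps error handling deliberately minimal.

package example

import (
	"context"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/plprobelab/go-kademlia/key"

	"github.com/libp2p/go-libp2p-kad-dht/v2/coord"
)

// bootstrapAndQuerySketch seeds the routing table, waits for the bootstrap to
// finish, then walks the DHT towards target, collecting every node visited.
func bootstrapAndQuerySketch(ctx context.Context, d *coord.Coordinator, seeds []peer.ID, target key.Key256) ([]peer.ID, error) {
	events := d.RoutingNotifications()

	if err := d.Bootstrap(ctx, seeds); err != nil {
		return nil, err
	}

	// Block until the bootstrap query reports completion (or the context ends).
	for done := false; !done; {
		select {
		case ev := <-events:
			if _, ok := ev.(*coord.EventBootstrapFinished); ok {
				done = true
			}
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}

	// Query invokes the callback once per node visited; returning an error
	// from the callback stops the traversal.
	var visited []peer.ID
	_, err := d.Query(ctx, target, func(ctx context.Context, node coord.Node, stats coord.QueryStats) error {
		visited = append(visited, node.ID())
		return nil
	})
	return visited, err
}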
-func (d *Coordinator[K, A]) Bootstrap(ctx context.Context, seeds []kad.NodeID[K]) error { - ctx, span := util.StartSpan(ctx, "Dht.Bootstrap") +func (d *Coordinator) Bootstrap(ctx context.Context, seeds []peer.ID) error { + ctx, span := util.StartSpan(ctx, "Coordinator.Bootstrap") defer span.End() - d.routingBehaviour.Notify(ctx, &EventDhtStartBootstrap[K, A]{ + d.routingBehaviour.Notify(ctx, &EventDhtStartBootstrap{ // Bootstrap state machine uses the message - Message: &fakeMessage[K, A]{key: d.self.Key()}, + Message: &fakeMessage{key: kadt.PeerID(d.self).Key()}, SeedNodes: seeds, }) diff --git a/v2/coord/dht_test.go b/v2/coord/coordinator_test.go similarity index 74% rename from v2/coord/dht_test.go rename to v2/coord/coordinator_test.go index f9dba8c1..4fb221da 100644 --- a/v2/coord/dht_test.go +++ b/v2/coord/coordinator_test.go @@ -1,4 +1,4 @@ -package kademlia +package coord import ( "context" @@ -9,13 +9,12 @@ import ( "time" "github.com/benbjohnson/clock" + "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" - "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/key" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) const peerstoreTTL = 10 * time.Minute @@ -92,7 +91,8 @@ func TestExhaustiveQuery(t *testing.T) { defer cancel() clk := clock.NewMock() - _, nodes := nettest.LinearTopology(4, clk) + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) ccfg := DefaultConfig() ccfg.Clock = clk ccfg.PeerstoreTTL = peerstoreTTL @@ -100,16 +100,16 @@ func TestExhaustiveQuery(t *testing.T) { // A (ids[0]) is looking for D (ids[3]) // A will first ask B, B will reply with C's address (and A's address) // A will then ask C, C will reply with D's address (and B's address) - self := nodes[0].NodeInfo.ID() - c, err := NewDht[key.Key8, kadtest.StrAddr](self, nodes[0].Router, nodes[0].RoutingTable, ccfg) + self := nodes[0].NodeInfo.ID + c, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg) require.NoError(t, err) - target := nodes[3].NodeInfo.ID().Key() + target := kadt.PeerID(nodes[3].NodeInfo.ID).Key() visited := make(map[string]int) // Record the nodes as they are visited - qfn := func(ctx context.Context, node Node[key.Key8, kadtest.StrAddr], stats QueryStats) error { + qfn := func(ctx context.Context, node Node, stats QueryStats) error { visited[node.ID().String()]++ return nil } @@ -119,9 +119,9 @@ func TestExhaustiveQuery(t *testing.T) { require.NoError(t, err) require.Equal(t, 3, len(visited)) - require.Contains(t, visited, nodes[1].NodeInfo.ID().String()) - require.Contains(t, visited, nodes[2].NodeInfo.ID().String()) - require.Contains(t, visited, nodes[3].NodeInfo.ID().String()) + require.Contains(t, visited, nodes[1].NodeInfo.ID.String()) + require.Contains(t, visited, nodes[2].NodeInfo.ID.String()) + require.Contains(t, visited, nodes[3].NodeInfo.ID.String()) } func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { @@ -129,7 +129,8 @@ func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { defer cancel() clk := clock.NewMock() - _, nodes := nettest.LinearTopology(4, clk) + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) ccfg := DefaultConfig() ccfg.Clock = clk @@ -138,8 +139,8 @@ func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { // A (ids[0]) is looking for D (ids[3]) // A will first ask B, B will reply 
with C's address (and A's address) // A will then ask C, C will reply with D's address (and B's address) - self := nodes[0].NodeInfo.ID() - c, err := NewDht[key.Key8, kadtest.StrAddr](self, nodes[0].Router, nodes[0].RoutingTable, ccfg) + self := nodes[0].NodeInfo.ID + c, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg) if err != nil { log.Fatalf("unexpected error creating dht: %v", err) } @@ -156,34 +157,34 @@ func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { } }() - qfn := func(ctx context.Context, node Node[key.Key8, kadtest.StrAddr], stats QueryStats) error { + qfn := func(ctx context.Context, node Node, stats QueryStats) error { return nil } // Run a query to find the value - target := nodes[3].NodeInfo.ID().Key() + target := kadt.PeerID(nodes[3].NodeInfo.ID).Key() _, err = c.Query(ctx, target, qfn) require.NoError(t, err) // the query run by the dht should have received a response from nodes[1] with closer nodes // nodes[0] and nodes[2] which should trigger a routing table update since nodes[2] was // not in the dht's routing table. - ev, err := expectEventType(t, ctx, buffer, &EventRoutingUpdated[key.Key8, kadtest.StrAddr]{}) + ev, err := expectEventType(t, ctx, buffer, &EventRoutingUpdated{}) require.NoError(t, err) - tev := ev.(*EventRoutingUpdated[key.Key8, kadtest.StrAddr]) - require.Equal(t, nodes[2].NodeInfo.ID(), tev.NodeInfo.ID()) + tev := ev.(*EventRoutingUpdated) + require.Equal(t, nodes[2].NodeInfo.ID, NodeIDToPeerID(tev.NodeInfo.ID())) // no EventRoutingUpdated is sent for the self node // the query continues and should have received a response from nodes[2] with closer nodes // nodes[1] and nodes[3] which should trigger a routing table update since nodes[3] was // not in the dht's routing table. 
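expectEventType is a small test helper defined elsewhere in this package; conceptually it drains the notification channel until an event with the expected concrete type arrives or the test context expires. A sketch of that pattern, assuming nothing beyond the RoutingNotification channel used above:

package example

import (
	"context"
	"fmt"
	"reflect"
	"testing"

	"github.com/libp2p/go-libp2p-kad-dht/v2/coord"
)

// expectEventTypeSketch waits for an event whose dynamic type matches expected,
// ignoring any other notifications that arrive in the meantime.
func expectEventTypeSketch(t *testing.T, ctx context.Context, events <-chan coord.RoutingNotification, expected coord.RoutingNotification) (coord.RoutingNotification, error) {
	t.Helper()
	for {
		select {
		case ev := <-events:
			t.Logf("saw event: %T", ev)
			if reflect.TypeOf(ev) == reflect.TypeOf(expected) {
				return ev, nil
			}
		case <-ctx.Done():
			return nil, fmt.Errorf("test deadline exceeded while waiting for %T", expected)
		}
	}
}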
- ev, err = expectEventType(t, ctx, buffer, &EventRoutingUpdated[key.Key8, kadtest.StrAddr]{}) + ev, err = expectEventType(t, ctx, buffer, &EventRoutingUpdated{}) require.NoError(t, err) - tev = ev.(*EventRoutingUpdated[key.Key8, kadtest.StrAddr]) - require.Equal(t, nodes[3].NodeInfo.ID(), tev.NodeInfo.ID()) + tev = ev.(*EventRoutingUpdated) + require.Equal(t, nodes[3].NodeInfo.ID, NodeIDToPeerID(tev.NodeInfo.ID())) } func TestBootstrap(t *testing.T) { @@ -191,14 +192,15 @@ func TestBootstrap(t *testing.T) { defer cancel() clk := clock.NewMock() - _, nodes := nettest.LinearTopology(4, clk) + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) ccfg := DefaultConfig() ccfg.Clock = clk ccfg.PeerstoreTTL = peerstoreTTL - self := nodes[0].NodeInfo.ID() - d, err := NewDht[key.Key8, kadtest.StrAddr](self, nodes[0].Router, nodes[0].RoutingTable, ccfg) + self := nodes[0].NodeInfo.ID + d, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg) if err != nil { log.Fatalf("unexpected error creating dht: %v", err) } @@ -215,8 +217,8 @@ func TestBootstrap(t *testing.T) { } }() - seeds := []kad.NodeID[key.Key8]{ - nodes[1].NodeInfo.ID(), + seeds := []peer.ID{ + nodes[1].NodeInfo.ID, } err = d.Bootstrap(ctx, seeds) require.NoError(t, err) @@ -232,15 +234,15 @@ func TestBootstrap(t *testing.T) { require.Equal(t, 0, tevf.Stats.Failure) // DHT should now have node1 in its routing table - _, err = d.GetNode(ctx, nodes[1].NodeInfo.ID()) + _, err = d.GetNode(ctx, nodes[1].NodeInfo.ID) require.NoError(t, err) // DHT should now have node2 in its routing table - _, err = d.GetNode(ctx, nodes[2].NodeInfo.ID()) + _, err = d.GetNode(ctx, nodes[2].NodeInfo.ID) require.NoError(t, err) // DHT should now have node3 in its routing table - _, err = d.GetNode(ctx, nodes[3].NodeInfo.ID()) + _, err = d.GetNode(ctx, nodes[3].NodeInfo.ID) require.NoError(t, err) } @@ -249,7 +251,8 @@ func TestIncludeNode(t *testing.T) { defer cancel() clk := clock.NewMock() - _, nodes := nettest.LinearTopology(4, clk) + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) ccfg := DefaultConfig() ccfg.Clock = clk @@ -257,30 +260,30 @@ func TestIncludeNode(t *testing.T) { candidate := nodes[len(nodes)-1].NodeInfo // not in nodes[0] routing table - self := nodes[0].NodeInfo.ID() - d, err := NewDht[key.Key8, kadtest.StrAddr](self, nodes[0].Router, nodes[0].RoutingTable, ccfg) + self := nodes[0].NodeInfo.ID + d, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg) if err != nil { log.Fatalf("unexpected error creating dht: %v", err) } // the routing table should not contain the node yet - _, err = d.GetNode(ctx, candidate.ID()) + _, err = d.GetNode(ctx, candidate.ID) require.ErrorIs(t, err, ErrNodeNotFound) events := d.RoutingNotifications() // inject a new node into the dht's includeEvents queue - err = d.AddNodes(ctx, []kad.NodeInfo[key.Key8, kadtest.StrAddr]{candidate}) + err = d.AddNodes(ctx, []peer.AddrInfo{candidate}) require.NoError(t, err) // the include state machine runs in the background and eventually should add the node to routing table - ev, err := expectEventType(t, ctx, events, &EventRoutingUpdated[key.Key8, kadtest.StrAddr]{}) + ev, err := expectEventType(t, ctx, events, &EventRoutingUpdated{}) require.NoError(t, err) - tev := ev.(*EventRoutingUpdated[key.Key8, kadtest.StrAddr]) - require.Equal(t, candidate.ID(), tev.NodeInfo.ID()) + tev := ev.(*EventRoutingUpdated) + require.Equal(t, candidate.ID, NodeIDToPeerID(tev.NodeInfo.ID())) // the routing table 
should not contain the node yet - _, err = d.GetNode(ctx, candidate.ID()) + _, err = d.GetNode(ctx, candidate.ID) require.NoError(t, err) } diff --git a/v2/coord/coretypes.go b/v2/coord/coretypes.go index 6e18f0f4..55476efe 100644 --- a/v2/coord/coretypes.go +++ b/v2/coord/coretypes.go @@ -1,35 +1,41 @@ -package kademlia +package coord import ( "context" "errors" "time" - "github.com/plprobelab/go-kademlia/kad" + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" + "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/network/address" ) // Value is a value that may be stored in the DHT. -type Value[K kad.Key[K]] interface { - Key() K +type Value interface { + Key() key.Key256 MarshalBinary() ([]byte, error) } // Node represent a remote node, a participant in the DHT. -type Node[K kad.Key[K], A kad.Address[A]] interface { - kad.NodeInfo[K, A] +type Node interface { + ID() peer.ID + + // Addresses returns the network addresses associated with the given node. + Addresses() []ma.Multiaddr // GetClosestNodes requests the n closest nodes to the key from the node's local routing table. // The node may return fewer nodes than requested. - GetClosestNodes(ctx context.Context, key K, n int) ([]Node[K, A], error) + GetClosestNodes(ctx context.Context, key key.Key256, n int) ([]Node, error) // GetValue requests that the node return any value associated with the supplied key. // If the node does not have a value for the key it returns ErrValueNotFound. - GetValue(ctx context.Context, key K) (Value[K], error) + GetValue(ctx context.Context, key key.Key256) (Value, error) // PutValue requests that the node stores a value to be associated with the supplied key. // If the node cannot or chooses not to store the value for the key it returns ErrValueNotAccepted. - PutValue(ctx context.Context, r Value[K], q int) error + PutValue(ctx context.Context, r Value, q int) error } var ( @@ -46,7 +52,7 @@ var ( // Query stops entirely and returns that error. // // The stats argument contains statistics on the progress of the query so far. -type QueryFunc[K kad.Key[K], A kad.Address[A]] func(ctx context.Context, node Node[K, A], stats QueryStats) error +type QueryFunc func(ctx context.Context, node Node, stats QueryStats) error type QueryStats struct { Start time.Time // Start is the time the query began executing. @@ -67,15 +73,15 @@ var ( // Router its a work in progress // TODO figure out the role of protocol identifiers -type Router[K kad.Key[K], A kad.Address[A]] interface { +type Router interface { // SendMessage attempts to send a request to another node. The Router will absorb the addresses in to into its // internal nodestore. This method blocks until a response is received or an error is encountered. - SendMessage(ctx context.Context, to kad.NodeInfo[K, A], protoID address.ProtocolID, req kad.Request[K, A]) (kad.Response[K, A], error) + SendMessage(ctx context.Context, to peer.AddrInfo, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error) - AddNodeInfo(ctx context.Context, info kad.NodeInfo[K, A], ttl time.Duration) error - GetNodeInfo(ctx context.Context, id kad.NodeID[K]) (kad.NodeInfo[K, A], error) + AddNodeInfo(ctx context.Context, info peer.AddrInfo, ttl time.Duration) error + GetNodeInfo(ctx context.Context, id peer.ID) (peer.AddrInfo, error) // GetClosestNodes attempts to send a request to another node asking it for nodes that it considers to be // closest to the target key. 
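The interface leaves it to implementations how GetClosestNodes maps onto the wire; the in-memory router in internal/nettest (further down in this patch) does it by sending a FIND_NODE pb.Message and converting the closer peers from the response. A rough sketch of that shape, with the protocol ID and key encoding left as placeholders:

package example

import (
	"context"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/plprobelab/go-kademlia/key"
	"github.com/plprobelab/go-kademlia/network/address"

	"github.com/libp2p/go-libp2p-kad-dht/v2/coord"
	pb "github.com/libp2p/go-libp2p-kad-dht/v2/pb"
)

// getClosestNodesSketch expresses GetClosestNodes in terms of SendMessage:
// build a FIND_NODE request for the target and read the closer peers back.
func getClosestNodesSketch(ctx context.Context, r coord.Router, to peer.AddrInfo, target key.Key256) ([]peer.AddrInfo, error) {
	req := &pb.Message{
		Type: pb.Message_FIND_NODE,
		Key:  []byte(key.HexString(target)), // placeholder encoding of the target key
	}
	resp, err := r.SendMessage(ctx, to, address.ProtocolID("/test/1.0.0"), req)
	if err != nil {
		return nil, err
	}
	return resp.CloserPeersAddrInfos(), nil
}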
- GetClosestNodes(ctx context.Context, to kad.NodeInfo[K, A], target K) ([]kad.NodeInfo[K, A], error) + GetClosestNodes(ctx context.Context, to peer.AddrInfo, target key.Key256) ([]peer.AddrInfo, error) } diff --git a/v2/coord/event.go b/v2/coord/event.go index 697289b2..9f65638e 100644 --- a/v2/coord/event.go +++ b/v2/coord/event.go @@ -1,7 +1,10 @@ -package kademlia +package coord import ( + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" + "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/network/address" "github.com/plprobelab/go-kademlia/query" ) @@ -30,36 +33,36 @@ type RoutingNotification interface { routingNotificationEvent() } -type EventDhtStartBootstrap[K kad.Key[K], A kad.Address[A]] struct { +type EventDhtStartBootstrap struct { ProtocolID address.ProtocolID - Message kad.Request[K, A] - SeedNodes []kad.NodeID[K] + Message kad.Request[key.Key256, ma.Multiaddr] + SeedNodes []peer.ID // TODO: peer.AddrInfo } -func (EventDhtStartBootstrap[K, A]) dhtEvent() {} -func (EventDhtStartBootstrap[K, A]) dhtCommand() {} +func (EventDhtStartBootstrap) dhtEvent() {} +func (EventDhtStartBootstrap) dhtCommand() {} -type EventOutboundGetClosestNodes[K kad.Key[K], A kad.Address[A]] struct { +type EventOutboundGetClosestNodes struct { QueryID query.QueryID - To kad.NodeInfo[K, A] - Target K + To peer.AddrInfo + Target key.Key256 Notify Notify[DhtEvent] } -func (EventOutboundGetClosestNodes[K, A]) dhtEvent() {} -func (EventOutboundGetClosestNodes[K, A]) nodeHandlerRequest() {} +func (EventOutboundGetClosestNodes) dhtEvent() {} +func (EventOutboundGetClosestNodes) nodeHandlerRequest() {} -type EventStartQuery[K kad.Key[K], A kad.Address[A]] struct { +type EventStartQuery struct { QueryID query.QueryID - Target K + Target key.Key256 ProtocolID address.ProtocolID - Message kad.Request[K, A] - KnownClosestNodes []kad.NodeID[K] + Message kad.Request[key.Key256, ma.Multiaddr] + KnownClosestNodes []peer.ID Notify NotifyCloser[DhtEvent] } -func (EventStartQuery[K, A]) dhtEvent() {} -func (EventStartQuery[K, A]) dhtCommand() {} +func (EventStartQuery) dhtEvent() {} +func (EventStartQuery) dhtCommand() {} type EventStopQuery struct { QueryID query.QueryID @@ -68,43 +71,43 @@ type EventStopQuery struct { func (EventStopQuery) dhtEvent() {} func (EventStopQuery) dhtCommand() {} -type EventDhtAddNodeInfo[K kad.Key[K], A kad.Address[A]] struct { - NodeInfo kad.NodeInfo[K, A] +type EventDhtAddNodeInfo struct { + NodeInfo peer.AddrInfo } -func (EventDhtAddNodeInfo[K, A]) dhtEvent() {} -func (EventDhtAddNodeInfo[K, A]) dhtCommand() {} +func (EventDhtAddNodeInfo) dhtEvent() {} +func (EventDhtAddNodeInfo) dhtCommand() {} -type EventGetClosestNodesSuccess[K kad.Key[K], A kad.Address[A]] struct { - QueryID query.QueryID - To kad.NodeInfo[K, A] - Target K - ClosestNodes []kad.NodeInfo[K, A] +type EventGetCloserNodesSuccess struct { + QueryID query.QueryID + To peer.AddrInfo + Target key.Key256 + CloserNodes []peer.AddrInfo } -func (EventGetClosestNodesSuccess[K, A]) dhtEvent() {} -func (EventGetClosestNodesSuccess[K, A]) nodeHandlerResponse() {} +func (EventGetCloserNodesSuccess) dhtEvent() {} +func (EventGetCloserNodesSuccess) nodeHandlerResponse() {} -type EventGetClosestNodesFailure[K kad.Key[K], A kad.Address[A]] struct { +type EventGetCloserNodesFailure struct { QueryID query.QueryID - To kad.NodeInfo[K, A] - Target K + To peer.AddrInfo + Target key.Key256 Err error } -func (EventGetClosestNodesFailure[K, A]) 
dhtEvent() {} -func (EventGetClosestNodesFailure[K, A]) nodeHandlerResponse() {} +func (EventGetCloserNodesFailure) dhtEvent() {} +func (EventGetCloserNodesFailure) nodeHandlerResponse() {} // EventQueryProgressed is emitted by the dht when a query has received a // response from a node. -type EventQueryProgressed[K kad.Key[K], A kad.Address[A]] struct { +type EventQueryProgressed struct { QueryID query.QueryID - NodeID kad.NodeID[K] - Response kad.Response[K, A] + NodeID peer.ID + Response kad.Response[key.Key256, ma.Multiaddr] Stats query.QueryStats } -func (*EventQueryProgressed[K, A]) dhtEvent() {} +func (*EventQueryProgressed) dhtEvent() {} // EventQueryFinished is emitted by the dht when a query has finished, either through // running to completion or by being canceled. @@ -116,12 +119,12 @@ type EventQueryFinished struct { func (*EventQueryFinished) dhtEvent() {} // EventRoutingUpdated is emitted by the dht when a new node has been verified and added to the routing table. -type EventRoutingUpdated[K kad.Key[K], A kad.Address[A]] struct { - NodeInfo kad.NodeInfo[K, A] +type EventRoutingUpdated struct { + NodeInfo kad.NodeInfo[key.Key256, ma.Multiaddr] } -func (*EventRoutingUpdated[K, A]) dhtEvent() {} -func (*EventRoutingUpdated[K, A]) routingNotificationEvent() {} +func (*EventRoutingUpdated) dhtEvent() {} +func (*EventRoutingUpdated) routingNotificationEvent() {} // EventBootstrapFinished is emitted by the dht when a bootstrap has finished, either through // running to completion or by being canceled. diff --git a/v2/coord/internal/kadtest/bench.go b/v2/coord/internal/kadtest/bench.go deleted file mode 100644 index 7bdc9035..00000000 --- a/v2/coord/internal/kadtest/bench.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build go1.20 - -package kadtest - -import "testing" - -// ReportTimePerItemMetric adds a custom metric to a benchmark that reports the number of nanoseconds taken per item. -func ReportTimePerItemMetric(b *testing.B, n int, name string) { - // b.Elapsed was added in Go 1.20 - b.ReportMetric(float64(b.Elapsed().Nanoseconds())/float64(n), "ns/"+name) -} diff --git a/v2/coord/internal/kadtest/bench_pre120.go b/v2/coord/internal/kadtest/bench_pre120.go deleted file mode 100644 index 643dff49..00000000 --- a/v2/coord/internal/kadtest/bench_pre120.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !go1.20 - -package kadtest - -import "testing" - -// ReportTimePerItemMetric is a no-op on versions of Go before 1.20 -func ReportTimePerItemMetric(b *testing.B, n int, name string) { - // no-op -} diff --git a/v2/coord/internal/kadtest/kadtypes.go b/v2/coord/internal/kadtest/kadtypes.go deleted file mode 100644 index 6ce09dd1..00000000 --- a/v2/coord/internal/kadtest/kadtypes.go +++ /dev/null @@ -1,107 +0,0 @@ -package kadtest - -import ( - "crypto/sha256" - "net" - - "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/key" -) - -// ID is a concrete implementation of the NodeID interface. -type ID[K kad.Key[K]] struct { - key K -} - -// interface assertion. Using the concrete key type of key.Key8 does not -// limit the validity of the assertion for other key types. -var _ kad.NodeID[key.Key8] = (*ID[key.Key8])(nil) - -// NewID returns a new Kademlia identifier that implements the NodeID interface. -// Instead of deriving the Kademlia key from a NodeID, this method directly takes -// the Kademlia key. 
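The deleted kadtest.ID and StringID helpers were only needed while node identities were generic; after this commit the tests use real peer.IDs and derive Kademlia keys through kadt.PeerID (see v2/kadt). A small sketch of the replacement pattern, with the random-identity generation modelled on the updated nettest helpers:

package example

import (
	"crypto/rand"

	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/plprobelab/go-kademlia/key"

	"github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
)

// newTestIdentitySketch generates a throwaway ed25519 identity and derives the
// 256-bit routing key for it via kadt.PeerID, mirroring what the nettest
// package now does instead of constructing kadtest.ID values.
func newTestIdentitySketch() (peer.ID, key.Key256, error) {
	_, pub, err := crypto.GenerateEd25519Key(rand.Reader)
	if err != nil {
		return "", key.Key256{}, err
	}
	pid, err := peer.IDFromPublicKey(pub)
	if err != nil {
		return "", key.Key256{}, err
	}
	return pid, kadt.PeerID(pid).Key(), nil
}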
-func NewID[K kad.Key[K]](k K) *ID[K] { - return &ID[K]{key: k} -} - -// Key returns the Kademlia key that is used by, e.g., the routing table -// implementation to group nodes into buckets. The returned key was manually -// defined in the ID constructor NewID and not derived via, e.g., hashing -// a preimage. -func (i ID[K]) Key() K { - return i.key -} - -func (i ID[K]) Equal(other K) bool { - return i.key.Compare(other) == 0 -} - -func (i ID[K]) String() string { - return key.HexString(i.key) -} - -type StringID string - -var _ kad.NodeID[key.Key256] = (*StringID)(nil) - -func NewStringID(s string) *StringID { - return (*StringID)(&s) -} - -func (s StringID) Key() key.Key256 { - h := sha256.New() - h.Write([]byte(s)) - return key.NewKey256(h.Sum(nil)) -} - -func (s StringID) NodeID() kad.NodeID[key.Key256] { - return &s -} - -func (s StringID) Equal(other string) bool { - return string(s) == other -} - -func (s StringID) String() string { - return string(s) -} - -type Info[K kad.Key[K], A kad.Address[A]] struct { - id *ID[K] - addrs []A -} - -var _ kad.NodeInfo[key.Key8, net.IP] = (*Info[key.Key8, net.IP])(nil) - -func NewInfo[K kad.Key[K], A kad.Address[A]](id *ID[K], addrs []A) *Info[K, A] { - return &Info[K, A]{ - id: id, - addrs: addrs, - } -} - -func (a *Info[K, A]) AddAddr(addr A) { - a.addrs = append(a.addrs, addr) -} - -func (a *Info[K, A]) RemoveAddr(addr A) { - writeIndex := 0 - // remove all occurrences of addr - for _, ad := range a.addrs { - if !ad.Equal(addr) { - a.addrs[writeIndex] = ad - writeIndex++ - } - } - a.addrs = a.addrs[:writeIndex] -} - -func (a *Info[K, A]) ID() kad.NodeID[K] { - return a.id -} - -func (a *Info[K, A]) Addresses() []A { - addresses := make([]A, len(a.addrs)) - copy(addresses, a.addrs) - return addresses -} diff --git a/v2/coord/internal/kadtest/message.go b/v2/coord/internal/kadtest/message.go deleted file mode 100644 index 05071cee..00000000 --- a/v2/coord/internal/kadtest/message.go +++ /dev/null @@ -1,81 +0,0 @@ -package kadtest - -import ( - "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/key" -) - -// StrAddr is a simple implementation of kad.Address that uses a string to represent the address. 
-type StrAddr string - -var _ kad.Address[StrAddr] = StrAddr("") - -func (a StrAddr) Equal(b StrAddr) bool { return a == b } - -type Request[K kad.Key[K]] struct { - target K - id string -} - -func NewRequest[K kad.Key[K]](id string, target K) *Request[K] { - return &Request[K]{ - target: target, - id: id, - } -} - -func (r *Request[K]) Target() K { - return r.target -} - -func (r *Request[K]) ID() string { - return r.id -} - -func (r *Request[K]) EmptyResponse() kad.Response[K, StrAddr] { - return &Response[K]{} -} - -type Response[K kad.Key[K]] struct { - id string - closer []kad.NodeInfo[K, StrAddr] -} - -func NewResponse[K kad.Key[K]](id string, closer []kad.NodeInfo[K, StrAddr]) *Response[K] { - return &Response[K]{ - id: id, - closer: closer, - } -} - -func (r *Response[K]) ID() string { - return r.id -} - -func (r *Response[K]) CloserNodes() []kad.NodeInfo[K, StrAddr] { - return r.closer -} - -type ( - // Request8 is a Request message that uses key.Key8 - Request8 = Request[key.Key8] - - // Response8 is a Response message that uses key.Key8 - Response8 = Response[key.Key8] - - // Request8 is a Request message that uses key.Key256 - Request256 = Request[key.Key256] - - // Response256 is a Response message that uses key.Key256 - Response256 = Response[key.Key256] -) - -var ( - _ kad.Request[key.Key8, StrAddr] = (*Request8)(nil) - _ kad.Response[key.Key8, StrAddr] = (*Response8)(nil) -) - -var ( - _ kad.Request[key.Key256, StrAddr] = (*Request256)(nil) - _ kad.Response[key.Key256, StrAddr] = (*Response256)(nil) -) diff --git a/v2/coord/internal/kadtest/rand.go b/v2/coord/internal/kadtest/rand.go deleted file mode 100644 index 7c4be431..00000000 --- a/v2/coord/internal/kadtest/rand.go +++ /dev/null @@ -1,45 +0,0 @@ -package kadtest - -import ( - "math/rand" - "strconv" - - "github.com/plprobelab/go-kademlia/key" -) - -var rng = rand.New(rand.NewSource(299792458)) - -// RandomKey returns a random 32-bit Kademlia key. -func RandomKey() key.Key32 { - return key.Key32(rng.Uint32()) -} - -// RandomKeyWithPrefix returns a 32-bit Kademlia key having a prefix equal to the bit pattern held in s and -// random following bits. A prefix of up to 32 bits is supported. -func RandomKeyWithPrefix(s string) key.Key32 { - kk := RandomKey() - if s == "" { - return kk - } - - prefixbits := len(s) - if prefixbits > 32 { - panic("RandomKeyWithPrefix: prefix too long") - } - n, err := strconv.ParseInt(s, 2, 32) - if err != nil { - panic("RandomKeyWithPrefix: " + err.Error()) - } - prefix := uint32(n) << (32 - prefixbits) - - v := uint32(kk) << prefixbits - v >>= prefixbits - - return key.Key32(v | prefix) -} - -// Key256WithLeadingBytes returns a 256-bit Kademlia key consisting of the given leading bytes padded by -// zero bytes to the end of the key. 
-func Key256WithLeadingBytes(in []byte) key.Key256 { - return key.NewKey256(append(in, make([]byte, 32-len(in))...)) -} diff --git a/v2/coord/internal/nettest/layouts.go b/v2/coord/internal/nettest/layouts.go index 66e53a27..7aa548bf 100644 --- a/v2/coord/internal/nettest/layouts.go +++ b/v2/coord/internal/nettest/layouts.go @@ -2,14 +2,15 @@ package nettest import ( "context" + "fmt" "github.com/benbjohnson/clock" - + ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/routing/simplert" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/kadtest" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) // LinearTopology creates a network topology consisting of n nodes peered in a linear chain. @@ -18,17 +19,26 @@ import ( // The topology is not a ring: nodes[0] only has nodes[1] in its table and nodes[n-1] only has nodes[n-2] in its table. // nodes[1] has nodes[0] and nodes[2] in its routing table. // If n > 2 then the first and last nodes will not have one another in their routing tables. -func LinearTopology(n int, clk *clock.Mock) (*Topology[key.Key8, kadtest.StrAddr], []*Node[key.Key8, kadtest.StrAddr]) { - nodes := make([]*Node[key.Key8, kadtest.StrAddr], n) - - top := NewTopology[key.Key8, kadtest.StrAddr](clk) +func LinearTopology(n int, clk *clock.Mock) (*Topology, []*Node, error) { + nodes := make([]*Node, n) + top := INewTopology(clk) for i := range nodes { - id := kadtest.NewID(key.Key8(byte(i))) - nodes[i] = &Node[key.Key8, kadtest.StrAddr]{ - NodeInfo: kadtest.NewInfo(id, []kadtest.StrAddr{}), - Router: NewRouter[key.Key8](id, top), - RoutingTable: simplert.New[key.Key8, kad.NodeID[key.Key8]](id, 2), + + a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i)) + if err != nil { + return nil, nil, err + } + + ai, err := NewAddrInfo([]ma.Multiaddr{a}) + if err != nil { + return nil, nil, err + } + + nodes[i] = &Node{ + NodeInfo: ai, + Router: NewRouter(ai.ID, top), + RoutingTable: simplert.New[key.Key256, kad.NodeID[key.Key256]](kadt.PeerID(ai.ID), 2), } } @@ -43,13 +53,13 @@ func LinearTopology(n int, clk *clock.Mock) (*Topology[key.Key8, kadtest.StrAddr for i := 0; i < len(nodes); i++ { if i > 0 { nodes[i].Router.AddNodeInfo(context.Background(), nodes[i-1].NodeInfo, 0) - nodes[i].RoutingTable.AddNode(nodes[i-1].NodeInfo.ID()) + nodes[i].RoutingTable.AddNode(kadt.PeerID(nodes[i-1].NodeInfo.ID)) } if i < len(nodes)-1 { nodes[i].Router.AddNodeInfo(context.Background(), nodes[i+1].NodeInfo, 0) - nodes[i].RoutingTable.AddNode(nodes[i+1].NodeInfo.ID()) + nodes[i].RoutingTable.AddNode(kadt.PeerID(nodes[i+1].NodeInfo.ID)) } } - return top, nodes + return top, nodes, nil } diff --git a/v2/coord/internal/nettest/routing.go b/v2/coord/internal/nettest/routing.go index 87492210..e0217052 100644 --- a/v2/coord/internal/nettest/routing.go +++ b/v2/coord/internal/nettest/routing.go @@ -3,16 +3,40 @@ package nettest import ( "context" "fmt" + "math/rand" "sync" "time" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/network/address" "github.com/plprobelab/go-kademlia/network/endpoint" - "github.com/plprobelab/go-kademlia/sim" + + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) +var rng = rand.New(rand.NewSource(6283185)) + +func 
NewAddrInfo(addrs []ma.Multiaddr) (peer.AddrInfo, error) { + _, pub, err := crypto.GenerateEd25519Key(rng) + if err != nil { + return peer.AddrInfo{}, err + } + pid, err := peer.IDFromPublicKey(pub) + if err != nil { + return peer.AddrInfo{}, err + } + + return peer.AddrInfo{ + ID: pid, + Addrs: addrs, + }, nil +} + // Link represents the route between two nodes. It allows latency and transport failures to be simulated. type Link interface { ConnLatency() time.Duration // the simulated time taken to return an error or successful outcome @@ -28,31 +52,31 @@ func (l *DefaultLink) DialErr() error { return nil } func (l *DefaultLink) ConnLatency() time.Duration { return 0 } func (l *DefaultLink) DialLatency() time.Duration { return 0 } -type Router[K kad.Key[K], A kad.Address[A]] struct { - self kad.NodeID[K] - top *Topology[K, A] +type Router struct { + self peer.ID + top *Topology mu sync.Mutex // guards nodes - nodes map[string]*nodeStatus[K, A] + nodes map[peer.ID]*nodeStatus } -type nodeStatus[K kad.Key[K], A kad.Address[A]] struct { - NodeInfo kad.NodeInfo[K, A] +type nodeStatus struct { + NodeInfo peer.AddrInfo Connectedness endpoint.Connectedness } -func NewRouter[K kad.Key[K], A kad.Address[A]](self kad.NodeID[K], top *Topology[K, A]) *Router[K, A] { - return &Router[K, A]{ +func NewRouter(self peer.ID, top *Topology) *Router { + return &Router{ self: self, top: top, - nodes: make(map[string]*nodeStatus[K, A]), + nodes: make(map[peer.ID]*nodeStatus), } } -func (r *Router[K, A]) NodeID() kad.NodeID[K] { - return r.self +func (r *Router) NodeID() kad.NodeID[key.Key256] { + return kadt.PeerID(r.self) } -func (r *Router[K, A]) SendMessage(ctx context.Context, to kad.NodeInfo[K, A], protoID address.ProtocolID, req kad.Request[K, A]) (kad.Response[K, A], error) { +func (r *Router) SendMessage(ctx context.Context, to peer.AddrInfo, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error) { if err := r.AddNodeInfo(ctx, to, 0); err != nil { return nil, fmt.Errorf("add node info: %w", err) } @@ -61,31 +85,34 @@ func (r *Router[K, A]) SendMessage(ctx context.Context, to kad.NodeInfo[K, A], p return nil, fmt.Errorf("dial: %w", err) } - return r.top.RouteMessage(ctx, r.self, to.ID(), protoID, req) + return r.top.RouteMessage(ctx, r.self, to.ID, protoID, req) } -func (r *Router[K, A]) HandleMessage(ctx context.Context, n kad.NodeID[K], protoID address.ProtocolID, req kad.Request[K, A]) (kad.Response[K, A], error) { - closer := make([]kad.NodeInfo[K, A], 0) +func (r *Router) HandleMessage(ctx context.Context, n peer.ID, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error) { + closer := make([]*pb.Message_Peer, 0) r.mu.Lock() for _, n := range r.nodes { // only include self if it was the target of the request - if key.Equal(n.NodeInfo.ID().Key(), r.self.Key()) && !key.Equal(n.NodeInfo.ID().Key(), req.Target()) { + if n.NodeInfo.ID == r.self && !key.Equal(kadt.PeerID(n.NodeInfo.ID).Key(), req.Target()) { continue } - closer = append(closer, n.NodeInfo) + closer = append(closer, pb.FromAddrInfo(n.NodeInfo)) } r.mu.Unlock() - resp := sim.NewResponse(closer) + // initialize the response message + resp := &pb.Message{ + Type: req.GetType(), + Key: req.GetKey(), + } + resp.CloserPeers = closer return resp, nil } -func (r *Router[K, A]) Dial(ctx context.Context, to kad.NodeInfo[K, A]) error { - tkey := key.HexString(to.ID().Key()) - +func (r *Router) Dial(ctx context.Context, to peer.AddrInfo) error { r.mu.Lock() - status, ok := r.nodes[tkey] + status, ok := r.nodes[to.ID] 
r.mu.Unlock() if ok { @@ -93,13 +120,13 @@ func (r *Router[K, A]) Dial(ctx context.Context, to kad.NodeInfo[K, A]) error { case endpoint.Connected: return nil case endpoint.CanConnect: - if _, err := r.top.Dial(ctx, r.self, to.ID()); err != nil { + if _, err := r.top.Dial(ctx, r.self, to.ID); err != nil { return err } status.Connectedness = endpoint.Connected r.mu.Lock() - r.nodes[tkey] = status + r.nodes[to.ID] = status r.mu.Unlock() return nil } @@ -107,13 +134,12 @@ func (r *Router[K, A]) Dial(ctx context.Context, to kad.NodeInfo[K, A]) error { return endpoint.ErrUnknownPeer } -func (r *Router[K, A]) AddNodeInfo(ctx context.Context, info kad.NodeInfo[K, A], ttl time.Duration) error { - key := key.HexString(info.ID().Key()) +func (r *Router) AddNodeInfo(ctx context.Context, info peer.AddrInfo, ttl time.Duration) error { r.mu.Lock() defer r.mu.Unlock() - if _, ok := r.nodes[key]; !ok { - r.nodes[key] = &nodeStatus[K, A]{ + if _, ok := r.nodes[info.ID]; !ok { + r.nodes[info.ID] = &nodeStatus{ NodeInfo: info, Connectedness: endpoint.CanConnect, } @@ -121,24 +147,28 @@ func (r *Router[K, A]) AddNodeInfo(ctx context.Context, info kad.NodeInfo[K, A], return nil } -func (r *Router[K, A]) GetNodeInfo(ctx context.Context, id kad.NodeID[K]) (kad.NodeInfo[K, A], error) { - key := key.HexString(id.Key()) +func (r *Router) GetNodeInfo(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { r.mu.Lock() defer r.mu.Unlock() - status, ok := r.nodes[key] + status, ok := r.nodes[id] if !ok { - return nil, fmt.Errorf("unknown node") + return peer.AddrInfo{}, fmt.Errorf("unknown node") } return status.NodeInfo, nil } -func (r *Router[K, A]) GetClosestNodes(ctx context.Context, to kad.NodeInfo[K, A], target K) ([]kad.NodeInfo[K, A], error) { +func (r *Router) GetClosestNodes(ctx context.Context, to peer.AddrInfo, target key.Key256) ([]peer.AddrInfo, error) { protoID := address.ProtocolID("/test/1.0.0") - resp, err := r.SendMessage(ctx, to, protoID, sim.NewRequest[K, A](target)) + req := &pb.Message{ + Type: pb.Message_FIND_NODE, + Key: []byte("random-key"), + } + + resp, err := r.SendMessage(ctx, to, protoID, req) if err != nil { return nil, err } - return resp.CloserNodes(), nil + return resp.CloserPeersAddrInfos(), nil } diff --git a/v2/coord/internal/nettest/topology.go b/v2/coord/internal/nettest/topology.go index eb8710f2..c275e1ad 100644 --- a/v2/coord/internal/nettest/topology.go +++ b/v2/coord/internal/nettest/topology.go @@ -5,53 +5,55 @@ import ( "fmt" "github.com/benbjohnson/clock" - + "github.com/libp2p/go-libp2p/core/peer" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/network/address" "github.com/plprobelab/go-kademlia/routing" + + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) -type Node[K kad.Key[K], A kad.Address[A]] struct { - NodeInfo kad.NodeInfo[K, A] - Router *Router[K, A] - RoutingTable routing.RoutingTableCpl[K, kad.NodeID[K]] +type Node struct { + NodeInfo peer.AddrInfo + Router *Router + RoutingTable routing.RoutingTableCpl[key.Key256, kad.NodeID[key.Key256]] } -type Topology[K kad.Key[K], A kad.Address[A]] struct { +type Topology struct { clk *clock.Mock links map[string]Link - nodes []*Node[K, A] - nodeIndex map[string]*Node[K, A] - routers map[string]*Router[K, A] + nodes []*Node + nodeIndex map[peer.ID]*Node + routers map[peer.ID]*Router } -func NewTopology[K kad.Key[K], A kad.Address[A]](clk *clock.Mock) *Topology[K, A] { - return &Topology[K, A]{ +func INewTopology(clk *clock.Mock) *Topology { + return 
&Topology{ clk: clk, links: make(map[string]Link), - nodeIndex: make(map[string]*Node[K, A]), - routers: make(map[string]*Router[K, A]), + nodeIndex: make(map[peer.ID]*Node), + routers: make(map[peer.ID]*Router), } } -func (t *Topology[K, A]) Nodes() []*Node[K, A] { +func (t *Topology) Nodes() []*Node { return t.nodes } -func (t *Topology[K, A]) ConnectNodes(a *Node[K, A], b *Node[K, A]) { +func (t *Topology) ConnectNodes(a *Node, b *Node) { t.ConnectNodesWithRoute(a, b, &DefaultLink{}) } -func (t *Topology[K, A]) ConnectNodesWithRoute(a *Node[K, A], b *Node[K, A], l Link) { - akey := key.HexString(a.NodeInfo.ID().Key()) +func (t *Topology) ConnectNodesWithRoute(a *Node, b *Node, l Link) { + akey := a.NodeInfo.ID if _, exists := t.nodeIndex[akey]; !exists { t.nodeIndex[akey] = a t.nodes = append(t.nodes, a) t.routers[akey] = a.Router } - bkey := key.HexString(b.NodeInfo.ID().Key()) + bkey := b.NodeInfo.ID if _, exists := t.nodeIndex[bkey]; !exists { t.nodeIndex[bkey] = b t.nodes = append(t.nodes, b) @@ -66,11 +68,8 @@ func (t *Topology[K, A]) ConnectNodesWithRoute(a *Node[K, A], b *Node[K, A], l L t.links[btoa] = l } -func (t *Topology[K, A]) findRoute(ctx context.Context, from kad.NodeID[K], to kad.NodeID[K]) (Link, error) { - fkey := key.HexString(from.Key()) - tkey := key.HexString(to.Key()) - - key := fmt.Sprintf("%s->%s", fkey, tkey) +func (t *Topology) findRoute(ctx context.Context, from peer.ID, to peer.ID) (Link, error) { + key := fmt.Sprintf("%s->%s", from, to) route, ok := t.links[key] if !ok { @@ -80,12 +79,11 @@ func (t *Topology[K, A]) findRoute(ctx context.Context, from kad.NodeID[K], to k return route, nil } -func (t *Topology[K, A]) Dial(ctx context.Context, from kad.NodeID[K], to kad.NodeID[K]) (kad.NodeInfo[K, A], error) { - if key.Equal(from.Key(), to.Key()) { - tkey := key.HexString(to.Key()) - node, ok := t.nodeIndex[tkey] +func (t *Topology) Dial(ctx context.Context, from peer.ID, to peer.ID) (peer.AddrInfo, error) { + if from == to { + node, ok := t.nodeIndex[to] if !ok { - return nil, fmt.Errorf("unknown node") + return peer.AddrInfo{}, fmt.Errorf("unknown node") } return node.NodeInfo, nil @@ -93,7 +91,7 @@ func (t *Topology[K, A]) Dial(ctx context.Context, from kad.NodeID[K], to kad.No route, err := t.findRoute(ctx, from, to) if err != nil { - return nil, fmt.Errorf("find route: %w", err) + return peer.AddrInfo{}, fmt.Errorf("find route: %w", err) } latency := route.DialLatency() @@ -102,22 +100,20 @@ func (t *Topology[K, A]) Dial(ctx context.Context, from kad.NodeID[K], to kad.No } if err := route.DialErr(); err != nil { - return nil, err + return peer.AddrInfo{}, err } - tkey := key.HexString(to.Key()) - node, ok := t.nodeIndex[tkey] + node, ok := t.nodeIndex[to] if !ok { - return nil, fmt.Errorf("unknown node") + return peer.AddrInfo{}, fmt.Errorf("unknown node") } return node.NodeInfo, nil } -func (t *Topology[K, A]) RouteMessage(ctx context.Context, from kad.NodeID[K], to kad.NodeID[K], protoID address.ProtocolID, req kad.Request[K, A]) (kad.Response[K, A], error) { - if key.Equal(from.Key(), to.Key()) { - tkey := key.HexString(to.Key()) - node, ok := t.nodeIndex[tkey] +func (t *Topology) RouteMessage(ctx context.Context, from peer.ID, to peer.ID, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error) { + if from == to { + node, ok := t.nodeIndex[to] if !ok { return nil, fmt.Errorf("unknown node") } @@ -135,8 +131,7 @@ func (t *Topology[K, A]) RouteMessage(ctx context.Context, from kad.NodeID[K], t t.clk.Sleep(latency) } - tkey := 
key.HexString(to.Key()) - node, ok := t.nodeIndex[tkey] + node, ok := t.nodeIndex[to] if !ok { return nil, fmt.Errorf("no route to node") } diff --git a/v2/coord/network.go b/v2/coord/network.go index e641819a..11bbea91 100644 --- a/v2/coord/network.go +++ b/v2/coord/network.go @@ -1,22 +1,26 @@ -package kademlia +package coord import ( "context" "fmt" "sync" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/query" "golang.org/x/exp/slog" + + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) -type NetworkBehaviour[K kad.Key[K], A kad.Address[A]] struct { +type NetworkBehaviour struct { // rtr is the message router used to send messages - rtr Router[K, A] + rtr Router nodeHandlersMu sync.Mutex - nodeHandlers map[string]*NodeHandler[K, A] // TODO: garbage collect node handlers + nodeHandlers map[peer.ID]*NodeHandler // TODO: garbage collect node handlers pendingMu sync.Mutex pending []DhtEvent @@ -25,10 +29,10 @@ type NetworkBehaviour[K kad.Key[K], A kad.Address[A]] struct { logger *slog.Logger } -func NewNetworkBehaviour[K kad.Key[K], A kad.Address[A]](rtr Router[K, A], logger *slog.Logger) *NetworkBehaviour[K, A] { - b := &NetworkBehaviour[K, A]{ +func NewNetworkBehaviour(rtr Router, logger *slog.Logger) *NetworkBehaviour { + b := &NetworkBehaviour{ rtr: rtr, - nodeHandlers: make(map[string]*NodeHandler[K, A]), + nodeHandlers: make(map[peer.ID]*NodeHandler), ready: make(chan struct{}, 1), logger: logger, } @@ -36,18 +40,17 @@ func NewNetworkBehaviour[K kad.Key[K], A kad.Address[A]](rtr Router[K, A], logge return b } -func (b *NetworkBehaviour[K, A]) Notify(ctx context.Context, ev DhtEvent) { +func (b *NetworkBehaviour) Notify(ctx context.Context, ev DhtEvent) { b.pendingMu.Lock() defer b.pendingMu.Unlock() switch ev := ev.(type) { - case *EventOutboundGetClosestNodes[K, A]: - nodeKey := key.HexString(ev.To.ID().Key()) + case *EventOutboundGetClosestNodes: b.nodeHandlersMu.Lock() - nh, ok := b.nodeHandlers[nodeKey] + nh, ok := b.nodeHandlers[ev.To.ID] if !ok { nh = NewNodeHandler(ev.To, b.rtr, b.logger) - b.nodeHandlers[nodeKey] = nh + b.nodeHandlers[ev.To.ID] = nh } b.nodeHandlersMu.Unlock() nh.Notify(ctx, ev) @@ -63,11 +66,11 @@ func (b *NetworkBehaviour[K, A]) Notify(ctx context.Context, ev DhtEvent) { } } -func (b *NetworkBehaviour[K, A]) Ready() <-chan struct{} { +func (b *NetworkBehaviour) Ready() <-chan struct{} { return b.ready } -func (b *NetworkBehaviour[K, A]) Perform(ctx context.Context) (DhtEvent, bool) { +func (b *NetworkBehaviour) Perform(ctx context.Context) (DhtEvent, bool) { // No inbound work can be done until Perform is complete b.pendingMu.Lock() defer b.pendingMu.Unlock() @@ -89,31 +92,30 @@ func (b *NetworkBehaviour[K, A]) Perform(ctx context.Context) (DhtEvent, bool) { return nil, false } -func (b *NetworkBehaviour[K, A]) getNodeHandler(ctx context.Context, id kad.NodeID[K]) (*NodeHandler[K, A], error) { - nodeKey := key.HexString(id.Key()) +func (b *NetworkBehaviour) getNodeHandler(ctx context.Context, id peer.ID) (*NodeHandler, error) { b.nodeHandlersMu.Lock() - nh, ok := b.nodeHandlers[nodeKey] + nh, ok := b.nodeHandlers[id] if !ok || len(nh.Addresses()) == 0 { info, err := b.rtr.GetNodeInfo(ctx, id) if err != nil { return nil, err } nh = NewNodeHandler(info, b.rtr, b.logger) - b.nodeHandlers[nodeKey] = nh + b.nodeHandlers[id] = nh } b.nodeHandlersMu.Unlock() return nh, nil } -type NodeHandler[K kad.Key[K], A 
kad.Address[A]] struct { - self kad.NodeInfo[K, A] - rtr Router[K, A] +type NodeHandler struct { + self peer.AddrInfo + rtr Router queue *WorkQueue[NodeHandlerRequest] logger *slog.Logger } -func NewNodeHandler[K kad.Key[K], A kad.Address[A]](self kad.NodeInfo[K, A], rtr Router[K, A], logger *slog.Logger) *NodeHandler[K, A] { - h := &NodeHandler[K, A]{ +func NewNodeHandler(self peer.AddrInfo, rtr Router, logger *slog.Logger) *NodeHandler { + h := &NodeHandler{ self: self, rtr: rtr, logger: logger, @@ -124,19 +126,19 @@ func NewNodeHandler[K kad.Key[K], A kad.Address[A]](self kad.NodeInfo[K, A], rtr return h } -func (h *NodeHandler[K, A]) Notify(ctx context.Context, ev NodeHandlerRequest) { +func (h *NodeHandler) Notify(ctx context.Context, ev NodeHandlerRequest) { h.queue.Enqueue(ctx, ev) } -func (h *NodeHandler[K, A]) send(ctx context.Context, ev NodeHandlerRequest) bool { +func (h *NodeHandler) send(ctx context.Context, ev NodeHandlerRequest) bool { switch cmd := ev.(type) { - case *EventOutboundGetClosestNodes[K, A]: + case *EventOutboundGetClosestNodes: if cmd.Notify == nil { break } nodes, err := h.rtr.GetClosestNodes(ctx, h.self, cmd.Target) if err != nil { - cmd.Notify.Notify(ctx, &EventGetClosestNodesFailure[K, A]{ + cmd.Notify.Notify(ctx, &EventGetCloserNodesFailure{ QueryID: cmd.QueryID, To: h.self, Target: cmd.Target, @@ -145,11 +147,11 @@ func (h *NodeHandler[K, A]) send(ctx context.Context, ev NodeHandlerRequest) boo return false } - cmd.Notify.Notify(ctx, &EventGetClosestNodesSuccess[K, A]{ - QueryID: cmd.QueryID, - To: h.self, - Target: cmd.Target, - ClosestNodes: nodes, + cmd.Notify.Notify(ctx, &EventGetCloserNodesSuccess{ + QueryID: cmd.QueryID, + To: h.self, + Target: cmd.Target, + CloserNodes: nodes, }) default: panic(fmt.Sprintf("unexpected command type: %T", cmd)) @@ -158,24 +160,24 @@ func (h *NodeHandler[K, A]) send(ctx context.Context, ev NodeHandlerRequest) boo return false } -func (h *NodeHandler[K, A]) ID() kad.NodeID[K] { - return h.self.ID() +func (h *NodeHandler) ID() peer.ID { + return h.self.ID } -func (h *NodeHandler[K, A]) Addresses() []A { - return h.self.Addresses() +func (h *NodeHandler) Addresses() []ma.Multiaddr { + return h.self.Addrs } // GetClosestNodes requests the n closest nodes to the key from the node's local routing table. // The node may return fewer nodes than requested. 
-func (h *NodeHandler[K, A]) GetClosestNodes(ctx context.Context, k K, n int) ([]Node[K, A], error) { +func (h *NodeHandler) GetClosestNodes(ctx context.Context, k key.Key256, n int) ([]Node, error) { w := NewWaiter[DhtEvent]() - ev := &EventOutboundGetClosestNodes[K, A]{ - QueryID: query.QueryID(key.HexString(k)), - To: h.self, - Target: k, - Notify: w, + ev := &EventOutboundGetClosestNodes{ + QueryID: query.QueryID(key.HexString(k)), + To: h.self, + Target: k, + Notify: w, } h.queue.Enqueue(ctx, ev) @@ -186,9 +188,9 @@ func (h *NodeHandler[K, A]) GetClosestNodes(ctx context.Context, k K, n int) ([] case we := <-w.Chan(): switch res := we.Event.(type) { - case *EventGetClosestNodesSuccess[K, A]: - nodes := make([]Node[K, A], 0, len(res.ClosestNodes)) - for _, info := range res.ClosestNodes { + case *EventGetCloserNodesSuccess: + nodes := make([]Node, 0, len(res.CloserNodes)) + for _, info := range res.CloserNodes { // TODO use a global registry of node handlers nodes = append(nodes, NewNodeHandler(info, h.rtr, h.logger)) n-- @@ -198,7 +200,7 @@ func (h *NodeHandler[K, A]) GetClosestNodes(ctx context.Context, k K, n int) ([] } return nodes, nil - case *EventGetClosestNodesFailure[K, A]: + case *EventGetCloserNodesFailure: return nil, res.Err default: panic(fmt.Sprintf("unexpected node handler event: %T", ev)) @@ -208,56 +210,41 @@ func (h *NodeHandler[K, A]) GetClosestNodes(ctx context.Context, k K, n int) ([] // GetValue requests that the node return any value associated with the supplied key. // If the node does not have a value for the key it returns ErrValueNotFound. -func (h *NodeHandler[K, A]) GetValue(ctx context.Context, key K) (Value[K], error) { +func (h *NodeHandler) GetValue(ctx context.Context, key key.Key256) (Value, error) { panic("not implemented") } // PutValue requests that the node stores a value to be associated with the supplied key. // If the node cannot or chooses not to store the value for the key it returns ErrValueNotAccepted. 
-func (h *NodeHandler[K, A]) PutValue(ctx context.Context, r Value[K], q int) error { +func (h *NodeHandler) PutValue(ctx context.Context, r Value, q int) error { panic("not implemented") } -type NodeAddr[K kad.Key[K], A kad.Address[A]] struct { - id kad.NodeID[K] - addresses []A -} - -func NewNodeAddr[K kad.Key[K], A kad.Address[A]](id kad.NodeID[K], addresses []A) *NodeAddr[K, A] { - return &NodeAddr[K, A]{ - id: id, - addresses: addresses, +func CloserNodesResponse(k key.Key256, nodes []peer.AddrInfo) kad.Response[key.Key256, ma.Multiaddr] { + infos := make([]kad.NodeInfo[key.Key256, ma.Multiaddr], len(nodes)) + for i := range nodes { + infos[i] = kadt.AddrInfo{Info: nodes[i]} } -} - -func (n *NodeAddr[K, A]) ID() kad.NodeID[K] { - return n.id -} - -func (n *NodeAddr[K, A]) Addresses() []A { - return n.addresses -} -func ClosestNodesFakeResponse[K kad.Key[K], A kad.Address[A]](key K, nodes []kad.NodeInfo[K, A]) kad.Response[K, A] { - return &fakeMessage[K, A]{ - key: key, - nodes: nodes, + return &fakeMessage{ + key: k, + infos: infos, } } -type fakeMessage[K kad.Key[K], A kad.Address[A]] struct { - key K - nodes []kad.NodeInfo[K, A] +type fakeMessage struct { + key key.Key256 + infos []kad.NodeInfo[key.Key256, ma.Multiaddr] } -func (r fakeMessage[K, A]) Target() K { +func (r fakeMessage) Target() key.Key256 { return r.key } -func (r fakeMessage[K, A]) CloserNodes() []kad.NodeInfo[K, A] { - return r.nodes +func (r fakeMessage) CloserNodes() []kad.NodeInfo[key.Key256, ma.Multiaddr] { + return r.infos } -func (r fakeMessage[K, A]) EmptyResponse() kad.Response[K, A] { - return &fakeMessage[K, A]{} +func (r fakeMessage) EmptyResponse() kad.Response[key.Key256, ma.Multiaddr] { + return &fakeMessage{} } diff --git a/v2/coord/network_test.go b/v2/coord/network_test.go index 714c2a35..a596edfb 100644 --- a/v2/coord/network_test.go +++ b/v2/coord/network_test.go @@ -1,31 +1,30 @@ -package kademlia +package coord import ( "testing" "github.com/benbjohnson/clock" - "github.com/plprobelab/go-kademlia/key" "github.com/stretchr/testify/require" "golang.org/x/exp/slog" "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) -var _ Router[key.Key8, kadtest.StrAddr] = (*nettest.Router[key.Key8, kadtest.StrAddr])(nil) - // TODO: this is just a basic is-it-working test that needs to be improved func TestGetClosestNodes(t *testing.T) { ctx, cancel := kadtest.CtxShort(t) defer cancel() clk := clock.NewMock() - _, nodes := nettest.LinearTopology(4, clk) + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) - h := NewNodeHandler[key.Key8, kadtest.StrAddr](nodes[1].NodeInfo, nodes[1].Router, slog.Default()) + h := NewNodeHandler(nodes[1].NodeInfo, nodes[1].Router, slog.Default()) // node 1 has node 2 in its routing table so it will return it along with node 0 - found, err := h.GetClosestNodes(ctx, nodes[2].NodeInfo.ID().Key(), 2) + found, err := h.GetClosestNodes(ctx, kadt.PeerID(nodes[2].NodeInfo.ID).Key(), 2) require.NoError(t, err) for _, f := range found { t.Logf("found node %v", f.ID()) diff --git a/v2/coord/query.go b/v2/coord/query.go index a4f5f268..78fcec67 100644 --- a/v2/coord/query.go +++ b/v2/coord/query.go @@ -1,18 +1,20 @@ -package kademlia +package coord import ( "context" "fmt" "sync" - "github.com/plprobelab/go-kademlia/kad" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + ma "github.com/multiformats/go-multiaddr" + 
"github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/query" "github.com/plprobelab/go-kademlia/util" "golang.org/x/exp/slog" ) -type PooledQueryBehaviour[K kad.Key[K], A kad.Address[A]] struct { - pool *query.Pool[K, A] +type PooledQueryBehaviour struct { + pool *query.Pool[key.Key256, ma.Multiaddr] waiters map[query.QueryID]NotifyCloser[DhtEvent] pendingMu sync.Mutex @@ -22,8 +24,8 @@ type PooledQueryBehaviour[K kad.Key[K], A kad.Address[A]] struct { logger *slog.Logger } -func NewPooledQueryBehaviour[K kad.Key[K], A kad.Address[A]](pool *query.Pool[K, A], logger *slog.Logger) *PooledQueryBehaviour[K, A] { - h := &PooledQueryBehaviour[K, A]{ +func NewPooledQueryBehaviour(pool *query.Pool[key.Key256, ma.Multiaddr], logger *slog.Logger) *PooledQueryBehaviour { + h := &PooledQueryBehaviour{ pool: pool, waiters: make(map[query.QueryID]NotifyCloser[DhtEvent]), ready: make(chan struct{}, 1), @@ -32,7 +34,7 @@ func NewPooledQueryBehaviour[K kad.Key[K], A kad.Address[A]](pool *query.Pool[K, return h } -func (r *PooledQueryBehaviour[K, A]) Notify(ctx context.Context, ev DhtEvent) { +func (r *PooledQueryBehaviour) Notify(ctx context.Context, ev DhtEvent) { ctx, span := util.StartSpan(ctx, "PooledQueryBehaviour.Notify") defer span.End() @@ -41,13 +43,13 @@ func (r *PooledQueryBehaviour[K, A]) Notify(ctx context.Context, ev DhtEvent) { var cmd query.PoolEvent switch ev := ev.(type) { - case *EventStartQuery[K, A]: - cmd = &query.EventPoolAddQuery[K, A]{ + case *EventStartQuery: + cmd = &query.EventPoolAddQuery[key.Key256, ma.Multiaddr]{ QueryID: ev.QueryID, Target: ev.Target, ProtocolID: ev.ProtocolID, Message: ev.Message, - KnownClosestNodes: ev.KnownClosestNodes, + KnownClosestNodes: SliceOfPeerIDToSliceOfNodeID(ev.KnownClosestNodes), } if ev.Notify != nil { r.waiters[ev.QueryID] = ev.Notify @@ -58,30 +60,30 @@ func (r *PooledQueryBehaviour[K, A]) Notify(ctx context.Context, ev DhtEvent) { QueryID: ev.QueryID, } - case *EventGetClosestNodesSuccess[K, A]: - for _, info := range ev.ClosestNodes { + case *EventGetCloserNodesSuccess: + for _, info := range ev.CloserNodes { // TODO: do this after advancing pool - r.pending = append(r.pending, &EventDhtAddNodeInfo[K, A]{ + r.pending = append(r.pending, &EventDhtAddNodeInfo{ NodeInfo: info, }) } waiter, ok := r.waiters[ev.QueryID] if ok { - waiter.Notify(ctx, &EventQueryProgressed[K, A]{ - NodeID: ev.To.ID(), + waiter.Notify(ctx, &EventQueryProgressed{ + NodeID: ev.To.ID, QueryID: ev.QueryID, - Response: ClosestNodesFakeResponse(ev.Target, ev.ClosestNodes), + Response: CloserNodesResponse(ev.Target, ev.CloserNodes), // Stats: stats, }) } - cmd = &query.EventPoolMessageResponse[K, A]{ - NodeID: ev.To.ID(), + cmd = &query.EventPoolMessageResponse[key.Key256, ma.Multiaddr]{ + NodeID: kadt.PeerID(ev.To.ID), QueryID: ev.QueryID, - Response: ClosestNodesFakeResponse(ev.Target, ev.ClosestNodes), + Response: CloserNodesResponse(ev.Target, ev.CloserNodes), } - case *EventGetClosestNodesFailure[K, A]: - cmd = &query.EventPoolMessageFailure[K]{ - NodeID: ev.To.ID(), + case *EventGetCloserNodesFailure: + cmd = &query.EventPoolMessageFailure[key.Key256]{ + NodeID: kadt.PeerID(ev.To.ID), QueryID: ev.QueryID, Error: ev.Err, } @@ -102,11 +104,11 @@ func (r *PooledQueryBehaviour[K, A]) Notify(ctx context.Context, ev DhtEvent) { } } -func (r *PooledQueryBehaviour[K, A]) Ready() <-chan struct{} { +func (r *PooledQueryBehaviour) Ready() <-chan struct{} { return r.ready } -func (r *PooledQueryBehaviour[K, A]) Perform(ctx context.Context) (DhtEvent, bool) 
{ +func (r *PooledQueryBehaviour) Perform(ctx context.Context) (DhtEvent, bool) { ctx, span := util.StartSpan(ctx, "PooledQueryBehaviour.Perform") defer span.End() @@ -141,16 +143,16 @@ func (r *PooledQueryBehaviour[K, A]) Perform(ctx context.Context) (DhtEvent, boo } } -func (r *PooledQueryBehaviour[K, A]) advancePool(ctx context.Context, ev query.PoolEvent) (DhtEvent, bool) { +func (r *PooledQueryBehaviour) advancePool(ctx context.Context, ev query.PoolEvent) (DhtEvent, bool) { ctx, span := util.StartSpan(ctx, "PooledQueryBehaviour.advancePool") defer span.End() pstate := r.pool.Advance(ctx, ev) switch st := pstate.(type) { - case *query.StatePoolQueryMessage[K, A]: - return &EventOutboundGetClosestNodes[K, A]{ + case *query.StatePoolQueryMessage[key.Key256, ma.Multiaddr]: + return &EventOutboundGetClosestNodes{ QueryID: st.QueryID, - To: NewNodeAddr[K, A](st.NodeID, nil), + To: NodeIDToAddrInfo(st.NodeID), Target: st.Message.Target(), Notify: r, }, true diff --git a/v2/coord/routing.go b/v2/coord/routing.go index 545ca594..f36801b2 100644 --- a/v2/coord/routing.go +++ b/v2/coord/routing.go @@ -1,21 +1,24 @@ -package kademlia +package coord import ( "context" "fmt" "sync" - "github.com/plprobelab/go-kademlia/kad" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/routing" "github.com/plprobelab/go-kademlia/util" "go.opentelemetry.io/otel/attribute" "golang.org/x/exp/slog" + + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) -type RoutingBehaviour[K kad.Key[K], A kad.Address[A]] struct { +type RoutingBehaviour struct { // self is the node id of the system the dht is running on - self kad.NodeID[K] + self peer.ID // bootstrap is the bootstrap state machine, responsible for bootstrapping the routing table bootstrap SM[routing.BootstrapEvent, routing.BootstrapState] @@ -32,8 +35,8 @@ type RoutingBehaviour[K kad.Key[K], A kad.Address[A]] struct { logger *slog.Logger } -func NewRoutingBehaviour[K kad.Key[K], A kad.Address[A]](self kad.NodeID[K], bootstrap SM[routing.BootstrapEvent, routing.BootstrapState], include SM[routing.IncludeEvent, routing.IncludeState], probe SM[routing.ProbeEvent, routing.ProbeState], logger *slog.Logger) *RoutingBehaviour[K, A] { - r := &RoutingBehaviour[K, A]{ +func NewRoutingBehaviour(self peer.ID, bootstrap SM[routing.BootstrapEvent, routing.BootstrapState], include SM[routing.IncludeEvent, routing.IncludeState], probe SM[routing.ProbeEvent, routing.ProbeState], logger *slog.Logger) *RoutingBehaviour { + r := &RoutingBehaviour{ self: self, bootstrap: bootstrap, include: include, @@ -44,7 +47,7 @@ func NewRoutingBehaviour[K kad.Key[K], A kad.Address[A]](self kad.NodeID[K], boo return r } -func (r *RoutingBehaviour[K, A]) Notify(ctx context.Context, ev DhtEvent) { +func (r *RoutingBehaviour) Notify(ctx context.Context, ev DhtEvent) { ctx, span := util.StartSpan(ctx, "RoutingBehaviour.Notify") defer span.End() @@ -54,16 +57,16 @@ func (r *RoutingBehaviour[K, A]) Notify(ctx context.Context, ev DhtEvent) { } // notify must only be called while r.pendingMu is held -func (r *RoutingBehaviour[K, A]) notify(ctx context.Context, ev DhtEvent) { +func (r *RoutingBehaviour) notify(ctx context.Context, ev DhtEvent) { ctx, span := util.StartSpan(ctx, "RoutingBehaviour.notify") defer span.End() switch ev := ev.(type) { - case *EventDhtStartBootstrap[K, A]: + case *EventDhtStartBootstrap: span.SetAttributes(attribute.String("event", "EventDhtStartBootstrap")) - cmd := 
&routing.EventBootstrapStart[K, A]{ + cmd := &routing.EventBootstrapStart[key.Key256, ma.Multiaddr]{ ProtocolID: ev.ProtocolID, Message: ev.Message, - KnownClosestNodes: ev.SeedNodes, + KnownClosestNodes: SliceOfPeerIDToSliceOfNodeID(ev.SeedNodes), } // attempt to advance the bootstrap next, ok := r.advanceBootstrap(ctx, cmd) @@ -71,14 +74,14 @@ func (r *RoutingBehaviour[K, A]) notify(ctx context.Context, ev DhtEvent) { r.pending = append(r.pending, next) } - case *EventDhtAddNodeInfo[K, A]: + case *EventDhtAddNodeInfo: span.SetAttributes(attribute.String("event", "EventDhtAddNodeInfo")) // Ignore self - if key.Equal(ev.NodeInfo.ID().Key(), r.self.Key()) { + if ev.NodeInfo.ID == r.self { break } - cmd := &routing.EventIncludeAddCandidate[K, A]{ - NodeInfo: ev.NodeInfo, + cmd := &routing.EventIncludeAddCandidate[key.Key256, ma.Multiaddr]{ + NodeInfo: kadt.AddrInfo{Info: ev.NodeInfo}, } // attempt to advance the include next, ok := r.advanceInclude(ctx, cmd) @@ -86,9 +89,9 @@ func (r *RoutingBehaviour[K, A]) notify(ctx context.Context, ev DhtEvent) { r.pending = append(r.pending, next) } - case *EventRoutingUpdated[K, A]: + case *EventRoutingUpdated: span.SetAttributes(attribute.String("event", "EventRoutingUpdated")) - cmd := &routing.EventProbeAdd[K]{ + cmd := &routing.EventProbeAdd[key.Key256]{ NodeID: ev.NodeInfo.ID(), } // attempt to advance the probe state machine @@ -97,19 +100,19 @@ func (r *RoutingBehaviour[K, A]) notify(ctx context.Context, ev DhtEvent) { r.pending = append(r.pending, next) } - case *EventGetClosestNodesSuccess[K, A]: - span.SetAttributes(attribute.String("event", "EventGetClosestNodesFailure"), attribute.String("queryid", string(ev.QueryID)), attribute.String("nodeid", string(ev.To.ID().String()))) + case *EventGetCloserNodesSuccess: + span.SetAttributes(attribute.String("event", "EventGetClosestNodesFailure"), attribute.String("queryid", string(ev.QueryID)), attribute.String("nodeid", ev.To.String())) switch ev.QueryID { case "bootstrap": - for _, info := range ev.ClosestNodes { + for _, info := range ev.CloserNodes { // TODO: do this after advancing bootstrap - r.pending = append(r.pending, &EventDhtAddNodeInfo[K, A]{ + r.pending = append(r.pending, &EventDhtAddNodeInfo{ NodeInfo: info, }) } - cmd := &routing.EventBootstrapMessageResponse[K, A]{ - NodeID: ev.To.ID(), - Response: ClosestNodesFakeResponse(ev.Target, ev.ClosestNodes), + cmd := &routing.EventBootstrapMessageResponse[key.Key256, ma.Multiaddr]{ + NodeID: kadt.PeerID(ev.To.ID), + Response: CloserNodesResponse(ev.Target, ev.CloserNodes), } // attempt to advance the bootstrap next, ok := r.advanceBootstrap(ctx, cmd) @@ -118,9 +121,9 @@ func (r *RoutingBehaviour[K, A]) notify(ctx context.Context, ev DhtEvent) { } case "include": - cmd := &routing.EventIncludeMessageResponse[K, A]{ - NodeInfo: ev.To, - Response: ClosestNodesFakeResponse(ev.Target, ev.ClosestNodes), + cmd := &routing.EventIncludeMessageResponse[key.Key256, ma.Multiaddr]{ + NodeInfo: kadt.AddrInfo{Info: ev.To}, + Response: CloserNodesResponse(ev.Target, ev.CloserNodes), } // attempt to advance the include next, ok := r.advanceInclude(ctx, cmd) @@ -129,9 +132,9 @@ func (r *RoutingBehaviour[K, A]) notify(ctx context.Context, ev DhtEvent) { } case "probe": - cmd := &routing.EventProbeMessageResponse[K, A]{ - NodeInfo: ev.To, - Response: ClosestNodesFakeResponse(ev.Target, ev.ClosestNodes), + cmd := &routing.EventProbeMessageResponse[key.Key256, ma.Multiaddr]{ + NodeInfo: kadt.AddrInfo{Info: ev.To}, + Response: CloserNodesResponse(ev.Target, 
ev.CloserNodes), } // attempt to advance the probe state machine next, ok := r.advanceProbe(ctx, cmd) @@ -142,13 +145,13 @@ func (r *RoutingBehaviour[K, A]) notify(ctx context.Context, ev DhtEvent) { default: panic(fmt.Sprintf("unexpected query id: %s", ev.QueryID)) } - case *EventGetClosestNodesFailure[K, A]: - span.SetAttributes(attribute.String("event", "EventGetClosestNodesFailure"), attribute.String("queryid", string(ev.QueryID)), attribute.String("nodeid", string(ev.To.ID().String()))) + case *EventGetCloserNodesFailure: + span.SetAttributes(attribute.String("event", "EventGetClosestNodesFailure"), attribute.String("queryid", string(ev.QueryID)), attribute.String("nodeid", ev.To.String())) span.RecordError(ev.Err) switch ev.QueryID { case "bootstrap": - cmd := &routing.EventBootstrapMessageFailure[K]{ - NodeID: ev.To.ID(), + cmd := &routing.EventBootstrapMessageFailure[key.Key256]{ + NodeID: kadt.PeerID(ev.To.ID), Error: ev.Err, } // attempt to advance the bootstrap @@ -157,8 +160,8 @@ func (r *RoutingBehaviour[K, A]) notify(ctx context.Context, ev DhtEvent) { r.pending = append(r.pending, next) } case "include": - cmd := &routing.EventIncludeMessageFailure[K, A]{ - NodeInfo: ev.To, + cmd := &routing.EventIncludeMessageFailure[key.Key256, ma.Multiaddr]{ + NodeInfo: kadt.AddrInfo{Info: ev.To}, Error: ev.Err, } // attempt to advance the include state machine @@ -167,8 +170,8 @@ func (r *RoutingBehaviour[K, A]) notify(ctx context.Context, ev DhtEvent) { r.pending = append(r.pending, next) } case "probe": - cmd := &routing.EventProbeMessageFailure[K, A]{ - NodeInfo: ev.To, + cmd := &routing.EventProbeMessageFailure[key.Key256, ma.Multiaddr]{ + NodeInfo: kadt.AddrInfo{Info: ev.To}, Error: ev.Err, } // attempt to advance the probe state machine @@ -192,11 +195,11 @@ func (r *RoutingBehaviour[K, A]) notify(ctx context.Context, ev DhtEvent) { } } -func (r *RoutingBehaviour[K, A]) Ready() <-chan struct{} { +func (r *RoutingBehaviour) Ready() <-chan struct{} { return r.ready } -func (r *RoutingBehaviour[K, A]) Perform(ctx context.Context) (DhtEvent, bool) { +func (r *RoutingBehaviour) Perform(ctx context.Context) (DhtEvent, bool) { ctx, span := util.StartSpan(ctx, "RoutingBehaviour.Perform") defer span.End() @@ -243,16 +246,16 @@ func (r *RoutingBehaviour[K, A]) Perform(ctx context.Context) (DhtEvent, bool) { } } -func (r *RoutingBehaviour[K, A]) advanceBootstrap(ctx context.Context, ev routing.BootstrapEvent) (DhtEvent, bool) { +func (r *RoutingBehaviour) advanceBootstrap(ctx context.Context, ev routing.BootstrapEvent) (DhtEvent, bool) { ctx, span := util.StartSpan(ctx, "RoutingBehaviour.advanceBootstrap") defer span.End() bstate := r.bootstrap.Advance(ctx, ev) switch st := bstate.(type) { - case *routing.StateBootstrapMessage[K, A]: - return &EventOutboundGetClosestNodes[K, A]{ + case *routing.StateBootstrapMessage[key.Key256, ma.Multiaddr]: + return &EventOutboundGetClosestNodes{ QueryID: "bootstrap", - To: NewNodeAddr[K, A](st.NodeID, nil), + To: NodeIDToAddrInfo(st.NodeID), Target: st.Message.Target(), Notify: r, }, true @@ -272,30 +275,30 @@ func (r *RoutingBehaviour[K, A]) advanceBootstrap(ctx context.Context, ev routin return nil, false } -func (r *RoutingBehaviour[K, A]) advanceInclude(ctx context.Context, ev routing.IncludeEvent) (DhtEvent, bool) { +func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.IncludeEvent) (DhtEvent, bool) { ctx, span := util.StartSpan(ctx, "RoutingBehaviour.advanceInclude") defer span.End() istate := r.include.Advance(ctx, ev) switch st 
:= istate.(type) { - case *routing.StateIncludeFindNodeMessage[K, A]: + case *routing.StateIncludeFindNodeMessage[key.Key256, ma.Multiaddr]: // include wants to send a find node message to a node - return &EventOutboundGetClosestNodes[K, A]{ + return &EventOutboundGetClosestNodes{ QueryID: "include", - To: st.NodeInfo, + To: NodeInfoToAddrInfo(st.NodeInfo), Target: st.NodeInfo.ID().Key(), Notify: r, }, true - case *routing.StateIncludeRoutingUpdated[K, A]: + case *routing.StateIncludeRoutingUpdated[key.Key256, ma.Multiaddr]: // a node has been included in the routing table // notify other routing state machines that there is a new node in the routing table - r.notify(ctx, &EventRoutingUpdated[K, A]{ + r.notify(ctx, &EventRoutingUpdated{ NodeInfo: st.NodeInfo, }) // return the event to notify outwards too - return &EventRoutingUpdated[K, A]{ + return &EventRoutingUpdated{ NodeInfo: st.NodeInfo, }, true case *routing.StateIncludeWaitingAtCapacity: @@ -313,24 +316,24 @@ func (r *RoutingBehaviour[K, A]) advanceInclude(ctx context.Context, ev routing. return nil, false } -func (r *RoutingBehaviour[K, A]) advanceProbe(ctx context.Context, ev routing.ProbeEvent) (DhtEvent, bool) { +func (r *RoutingBehaviour) advanceProbe(ctx context.Context, ev routing.ProbeEvent) (DhtEvent, bool) { ctx, span := util.StartSpan(ctx, "RoutingBehaviour.advanceProbe") defer span.End() st := r.probe.Advance(ctx, ev) switch st := st.(type) { - case *routing.StateProbeConnectivityCheck[K]: + case *routing.StateProbeConnectivityCheck[key.Key256]: // include wants to send a find node message to a node - return &EventOutboundGetClosestNodes[K, A]{ + return &EventOutboundGetClosestNodes{ QueryID: "probe", - To: unaddressedNodeInfo[K, A]{NodeID: st.NodeID}, + To: NodeIDToAddrInfo(st.NodeID), Target: st.NodeID.Key(), Notify: r, }, true - case *routing.StateProbeNodeFailure[K]: + case *routing.StateProbeNodeFailure[key.Key256]: // a node has failed a connectivity check been removed from the routing table and the probe list // add the node to the inclusion list for a second chance - r.notify(ctx, &EventDhtAddNodeInfo[K, A]{ - NodeInfo: unaddressedNodeInfo[K, A]{NodeID: st.NodeID}, + r.notify(ctx, &EventDhtAddNodeInfo{ + NodeInfo: NodeIDToAddrInfo(st.NodeID), }) case *routing.StateProbeWaitingAtCapacity: // the probe state machine is waiting for responses for checks and the maximum number of concurrent checks has been reached. 
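A minimal sketch, assuming only the kadt types shown in the hunks above (kadt.PeerID wrapping peer.ID and kadt.AddrInfo wrapping peer.AddrInfo), of the conversion pattern this patch relies on: the coord package now works in libp2p terms (peer.ID, peer.AddrInfo) and converts at the boundary to go-kademlia's generic kad.NodeID/kad.NodeInfo over key.Key256. The field and helper names are taken from the diffs above; the surrounding program is illustrative only.

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
	"github.com/plprobelab/go-kademlia/kad"
	"github.com/plprobelab/go-kademlia/key"

	"github.com/libp2p/go-libp2p-kad-dht/v2/kadt"
)

func main() {
	ai := peer.AddrInfo{ID: peer.ID("example-peer-id")} // placeholder ID, not a real encoded peer ID

	// peer.AddrInfo -> kad.NodeInfo[key.Key256, ma.Multiaddr], as done when
	// feeding the include/probe state machines (e.g. EventIncludeAddCandidate).
	var info kad.NodeInfo[key.Key256, ma.Multiaddr] = kadt.AddrInfo{Info: ai}

	// peer.ID -> kad.NodeID[key.Key256], as done for routing-table operations
	// and the *MessageFailure events.
	var id kad.NodeID[key.Key256] = kadt.PeerID(ai.ID)

	// ...and back again, mirroring the NodeIDToPeerID / NodeInfoToAddrInfo helpers.
	back := peer.ID(id.(kadt.PeerID))
	fmt.Println(info.ID(), back == ai.ID)
}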
@@ -347,10 +350,3 @@ func (r *RoutingBehaviour[K, A]) advanceProbe(ctx context.Context, ev routing.Pr return nil, false } - -type unaddressedNodeInfo[K kad.Key[K], A kad.Address[A]] struct { - NodeID kad.NodeID[K] -} - -func (u unaddressedNodeInfo[K, A]) ID() kad.NodeID[K] { return u.NodeID } -func (u unaddressedNodeInfo[K, A]) Addresses() []A { return nil } diff --git a/v2/coord/routing_test.go b/v2/coord/routing_test.go index 19cb12ea..58f56bac 100644 --- a/v2/coord/routing_test.go +++ b/v2/coord/routing_test.go @@ -1,4 +1,4 @@ -package kademlia +package coord import ( "errors" @@ -6,7 +6,8 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/plprobelab/go-kademlia/kad" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/network/address" "github.com/plprobelab/go-kademlia/query" @@ -16,6 +17,8 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) func TestRoutingStartBootstrapSendsEvent(t *testing.T) { @@ -23,30 +26,36 @@ func TestRoutingStartBootstrapSendsEvent(t *testing.T) { defer cancel() clk := clock.NewMock() - _, nodes := nettest.LinearTopology(4, clk) + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) - self := nodes[0].NodeInfo.ID() + self := nodes[0].NodeInfo.ID // records the event passed to bootstrap bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) include := new(NullSM[routing.IncludeEvent, routing.IncludeState]) probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) - routingBehaviour := NewRoutingBehaviour[key.Key8, kadtest.StrAddr](self, bootstrap, include, probe, slog.Default()) + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default()) + + req := &pb.Message{ + Type: pb.Message_FIND_NODE, + Key: []byte(self), + } - ev := &EventDhtStartBootstrap[key.Key8, kadtest.StrAddr]{ + ev := &EventDhtStartBootstrap{ ProtocolID: address.ProtocolID("test"), - Message: kadtest.NewRequest("1", self.Key()), - SeedNodes: []kad.NodeID[key.Key8]{nodes[1].NodeInfo.ID()}, + Message: req, + SeedNodes: []peer.ID{nodes[1].NodeInfo.ID}, } routingBehaviour.Notify(ctx, ev) // the event that should be passed to the bootstrap state machine - expected := &routing.EventBootstrapStart[key.Key8, kadtest.StrAddr]{ + expected := &routing.EventBootstrapStart[key.Key256, ma.Multiaddr]{ ProtocolID: ev.ProtocolID, Message: ev.Message, - KnownClosestNodes: ev.SeedNodes, + KnownClosestNodes: SliceOfPeerIDToSliceOfNodeID(ev.SeedNodes), } require.Equal(t, expected, bootstrap.Received) } @@ -56,32 +65,33 @@ func TestRoutingBootstrapGetClosestNodesSuccess(t *testing.T) { defer cancel() clk := clock.NewMock() - _, nodes := nettest.LinearTopology(4, clk) + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) - self := nodes[0].NodeInfo.ID() + self := nodes[0].NodeInfo.ID // records the event passed to bootstrap bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) include := new(NullSM[routing.IncludeEvent, routing.IncludeState]) probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) - routingBehaviour := NewRoutingBehaviour[key.Key8, kadtest.StrAddr](self, bootstrap, include, probe, slog.Default()) + routingBehaviour := 
NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default()) - ev := &EventGetClosestNodesSuccess[key.Key8, kadtest.StrAddr]{ + ev := &EventGetCloserNodesSuccess{ QueryID: query.QueryID("bootstrap"), To: nodes[1].NodeInfo, - Target: nodes[0].NodeInfo.ID().Key(), - ClosestNodes: []kad.NodeInfo[key.Key8, kadtest.StrAddr]{nodes[2].NodeInfo}, + Target: kadt.PeerID(nodes[0].NodeInfo.ID).Key(), + CloserNodes: []peer.AddrInfo{nodes[2].NodeInfo}, } routingBehaviour.Notify(ctx, ev) // bootstrap should receive message response event - require.IsType(t, &routing.EventBootstrapMessageResponse[key.Key8, kadtest.StrAddr]{}, bootstrap.Received) + require.IsType(t, &routing.EventBootstrapMessageResponse[key.Key256, ma.Multiaddr]{}, bootstrap.Received) - rev := bootstrap.Received.(*routing.EventBootstrapMessageResponse[key.Key8, kadtest.StrAddr]) - require.Equal(t, nodes[1].NodeInfo.ID(), rev.NodeID) - require.Equal(t, ev.ClosestNodes, rev.Response.CloserNodes()) + rev := bootstrap.Received.(*routing.EventBootstrapMessageResponse[key.Key256, ma.Multiaddr]) + require.Equal(t, nodes[1].NodeInfo.ID, NodeIDToPeerID(rev.NodeID)) + require.Equal(t, ev.CloserNodes, SliceOfNodeInfoToSliceOfAddrInfo(rev.Response.CloserNodes())) } func TestRoutingBootstrapGetClosestNodesFailure(t *testing.T) { @@ -89,32 +99,33 @@ func TestRoutingBootstrapGetClosestNodesFailure(t *testing.T) { defer cancel() clk := clock.NewMock() - _, nodes := nettest.LinearTopology(4, clk) + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) - self := nodes[0].NodeInfo.ID() + self := nodes[0].NodeInfo.ID // records the event passed to bootstrap bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) include := new(NullSM[routing.IncludeEvent, routing.IncludeState]) probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) - routingBehaviour := NewRoutingBehaviour[key.Key8, kadtest.StrAddr](self, bootstrap, include, probe, slog.Default()) + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default()) failure := errors.New("failed") - ev := &EventGetClosestNodesFailure[key.Key8, kadtest.StrAddr]{ + ev := &EventGetCloserNodesFailure{ QueryID: query.QueryID("bootstrap"), To: nodes[1].NodeInfo, - Target: nodes[0].NodeInfo.ID().Key(), + Target: kadt.PeerID(nodes[0].NodeInfo.ID).Key(), Err: failure, } routingBehaviour.Notify(ctx, ev) // bootstrap should receive message response event - require.IsType(t, &routing.EventBootstrapMessageFailure[key.Key8]{}, bootstrap.Received) + require.IsType(t, &routing.EventBootstrapMessageFailure[key.Key256]{}, bootstrap.Received) - rev := bootstrap.Received.(*routing.EventBootstrapMessageFailure[key.Key8]) - require.Equal(t, nodes[1].NodeInfo.ID(), rev.NodeID) + rev := bootstrap.Received.(*routing.EventBootstrapMessageFailure[key.Key256]) + require.Equal(t, nodes[1].NodeInfo.ID, NodeIDToPeerID(rev.NodeID)) require.Equal(t, failure, rev.Error) } @@ -123,9 +134,10 @@ func TestRoutingAddNodeInfoSendsEvent(t *testing.T) { defer cancel() clk := clock.NewMock() - _, nodes := nettest.LinearTopology(4, clk) + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) - self := nodes[0].NodeInfo.ID() + self := nodes[0].NodeInfo.ID // records the event passed to include include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) @@ -133,17 +145,17 @@ func TestRoutingAddNodeInfoSendsEvent(t *testing.T) { bootstrap := new(NullSM[routing.BootstrapEvent, 
routing.BootstrapState]) probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) - routingBehaviour := NewRoutingBehaviour[key.Key8, kadtest.StrAddr](self, bootstrap, include, probe, slog.Default()) + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default()) - ev := &EventDhtAddNodeInfo[key.Key8, kadtest.StrAddr]{ + ev := &EventDhtAddNodeInfo{ NodeInfo: nodes[2].NodeInfo, } routingBehaviour.Notify(ctx, ev) // the event that should be passed to the include state machine - expected := &routing.EventIncludeAddCandidate[key.Key8, kadtest.StrAddr]{ - NodeInfo: ev.NodeInfo, + expected := &routing.EventIncludeAddCandidate[key.Key256, ma.Multiaddr]{ + NodeInfo: kadt.AddrInfo{Info: ev.NodeInfo}, } require.Equal(t, expected, include.Received) } @@ -153,9 +165,10 @@ func TestRoutingIncludeGetClosestNodesSuccess(t *testing.T) { defer cancel() clk := clock.NewMock() - _, nodes := nettest.LinearTopology(4, clk) + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) - self := nodes[0].NodeInfo.ID() + self := nodes[0].NodeInfo.ID // records the event passed to include include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) @@ -163,23 +176,23 @@ func TestRoutingIncludeGetClosestNodesSuccess(t *testing.T) { bootstrap := new(NullSM[routing.BootstrapEvent, routing.BootstrapState]) probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) - routingBehaviour := NewRoutingBehaviour[key.Key8, kadtest.StrAddr](self, bootstrap, include, probe, slog.Default()) + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default()) - ev := &EventGetClosestNodesSuccess[key.Key8, kadtest.StrAddr]{ + ev := &EventGetCloserNodesSuccess{ QueryID: query.QueryID("include"), To: nodes[1].NodeInfo, - Target: nodes[0].NodeInfo.ID().Key(), - ClosestNodes: []kad.NodeInfo[key.Key8, kadtest.StrAddr]{nodes[2].NodeInfo}, + Target: kadt.PeerID(nodes[0].NodeInfo.ID).Key(), + CloserNodes: []peer.AddrInfo{nodes[2].NodeInfo}, } routingBehaviour.Notify(ctx, ev) // include should receive message response event - require.IsType(t, &routing.EventIncludeMessageResponse[key.Key8, kadtest.StrAddr]{}, include.Received) + require.IsType(t, &routing.EventIncludeMessageResponse[key.Key256, ma.Multiaddr]{}, include.Received) - rev := include.Received.(*routing.EventIncludeMessageResponse[key.Key8, kadtest.StrAddr]) - require.Equal(t, nodes[1].NodeInfo, rev.NodeInfo) - require.Equal(t, ev.ClosestNodes, rev.Response.CloserNodes()) + rev := include.Received.(*routing.EventIncludeMessageResponse[key.Key256, ma.Multiaddr]) + require.Equal(t, nodes[1].NodeInfo, NodeInfoToAddrInfo(rev.NodeInfo)) + require.Equal(t, ev.CloserNodes, SliceOfNodeInfoToSliceOfAddrInfo(rev.Response.CloserNodes())) } func TestRoutingIncludeGetClosestNodesFailure(t *testing.T) { @@ -187,9 +200,10 @@ func TestRoutingIncludeGetClosestNodesFailure(t *testing.T) { defer cancel() clk := clock.NewMock() - _, nodes := nettest.LinearTopology(4, clk) + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) - self := nodes[0].NodeInfo.ID() + self := nodes[0].NodeInfo.ID // records the event passed to include include := NewRecordingSM[routing.IncludeEvent, routing.IncludeState](&routing.StateIncludeIdle{}) @@ -197,23 +211,23 @@ func TestRoutingIncludeGetClosestNodesFailure(t *testing.T) { bootstrap := new(NullSM[routing.BootstrapEvent, routing.BootstrapState]) probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) - routingBehaviour := 
NewRoutingBehaviour[key.Key8, kadtest.StrAddr](self, bootstrap, include, probe, slog.Default()) + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default()) failure := errors.New("failed") - ev := &EventGetClosestNodesFailure[key.Key8, kadtest.StrAddr]{ + ev := &EventGetCloserNodesFailure{ QueryID: query.QueryID("include"), To: nodes[1].NodeInfo, - Target: nodes[0].NodeInfo.ID().Key(), + Target: kadt.PeerID(nodes[0].NodeInfo.ID).Key(), Err: failure, } routingBehaviour.Notify(ctx, ev) // include should receive message response event - require.IsType(t, &routing.EventIncludeMessageFailure[key.Key8, kadtest.StrAddr]{}, include.Received) + require.IsType(t, &routing.EventIncludeMessageFailure[key.Key256, ma.Multiaddr]{}, include.Received) - rev := include.Received.(*routing.EventIncludeMessageFailure[key.Key8, kadtest.StrAddr]) - require.Equal(t, nodes[1].NodeInfo, rev.NodeInfo) + rev := include.Received.(*routing.EventIncludeMessageFailure[key.Key256, ma.Multiaddr]) + require.Equal(t, nodes[1].NodeInfo, NodeInfoToAddrInfo(rev.NodeInfo)) require.Equal(t, failure, rev.Error) } @@ -222,36 +236,37 @@ func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { defer cancel() clk := clock.NewMock() - _, nodes := nettest.LinearTopology(4, clk) + _, nodes, err := nettest.LinearTopology(4, clk) + require.NoError(t, err) - self := nodes[0].NodeInfo.ID() + self := nodes[0].NodeInfo.ID rt := nodes[0].RoutingTable includeCfg := routing.DefaultIncludeConfig() includeCfg.Clock = clk - include, err := routing.NewInclude[key.Key8, kadtest.StrAddr](rt, includeCfg) + include, err := routing.NewInclude[key.Key256, ma.Multiaddr](rt, includeCfg) require.NoError(t, err) probeCfg := routing.DefaultProbeConfig() probeCfg.Clock = clk probeCfg.CheckInterval = 5 * time.Minute - probe, err := routing.NewProbe[key.Key8, kadtest.StrAddr](rt, probeCfg) + probe, err := routing.NewProbe[key.Key256, ma.Multiaddr](rt, probeCfg) require.NoError(t, err) // ensure bootstrap is always idle bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) - routingBehaviour := NewRoutingBehaviour[key.Key8, kadtest.StrAddr](self, bootstrap, include, probe, slog.Default()) + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default()) // a new node to be included candidate := nodes[len(nodes)-1].NodeInfo // the routing table should not contain the node yet - _, intable := rt.GetNode(candidate.ID().Key()) + _, intable := rt.GetNode(kadt.PeerID(candidate.ID).Key()) require.False(t, intable) // notify that there is a new node to be included - routingBehaviour.Notify(ctx, &EventDhtAddNodeInfo[key.Key8, kadtest.StrAddr]{ + routingBehaviour.Notify(ctx, &EventDhtAddNodeInfo{ NodeInfo: candidate, }) @@ -260,29 +275,29 @@ func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { require.True(t, ok) // include should be asking to send a message to the node - require.IsType(t, &EventOutboundGetClosestNodes[key.Key8, kadtest.StrAddr]{}, dev) + require.IsType(t, &EventOutboundGetClosestNodes{}, dev) - oev := dev.(*EventOutboundGetClosestNodes[key.Key8, kadtest.StrAddr]) + oev := dev.(*EventOutboundGetClosestNodes) // advance time a little clk.Add(time.Second) // notify a successful response back (best to use the notify included in the event even though it will be the behaviour's Notify method) - oev.Notify.Notify(ctx, &EventGetClosestNodesSuccess[key.Key8, kadtest.StrAddr]{ + oev.Notify.Notify(ctx, &EventGetCloserNodesSuccess{ QueryID: oev.QueryID, 
To: oev.To, Target: oev.Target, - ClosestNodes: []kad.NodeInfo[key.Key8, kadtest.StrAddr]{nodes[1].NodeInfo}, // must include one for include check to pass + CloserNodes: []peer.AddrInfo{nodes[1].NodeInfo}, // must include one for include check to pass }) // the routing table should now contain the node - _, intable = rt.GetNode(candidate.ID().Key()) + _, intable = rt.GetNode(kadt.PeerID(candidate.ID).Key()) require.True(t, intable) // routing update event should be emitted from the include state machine dev, ok = routingBehaviour.Perform(ctx) require.True(t, ok) - require.IsType(t, &EventRoutingUpdated[key.Key8, kadtest.StrAddr]{}, dev) + require.IsType(t, &EventRoutingUpdated{}, dev) // advance time past the probe check interval clk.Add(probeCfg.CheckInterval) @@ -290,11 +305,10 @@ func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { // routing update event should be emitted from the include state machine dev, ok = routingBehaviour.Perform(ctx) require.True(t, ok) - require.IsType(t, &EventOutboundGetClosestNodes[key.Key8, kadtest.StrAddr]{}, dev) + require.IsType(t, &EventOutboundGetClosestNodes{}, dev) // confirm that the message is for the correct node - oev = dev.(*EventOutboundGetClosestNodes[key.Key8, kadtest.StrAddr]) + oev = dev.(*EventOutboundGetClosestNodes) require.Equal(t, query.QueryID("probe"), oev.QueryID) - require.Equal(t, candidate.ID(), oev.To.ID()) - require.Equal(t, candidate.ID().Key(), oev.Target) + require.Equal(t, candidate.ID, oev.To.ID) } diff --git a/v2/kadt/kadt.go b/v2/kadt/kadt.go index b2d586f9..d7ec659c 100644 --- a/v2/kadt/kadt.go +++ b/v2/kadt/kadt.go @@ -53,6 +53,11 @@ func (ai AddrInfo) ID() kad.NodeID[key.Key256] { return PeerID(ai.Info.ID) } +// ID returns the peer ID of this peer's information struct as a PeerID +func (ai AddrInfo) PeerID() PeerID { + return PeerID(ai.Info.ID) +} + // Addresses returns all Multiaddresses of this peer. 
func (ai AddrInfo) Addresses() []ma.Multiaddr { addrs := make([]ma.Multiaddr, len(ai.Info.Addrs)) From 7add530fa3f943f7c78383ba04596591dc5bd1be Mon Sep 17 00:00:00 2001 From: Ian Davis <18375+iand@users.noreply.github.com> Date: Mon, 4 Sep 2023 15:45:43 +0100 Subject: [PATCH 10/26] Update to latest go-kademlia --- v2/go.mod | 2 +- v2/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/v2/go.mod b/v2/go.mod index d951ff3d..dba7a727 100644 --- a/v2/go.mod +++ b/v2/go.mod @@ -16,7 +16,7 @@ require ( github.com/libp2p/go-msgio v0.3.0 github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-multiaddr v0.11.0 - github.com/plprobelab/go-kademlia v0.0.0-20230823114513-9b9e606066c9 + github.com/plprobelab/go-kademlia v0.0.0-20230901130940-286ab4ceca60 github.com/stretchr/testify v1.8.4 go.opencensus.io v0.24.0 go.opentelemetry.io/otel v1.16.0 diff --git a/v2/go.sum b/v2/go.sum index 6f0a62b5..c5ae0a1a 100644 --- a/v2/go.sum +++ b/v2/go.sum @@ -283,8 +283,8 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhM github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/plprobelab/go-kademlia v0.0.0-20230823114513-9b9e606066c9 h1:qqrJgUNOCAozZDkL0gH57FUi+aXj/d/SdldaLAZUFUU= -github.com/plprobelab/go-kademlia v0.0.0-20230823114513-9b9e606066c9/go.mod h1:OMu6Kyh5AetV3uLRVSZlp6WcwrZUn3nyRFaRuJxVWJQ= +github.com/plprobelab/go-kademlia v0.0.0-20230901130940-286ab4ceca60 h1:fgo8NhFeL+p7atahZNtvo1BfWClUNRvAjzC2ikEwvsY= +github.com/plprobelab/go-kademlia v0.0.0-20230901130940-286ab4ceca60/go.mod h1:OMu6Kyh5AetV3uLRVSZlp6WcwrZUn3nyRFaRuJxVWJQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= From 25bcc7f2538c4251720677ecc300374a39821f2a Mon Sep 17 00:00:00 2001 From: Ian Davis <18375+iand@users.noreply.github.com> Date: Mon, 4 Sep 2023 16:45:43 +0100 Subject: [PATCH 11/26] Cleanup naming of events --- v2/coord/behaviour.go | 22 ++++---- v2/coord/conversion.go | 13 +++-- v2/coord/coordinator.go | 76 +++++++++++++-------------- v2/coord/coretypes.go | 10 ++-- v2/coord/event.go | 107 ++++++++++++++++++++++----------------- v2/coord/event_test.go | 25 +++++++++ v2/coord/network.go | 34 ++++++------- v2/coord/query.go | 31 ++++++------ v2/coord/routing.go | 70 ++++++++++++------------- v2/coord/routing_test.go | 57 ++++++++++----------- 10 files changed, 239 insertions(+), 206 deletions(-) create mode 100644 v2/coord/event_test.go diff --git a/v2/coord/behaviour.go b/v2/coord/behaviour.go index c2574517..4077d347 100644 --- a/v2/coord/behaviour.go +++ b/v2/coord/behaviour.go @@ -6,22 +6,22 @@ import ( "sync/atomic" ) -type Notify[C DhtEvent] interface { +type Notify[C BehaviourEvent] interface { Notify(ctx context.Context, ev C) } -type NotifyCloser[C DhtEvent] interface { +type NotifyCloser[C BehaviourEvent] interface { Notify[C] Close() } -type NotifyFunc[C DhtEvent] func(ctx context.Context, ev C) +type NotifyFunc[C BehaviourEvent] func(ctx context.Context, ev C) func (f NotifyFunc[C]) Notify(ctx context.Context, ev C) { f(ctx, ev) } -type Behaviour[I DhtEvent, O DhtEvent] interface { +type Behaviour[I BehaviourEvent, O BehaviourEvent] interface 
{ // Ready returns a channel that signals when the behaviour is ready to perform work. Ready() <-chan struct{} @@ -39,20 +39,20 @@ type SM[E any, S any] interface { Advance(context.Context, E) S } -type WorkQueueFunc[E DhtEvent] func(context.Context, E) bool +type WorkQueueFunc[E BehaviourEvent] func(context.Context, E) bool // WorkQueue is buffered queue of work to be performed. // The queue automatically drains the queue sequentially by calling a // WorkQueueFunc for each work item, passing the original context // and event. -type WorkQueue[E DhtEvent] struct { +type WorkQueue[E BehaviourEvent] struct { pending chan pendingEvent[E] fn WorkQueueFunc[E] done atomic.Bool once sync.Once } -func NewWorkQueue[E DhtEvent](fn WorkQueueFunc[E]) *WorkQueue[E] { +func NewWorkQueue[E BehaviourEvent](fn WorkQueueFunc[E]) *WorkQueue[E] { w := &WorkQueue[E]{ pending: make(chan pendingEvent[E], 16), fn: fn, @@ -102,21 +102,21 @@ func (w *WorkQueue[E]) Enqueue(ctx context.Context, cmd E) error { // A Waiter is a Notifiee whose Notify method forwards the // notified event to a channel which a client can wait on. -type Waiter[E DhtEvent] struct { +type Waiter[E BehaviourEvent] struct { pending chan WaiterEvent[E] done atomic.Bool } -var _ Notify[DhtEvent] = (*Waiter[DhtEvent])(nil) +var _ Notify[BehaviourEvent] = (*Waiter[BehaviourEvent])(nil) -func NewWaiter[E DhtEvent]() *Waiter[E] { +func NewWaiter[E BehaviourEvent]() *Waiter[E] { w := &Waiter[E]{ pending: make(chan WaiterEvent[E], 16), } return w } -type WaiterEvent[E DhtEvent] struct { +type WaiterEvent[E BehaviourEvent] struct { Ctx context.Context Event E } diff --git a/v2/coord/conversion.go b/v2/coord/conversion.go index 19fec751..dadc0bcc 100644 --- a/v2/coord/conversion.go +++ b/v2/coord/conversion.go @@ -4,14 +4,13 @@ import ( "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/key" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) // NodeInfoToAddrInfo converts a kad.NodeInfo to a peer.AddrInfo. // This function will panic if info.ID() does not return a kadt.PeerID -func NodeInfoToAddrInfo(info kad.NodeInfo[key.Key256, ma.Multiaddr]) peer.AddrInfo { +func NodeInfoToAddrInfo(info kad.NodeInfo[KadKey, ma.Multiaddr]) peer.AddrInfo { peerID := info.ID().(kadt.PeerID) return peer.AddrInfo{ ID: peer.ID(peerID), @@ -21,7 +20,7 @@ func NodeInfoToAddrInfo(info kad.NodeInfo[key.Key256, ma.Multiaddr]) peer.AddrIn // NodeIDToAddrInfo converts a kad.NodeID to a peer.AddrInfo with no addresses. // This function will panic if id's underlying type is not kadt.PeerID -func NodeIDToAddrInfo(id kad.NodeID[key.Key256]) peer.AddrInfo { +func NodeIDToAddrInfo(id kad.NodeID[KadKey]) peer.AddrInfo { peerID := id.(kadt.PeerID) return peer.AddrInfo{ ID: peer.ID(peerID), @@ -30,7 +29,7 @@ func NodeIDToAddrInfo(id kad.NodeID[key.Key256]) peer.AddrInfo { // SliceOfNodeInfoToSliceOfAddrInfo converts a kad.NodeInfo to a peer.AddrInfo. 
// This function will panic if any info.ID() does not return a kadt.PeerID -func SliceOfNodeInfoToSliceOfAddrInfo(infos []kad.NodeInfo[key.Key256, ma.Multiaddr]) []peer.AddrInfo { +func SliceOfNodeInfoToSliceOfAddrInfo(infos []kad.NodeInfo[KadKey, ma.Multiaddr]) []peer.AddrInfo { peers := make([]peer.AddrInfo, len(infos)) for i := range infos { peerID := infos[i].ID().(kadt.PeerID) @@ -43,8 +42,8 @@ func SliceOfNodeInfoToSliceOfAddrInfo(infos []kad.NodeInfo[key.Key256, ma.Multia } // SliceOfPeerIDToSliceOfNodeID converts a slice peer.ID to a slice of kad.NodeID -func SliceOfPeerIDToSliceOfNodeID(peers []peer.ID) []kad.NodeID[key.Key256] { - nodes := make([]kad.NodeID[key.Key256], len(peers)) +func SliceOfPeerIDToSliceOfNodeID(peers []peer.ID) []kad.NodeID[KadKey] { + nodes := make([]kad.NodeID[KadKey], len(peers)) for i := range peers { nodes[i] = kadt.PeerID(peers[i]) } @@ -53,6 +52,6 @@ func SliceOfPeerIDToSliceOfNodeID(peers []peer.ID) []kad.NodeID[key.Key256] { // NodeIDToPeerID converts a kad.NodeID to a peer.ID. // This function will panic if id's underlying type is not kadt.PeerID -func NodeIDToPeerID(id kad.NodeID[key.Key256]) peer.ID { +func NodeIDToPeerID(id kad.NodeID[KadKey]) peer.ID { return peer.ID(id.(kadt.PeerID)) } diff --git a/v2/coord/coordinator.go b/v2/coord/coordinator.go index 4ceab9f0..ee9a2a52 100644 --- a/v2/coord/coordinator.go +++ b/v2/coord/coordinator.go @@ -12,7 +12,6 @@ import ( ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/kaderr" - "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/network/address" "github.com/plprobelab/go-kademlia/query" "github.com/plprobelab/go-kademlia/routing" @@ -24,16 +23,15 @@ import ( ) // A Coordinator coordinates the state machines that comprise a Kademlia DHT -// It is only one possible configuration of the DHT components, others are possible. type Coordinator struct { - // self is the node id of the system the dht is running on + // self is the peer id of the system the dht is running on self peer.ID // cfg is a copy of the optional configuration supplied to the dht - cfg Config + cfg CoordinatorConfig // rt is the routing table used to look up nodes by distance - rt kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]] + rt kad.RoutingTable[KadKey, kad.NodeID[KadKey]] // rtr is the message router used to send messages rtr Router @@ -44,15 +42,15 @@ type Coordinator struct { networkBehaviour *NetworkBehaviour // routingBehaviour is the behaviour responsible for maintaining the routing table - routingBehaviour Behaviour[DhtEvent, DhtEvent] + routingBehaviour Behaviour[BehaviourEvent, BehaviourEvent] // queryBehaviour is the behaviour responsible for running user-submitted queries - queryBehaviour Behaviour[DhtEvent, DhtEvent] + queryBehaviour Behaviour[BehaviourEvent, BehaviourEvent] } const DefaultChanqueueCapacity = 1024 -type Config struct { +type CoordinatorConfig struct { PeerstoreTTL time.Duration // duration for which a peer is kept in the peerstore Clock clock.Clock // a clock that may replaced by a mock when testing @@ -67,52 +65,52 @@ type Config struct { } // Validate checks the configuration options and returns an error if any have invalid values. 
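For the CoordinatorConfig rename above, a caller would normally start from DefaultConfig, override a few fields and rely on Validate before constructing the coordinator (both appear just below). The following is a minimal sketch, not part of the patch set: newCoordinatorExample and the concrete override values are illustrative assumptions, and it presumes the surrounding coord package plus the "fmt" and "time" imports.

// Minimal sketch (illustrative names and values): build a Coordinator from a
// tweaked default configuration and let Validate reject bad values.
func newCoordinatorExample(self peer.ID, rtr Router, rt routing.RoutingTableCpl[KadKey, kad.NodeID[KadKey]]) (*Coordinator, error) {
	cfg := DefaultConfig()
	cfg.QueryConcurrency = 5              // example value: run up to five queries at once
	cfg.RequestTimeout = 30 * time.Second // example value: give up on unresponsive nodes sooner than the default minute
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("validate coordinator config: %w", err)
	}
	return NewCoordinator(self, rtr, rt, cfg)
}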
-func (cfg *Config) Validate() error { +func (cfg *CoordinatorConfig) Validate() error { if cfg.Clock == nil { return &kaderr.ConfigurationError{ - Component: "DhtConfig", + Component: "CoordinatorConfig", Err: fmt.Errorf("clock must not be nil"), } } if cfg.QueryConcurrency < 1 { return &kaderr.ConfigurationError{ - Component: "DhtConfig", + Component: "CoordinatorConfig", Err: fmt.Errorf("query concurrency must be greater than zero"), } } if cfg.QueryTimeout < 1 { return &kaderr.ConfigurationError{ - Component: "DhtConfig", + Component: "CoordinatorConfig", Err: fmt.Errorf("query timeout must be greater than zero"), } } if cfg.RequestConcurrency < 1 { return &kaderr.ConfigurationError{ - Component: "DhtConfig", + Component: "CoordinatorConfig", Err: fmt.Errorf("request concurrency must be greater than zero"), } } if cfg.RequestTimeout < 1 { return &kaderr.ConfigurationError{ - Component: "DhtConfig", + Component: "CoordinatorConfig", Err: fmt.Errorf("request timeout must be greater than zero"), } } if cfg.Logger == nil { return &kaderr.ConfigurationError{ - Component: "DhtConfig", + Component: "CoordinatorConfig", Err: fmt.Errorf("logger must not be nil"), } } return nil } -func DefaultConfig() *Config { - return &Config{ +func DefaultConfig() *CoordinatorConfig { + return &CoordinatorConfig{ Clock: clock.New(), // use standard time PeerstoreTTL: 10 * time.Minute, QueryConcurrency: 3, @@ -123,7 +121,7 @@ func DefaultConfig() *Config { } } -func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[key.Key256, kad.NodeID[key.Key256]], cfg *Config) (*Coordinator, error) { +func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[KadKey, kad.NodeID[KadKey]], cfg *CoordinatorConfig) (*Coordinator, error) { if cfg == nil { cfg = DefaultConfig() } else if err := cfg.Validate(); err != nil { @@ -137,19 +135,19 @@ func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[key.Key qpCfg.QueryConcurrency = cfg.RequestConcurrency qpCfg.RequestTimeout = cfg.RequestTimeout - qp, err := query.NewPool[key.Key256, ma.Multiaddr](kadt.PeerID(self), qpCfg) + qp, err := query.NewPool[KadKey, ma.Multiaddr](kadt.PeerID(self), qpCfg) if err != nil { return nil, fmt.Errorf("query pool: %w", err) } queryBehaviour := NewPooledQueryBehaviour(qp, cfg.Logger) - bootstrapCfg := routing.DefaultBootstrapConfig[key.Key256, ma.Multiaddr]() + bootstrapCfg := routing.DefaultBootstrapConfig[KadKey, ma.Multiaddr]() bootstrapCfg.Clock = cfg.Clock bootstrapCfg.Timeout = cfg.QueryTimeout bootstrapCfg.RequestConcurrency = cfg.RequestConcurrency bootstrapCfg.RequestTimeout = cfg.RequestTimeout - bootstrap, err := routing.NewBootstrap[key.Key256, ma.Multiaddr](kadt.PeerID(self), bootstrapCfg) + bootstrap, err := routing.NewBootstrap[KadKey, ma.Multiaddr](kadt.PeerID(self), bootstrapCfg) if err != nil { return nil, fmt.Errorf("bootstrap: %w", err) } @@ -163,7 +161,7 @@ func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[key.Key // includeCfg.Concurrency = cfg.IncludeConcurrency // includeCfg.Timeout = cfg.IncludeTimeout - include, err := routing.NewInclude[key.Key256, ma.Multiaddr](rt, includeCfg) + include, err := routing.NewInclude[KadKey, ma.Multiaddr](rt, includeCfg) if err != nil { return nil, fmt.Errorf("include: %w", err) } @@ -174,7 +172,7 @@ func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[key.Key // TODO: expose config // probeCfg.Concurrency = cfg.ProbeConcurrency - probe, err := routing.NewProbe[key.Key256, ma.Multiaddr](rt, 
probeCfg) + probe, err := routing.NewProbe[KadKey, ma.Multiaddr](rt, probeCfg) if err != nil { return nil, fmt.Errorf("probe: %w", err) } @@ -222,7 +220,7 @@ func (d *Coordinator) eventLoop() { ctx := context.Background() for { - var ev DhtEvent + var ev BehaviourEvent var ok bool select { case <-d.networkBehaviour.Ready(): @@ -234,25 +232,21 @@ func (d *Coordinator) eventLoop() { } if ok { - d.dispatchDhtNotice(ctx, ev) + d.dispatchEvent(ctx, ev) } } } -func (c *Coordinator) dispatchDhtNotice(ctx context.Context, ev DhtEvent) { - ctx, span := util.StartSpan(ctx, "Coordinator.dispatchDhtNotice") +func (c *Coordinator) dispatchEvent(ctx context.Context, ev BehaviourEvent) { + ctx, span := util.StartSpan(ctx, "Coordinator.dispatchEvent") defer span.End() switch ev := ev.(type) { - case *EventDhtStartBootstrap: - c.routingBehaviour.Notify(ctx, ev) - case *EventOutboundGetClosestNodes: + case NetworkCommand: c.networkBehaviour.Notify(ctx, ev) - case *EventStartQuery: - c.queryBehaviour.Notify(ctx, ev) - case *EventStopQuery: + case QueryCommand: c.queryBehaviour.Notify(ctx, ev) - case *EventDhtAddNodeInfo: + case RoutingCommand: c.routingBehaviour.Notify(ctx, ev) case RoutingNotification: select { @@ -260,6 +254,8 @@ func (c *Coordinator) dispatchDhtNotice(ctx context.Context, ev DhtEvent) { case c.routingNotifications <- ev: default: } + default: + panic(fmt.Sprintf("unexpected event: %T", ev)) } } @@ -278,7 +274,7 @@ func (d *Coordinator) GetNode(ctx context.Context, id peer.ID) (Node, error) { } // GetClosestNodes requests the n closest nodes to the key from the node's local routing table. -func (d *Coordinator) GetClosestNodes(ctx context.Context, k key.Key256, n int) ([]Node, error) { +func (d *Coordinator) GetClosestNodes(ctx context.Context, k KadKey, n int) ([]Node, error) { closest := d.rt.NearestNodes(k, n) nodes := make([]Node, 0, len(closest)) for _, id := range closest { @@ -293,7 +289,7 @@ func (d *Coordinator) GetClosestNodes(ctx context.Context, k key.Key256, n int) // GetValue requests that the node return any value associated with the supplied key. // If the node does not have a value for the key it returns ErrValueNotFound. -func (d *Coordinator) GetValue(ctx context.Context, k key.Key256) (Value, error) { +func (d *Coordinator) GetValue(ctx context.Context, k KadKey) (Value, error) { panic("not implemented") } @@ -304,7 +300,7 @@ func (d *Coordinator) PutValue(ctx context.Context, r Value, q int) error { } // Query traverses the DHT calling fn for each node visited. 
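The dispatchEvent switch above keys off the command marker interfaces (NetworkCommand, QueryCommand, RoutingCommand) introduced later in this patch in event.go, instead of enumerating concrete event types. Below is a minimal, self-contained sketch of that pattern; the event fields, the multiaddr string and the dispatch/main helpers are simplified stand-ins for illustration, not code from this patch set.

// Minimal sketch of the command marker-interface dispatch (simplified types).
package main

import "fmt"

type BehaviourEvent interface{ behaviourEvent() }

type RoutingCommand interface {
	BehaviourEvent
	routingCommand()
}

type QueryCommand interface {
	BehaviourEvent
	queryCommand()
}

// EventAddAddrInfo and EventStopQuery stand in for the real events; their
// fields are simplified here.
type EventAddAddrInfo struct{ Addr string }

func (*EventAddAddrInfo) behaviourEvent() {}
func (*EventAddAddrInfo) routingCommand() {}

type EventStopQuery struct{ QueryID string }

func (*EventStopQuery) behaviourEvent() {}
func (*EventStopQuery) queryCommand()   {}

// dispatch mirrors the shape of Coordinator.dispatchEvent: it switches on the
// kind of command and hands the event to the matching behaviour.
func dispatch(ev BehaviourEvent) {
	switch ev := ev.(type) {
	case RoutingCommand:
		fmt.Printf("routing behaviour handles %T\n", ev)
	case QueryCommand:
		fmt.Printf("query behaviour handles %T\n", ev)
	default:
		panic(fmt.Sprintf("unexpected event: %T", ev))
	}
}

func main() {
	dispatch(&EventAddAddrInfo{Addr: "/ip4/127.0.0.1/tcp/4001"})
	dispatch(&EventStopQuery{QueryID: "query-1"})
}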
-func (d *Coordinator) Query(ctx context.Context, target key.Key256, fn QueryFunc) (QueryStats, error) { +func (d *Coordinator) Query(ctx context.Context, target KadKey, fn QueryFunc) (QueryStats, error) { ctx, span := util.StartSpan(ctx, "Coordinator.Query") defer span.End() @@ -321,7 +317,7 @@ func (d *Coordinator) Query(ctx context.Context, target key.Key256, fn QueryFunc seedIDs = append(seedIDs, s.ID()) } - waiter := NewWaiter[DhtEvent]() + waiter := NewWaiter[BehaviourEvent]() queryID := query.QueryID("foo") cmd := &EventStartQuery{ @@ -397,7 +393,7 @@ func (d *Coordinator) AddNodes(ctx context.Context, infos []peer.AddrInfo) error continue } - d.routingBehaviour.Notify(ctx, &EventDhtAddNodeInfo{ + d.routingBehaviour.Notify(ctx, &EventAddAddrInfo{ NodeInfo: info, }) @@ -410,7 +406,7 @@ func (d *Coordinator) AddNodes(ctx context.Context, infos []peer.AddrInfo) error func (d *Coordinator) Bootstrap(ctx context.Context, seeds []peer.ID) error { ctx, span := util.StartSpan(ctx, "Coordinator.Bootstrap") defer span.End() - d.routingBehaviour.Notify(ctx, &EventDhtStartBootstrap{ + d.routingBehaviour.Notify(ctx, &EventStartBootstrap{ // Bootstrap state machine uses the message Message: &fakeMessage{key: kadt.PeerID(d.self).Key()}, SeedNodes: seeds, diff --git a/v2/coord/coretypes.go b/v2/coord/coretypes.go index 55476efe..ad7036b1 100644 --- a/v2/coord/coretypes.go +++ b/v2/coord/coretypes.go @@ -12,9 +12,11 @@ import ( "github.com/plprobelab/go-kademlia/network/address" ) +type KadKey = key.Key256 + // Value is a value that may be stored in the DHT. type Value interface { - Key() key.Key256 + Key() KadKey MarshalBinary() ([]byte, error) } @@ -27,11 +29,11 @@ type Node interface { // GetClosestNodes requests the n closest nodes to the key from the node's local routing table. // The node may return fewer nodes than requested. - GetClosestNodes(ctx context.Context, key key.Key256, n int) ([]Node, error) + GetClosestNodes(ctx context.Context, key KadKey, n int) ([]Node, error) // GetValue requests that the node return any value associated with the supplied key. // If the node does not have a value for the key it returns ErrValueNotFound. - GetValue(ctx context.Context, key key.Key256) (Value, error) + GetValue(ctx context.Context, key KadKey) (Value, error) // PutValue requests that the node stores a value to be associated with the supplied key. // If the node cannot or chooses not to store the value for the key it returns ErrValueNotAccepted. @@ -83,5 +85,5 @@ type Router interface { // GetClosestNodes attempts to send a request to another node asking it for nodes that it considers to be // closest to the target key. - GetClosestNodes(ctx context.Context, to peer.AddrInfo, target key.Key256) ([]peer.AddrInfo, error) + GetClosestNodes(ctx context.Context, to peer.AddrInfo, target KadKey) ([]peer.AddrInfo, error) } diff --git a/v2/coord/event.go b/v2/coord/event.go index 9f65638e..f4ecae4d 100644 --- a/v2/coord/event.go +++ b/v2/coord/event.go @@ -4,133 +4,146 @@ import ( "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" - "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/network/address" "github.com/plprobelab/go-kademlia/query" ) -type DhtEvent interface { - dhtEvent() +type BehaviourEvent interface { + behaviourEvent() } -type DhtCommand interface { - DhtEvent - dhtCommand() +// RoutingCommand is a type of [BehaviourEvent] that instructs a [RoutingBehaviour] to perform an action. 
+type RoutingCommand interface { + BehaviourEvent + routingCommand() +} + +// NetworkCommand is a type of [BehaviourEvent] that instructs a [NetworkBehaviour] to perform an action. +type NetworkCommand interface { + BehaviourEvent + networkCommand() +} + +// QueryCommand is a type of [BehaviourEvent] that instructs a [QueryBehaviour] to perform an action. +type QueryCommand interface { + BehaviourEvent + queryCommand() } type NodeHandlerRequest interface { - DhtEvent + BehaviourEvent nodeHandlerRequest() } type NodeHandlerResponse interface { - DhtEvent + BehaviourEvent nodeHandlerResponse() } type RoutingNotification interface { - DhtEvent - routingNotificationEvent() + BehaviourEvent + routingNotification() } -type EventDhtStartBootstrap struct { +type EventStartBootstrap struct { ProtocolID address.ProtocolID - Message kad.Request[key.Key256, ma.Multiaddr] + Message kad.Request[KadKey, ma.Multiaddr] SeedNodes []peer.ID // TODO: peer.AddrInfo } -func (EventDhtStartBootstrap) dhtEvent() {} -func (EventDhtStartBootstrap) dhtCommand() {} +func (*EventStartBootstrap) behaviourEvent() {} +func (*EventStartBootstrap) routingCommand() {} -type EventOutboundGetClosestNodes struct { +type EventOutboundGetCloserNodes struct { QueryID query.QueryID To peer.AddrInfo - Target key.Key256 - Notify Notify[DhtEvent] + Target KadKey + Notify Notify[BehaviourEvent] } -func (EventOutboundGetClosestNodes) dhtEvent() {} -func (EventOutboundGetClosestNodes) nodeHandlerRequest() {} +func (*EventOutboundGetCloserNodes) behaviourEvent() {} +func (*EventOutboundGetCloserNodes) nodeHandlerRequest() {} +func (*EventOutboundGetCloserNodes) networkCommand() {} type EventStartQuery struct { QueryID query.QueryID - Target key.Key256 + Target KadKey ProtocolID address.ProtocolID - Message kad.Request[key.Key256, ma.Multiaddr] + Message kad.Request[KadKey, ma.Multiaddr] KnownClosestNodes []peer.ID - Notify NotifyCloser[DhtEvent] + Notify NotifyCloser[BehaviourEvent] } -func (EventStartQuery) dhtEvent() {} -func (EventStartQuery) dhtCommand() {} +func (*EventStartQuery) behaviourEvent() {} +func (*EventStartQuery) queryCommand() {} type EventStopQuery struct { QueryID query.QueryID } -func (EventStopQuery) dhtEvent() {} -func (EventStopQuery) dhtCommand() {} +func (*EventStopQuery) behaviourEvent() {} +func (*EventStopQuery) queryCommand() {} -type EventDhtAddNodeInfo struct { +type EventAddAddrInfo struct { NodeInfo peer.AddrInfo } -func (EventDhtAddNodeInfo) dhtEvent() {} -func (EventDhtAddNodeInfo) dhtCommand() {} +func (*EventAddAddrInfo) behaviourEvent() {} +func (*EventAddAddrInfo) routingCommand() {} type EventGetCloserNodesSuccess struct { QueryID query.QueryID To peer.AddrInfo - Target key.Key256 + Target KadKey CloserNodes []peer.AddrInfo } -func (EventGetCloserNodesSuccess) dhtEvent() {} -func (EventGetCloserNodesSuccess) nodeHandlerResponse() {} +func (*EventGetCloserNodesSuccess) behaviourEvent() {} +func (*EventGetCloserNodesSuccess) nodeHandlerResponse() {} type EventGetCloserNodesFailure struct { QueryID query.QueryID To peer.AddrInfo - Target key.Key256 + Target KadKey Err error } -func (EventGetCloserNodesFailure) dhtEvent() {} -func (EventGetCloserNodesFailure) nodeHandlerResponse() {} +func (*EventGetCloserNodesFailure) behaviourEvent() {} +func (*EventGetCloserNodesFailure) nodeHandlerResponse() {} -// EventQueryProgressed is emitted by the dht when a query has received a +// EventQueryProgressed is emitted by the coordinator when a query has received a // response from a node. 
type EventQueryProgressed struct { QueryID query.QueryID NodeID peer.ID - Response kad.Response[key.Key256, ma.Multiaddr] + Response kad.Response[KadKey, ma.Multiaddr] Stats query.QueryStats } -func (*EventQueryProgressed) dhtEvent() {} +func (*EventQueryProgressed) behaviourEvent() {} -// EventQueryFinished is emitted by the dht when a query has finished, either through +// EventQueryFinished is emitted by the coordinator when a query has finished, either through // running to completion or by being canceled. type EventQueryFinished struct { QueryID query.QueryID Stats query.QueryStats } -func (*EventQueryFinished) dhtEvent() {} +func (*EventQueryFinished) behaviourEvent() {} -// EventRoutingUpdated is emitted by the dht when a new node has been verified and added to the routing table. +// EventRoutingUpdated is emitted by the coordinator when a new node has been verified and added to the routing table. type EventRoutingUpdated struct { - NodeInfo kad.NodeInfo[key.Key256, ma.Multiaddr] + NodeInfo kad.NodeInfo[KadKey, ma.Multiaddr] } -func (*EventRoutingUpdated) dhtEvent() {} -func (*EventRoutingUpdated) routingNotificationEvent() {} +func (*EventRoutingUpdated) behaviourEvent() {} +func (*EventRoutingUpdated) routingNotification() {} -// EventBootstrapFinished is emitted by the dht when a bootstrap has finished, either through +// EventBootstrapFinished is emitted by the coordinator when a bootstrap has finished, either through // running to completion or by being canceled. type EventBootstrapFinished struct { Stats query.QueryStats } -func (*EventBootstrapFinished) dhtEvent() {} -func (*EventBootstrapFinished) routingNotificationEvent() {} +func (*EventBootstrapFinished) behaviourEvent() {} +func (*EventBootstrapFinished) routingNotification() {} diff --git a/v2/coord/event_test.go b/v2/coord/event_test.go new file mode 100644 index 00000000..b6afdd4a --- /dev/null +++ b/v2/coord/event_test.go @@ -0,0 +1,25 @@ +package coord + +var _ NetworkCommand = (*EventOutboundGetCloserNodes)(nil) + +var ( + _ RoutingCommand = (*EventAddAddrInfo)(nil) + _ RoutingCommand = (*EventStartBootstrap)(nil) +) + +var ( + _ QueryCommand = (*EventStartQuery)(nil) + _ QueryCommand = (*EventStopQuery)(nil) +) + +var ( + _ RoutingNotification = (*EventRoutingUpdated)(nil) + _ RoutingNotification = (*EventBootstrapFinished)(nil) +) + +var _ NodeHandlerRequest = (*EventOutboundGetCloserNodes)(nil) + +var ( + _ NodeHandlerResponse = (*EventGetCloserNodesSuccess)(nil) + _ NodeHandlerResponse = (*EventGetCloserNodesFailure)(nil) +) diff --git a/v2/coord/network.go b/v2/coord/network.go index 11bbea91..bf241b5d 100644 --- a/v2/coord/network.go +++ b/v2/coord/network.go @@ -23,7 +23,7 @@ type NetworkBehaviour struct { nodeHandlers map[peer.ID]*NodeHandler // TODO: garbage collect node handlers pendingMu sync.Mutex - pending []DhtEvent + pending []BehaviourEvent ready chan struct{} logger *slog.Logger @@ -40,12 +40,12 @@ func NewNetworkBehaviour(rtr Router, logger *slog.Logger) *NetworkBehaviour { return b } -func (b *NetworkBehaviour) Notify(ctx context.Context, ev DhtEvent) { +func (b *NetworkBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { b.pendingMu.Lock() defer b.pendingMu.Unlock() switch ev := ev.(type) { - case *EventOutboundGetClosestNodes: + case *EventOutboundGetCloserNodes: b.nodeHandlersMu.Lock() nh, ok := b.nodeHandlers[ev.To.ID] if !ok { @@ -70,14 +70,14 @@ func (b *NetworkBehaviour) Ready() <-chan struct{} { return b.ready } -func (b *NetworkBehaviour) Perform(ctx context.Context) (DhtEvent, 
bool) { +func (b *NetworkBehaviour) Perform(ctx context.Context) (BehaviourEvent, bool) { // No inbound work can be done until Perform is complete b.pendingMu.Lock() defer b.pendingMu.Unlock() // drain queued events. if len(b.pending) > 0 { - var ev DhtEvent + var ev BehaviourEvent ev, b.pending = b.pending[0], b.pending[1:] if len(b.pending) > 0 { @@ -132,7 +132,7 @@ func (h *NodeHandler) Notify(ctx context.Context, ev NodeHandlerRequest) { func (h *NodeHandler) send(ctx context.Context, ev NodeHandlerRequest) bool { switch cmd := ev.(type) { - case *EventOutboundGetClosestNodes: + case *EventOutboundGetCloserNodes: if cmd.Notify == nil { break } @@ -170,10 +170,10 @@ func (h *NodeHandler) Addresses() []ma.Multiaddr { // GetClosestNodes requests the n closest nodes to the key from the node's local routing table. // The node may return fewer nodes than requested. -func (h *NodeHandler) GetClosestNodes(ctx context.Context, k key.Key256, n int) ([]Node, error) { - w := NewWaiter[DhtEvent]() +func (h *NodeHandler) GetClosestNodes(ctx context.Context, k KadKey, n int) ([]Node, error) { + w := NewWaiter[BehaviourEvent]() - ev := &EventOutboundGetClosestNodes{ + ev := &EventOutboundGetCloserNodes{ QueryID: query.QueryID(key.HexString(k)), To: h.self, Target: k, @@ -210,7 +210,7 @@ func (h *NodeHandler) GetClosestNodes(ctx context.Context, k key.Key256, n int) // GetValue requests that the node return any value associated with the supplied key. // If the node does not have a value for the key it returns ErrValueNotFound. -func (h *NodeHandler) GetValue(ctx context.Context, key key.Key256) (Value, error) { +func (h *NodeHandler) GetValue(ctx context.Context, key KadKey) (Value, error) { panic("not implemented") } @@ -220,8 +220,8 @@ func (h *NodeHandler) PutValue(ctx context.Context, r Value, q int) error { panic("not implemented") } -func CloserNodesResponse(k key.Key256, nodes []peer.AddrInfo) kad.Response[key.Key256, ma.Multiaddr] { - infos := make([]kad.NodeInfo[key.Key256, ma.Multiaddr], len(nodes)) +func CloserNodesResponse(k KadKey, nodes []peer.AddrInfo) kad.Response[KadKey, ma.Multiaddr] { + infos := make([]kad.NodeInfo[KadKey, ma.Multiaddr], len(nodes)) for i := range nodes { infos[i] = kadt.AddrInfo{Info: nodes[i]} } @@ -233,18 +233,18 @@ func CloserNodesResponse(k key.Key256, nodes []peer.AddrInfo) kad.Response[key.K } type fakeMessage struct { - key key.Key256 - infos []kad.NodeInfo[key.Key256, ma.Multiaddr] + key KadKey + infos []kad.NodeInfo[KadKey, ma.Multiaddr] } -func (r fakeMessage) Target() key.Key256 { +func (r fakeMessage) Target() KadKey { return r.key } -func (r fakeMessage) CloserNodes() []kad.NodeInfo[key.Key256, ma.Multiaddr] { +func (r fakeMessage) CloserNodes() []kad.NodeInfo[KadKey, ma.Multiaddr] { return r.infos } -func (r fakeMessage) EmptyResponse() kad.Response[key.Key256, ma.Multiaddr] { +func (r fakeMessage) EmptyResponse() kad.Response[KadKey, ma.Multiaddr] { return &fakeMessage{} } diff --git a/v2/coord/query.go b/v2/coord/query.go index 78fcec67..69766bb4 100644 --- a/v2/coord/query.go +++ b/v2/coord/query.go @@ -7,34 +7,33 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ma "github.com/multiformats/go-multiaddr" - "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/query" "github.com/plprobelab/go-kademlia/util" "golang.org/x/exp/slog" ) type PooledQueryBehaviour struct { - pool *query.Pool[key.Key256, ma.Multiaddr] - waiters map[query.QueryID]NotifyCloser[DhtEvent] + pool *query.Pool[KadKey, ma.Multiaddr] + waiters 
map[query.QueryID]NotifyCloser[BehaviourEvent] pendingMu sync.Mutex - pending []DhtEvent + pending []BehaviourEvent ready chan struct{} logger *slog.Logger } -func NewPooledQueryBehaviour(pool *query.Pool[key.Key256, ma.Multiaddr], logger *slog.Logger) *PooledQueryBehaviour { +func NewPooledQueryBehaviour(pool *query.Pool[KadKey, ma.Multiaddr], logger *slog.Logger) *PooledQueryBehaviour { h := &PooledQueryBehaviour{ pool: pool, - waiters: make(map[query.QueryID]NotifyCloser[DhtEvent]), + waiters: make(map[query.QueryID]NotifyCloser[BehaviourEvent]), ready: make(chan struct{}, 1), logger: logger, } return h } -func (r *PooledQueryBehaviour) Notify(ctx context.Context, ev DhtEvent) { +func (r *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { ctx, span := util.StartSpan(ctx, "PooledQueryBehaviour.Notify") defer span.End() @@ -44,7 +43,7 @@ func (r *PooledQueryBehaviour) Notify(ctx context.Context, ev DhtEvent) { var cmd query.PoolEvent switch ev := ev.(type) { case *EventStartQuery: - cmd = &query.EventPoolAddQuery[key.Key256, ma.Multiaddr]{ + cmd = &query.EventPoolAddQuery[KadKey, ma.Multiaddr]{ QueryID: ev.QueryID, Target: ev.Target, ProtocolID: ev.ProtocolID, @@ -63,7 +62,7 @@ func (r *PooledQueryBehaviour) Notify(ctx context.Context, ev DhtEvent) { case *EventGetCloserNodesSuccess: for _, info := range ev.CloserNodes { // TODO: do this after advancing pool - r.pending = append(r.pending, &EventDhtAddNodeInfo{ + r.pending = append(r.pending, &EventAddAddrInfo{ NodeInfo: info, }) } @@ -76,13 +75,13 @@ func (r *PooledQueryBehaviour) Notify(ctx context.Context, ev DhtEvent) { // Stats: stats, }) } - cmd = &query.EventPoolMessageResponse[key.Key256, ma.Multiaddr]{ + cmd = &query.EventPoolMessageResponse[KadKey, ma.Multiaddr]{ NodeID: kadt.PeerID(ev.To.ID), QueryID: ev.QueryID, Response: CloserNodesResponse(ev.Target, ev.CloserNodes), } case *EventGetCloserNodesFailure: - cmd = &query.EventPoolMessageFailure[key.Key256]{ + cmd = &query.EventPoolMessageFailure[KadKey]{ NodeID: kadt.PeerID(ev.To.ID), QueryID: ev.QueryID, Error: ev.Err, @@ -108,7 +107,7 @@ func (r *PooledQueryBehaviour) Ready() <-chan struct{} { return r.ready } -func (r *PooledQueryBehaviour) Perform(ctx context.Context) (DhtEvent, bool) { +func (r *PooledQueryBehaviour) Perform(ctx context.Context) (BehaviourEvent, bool) { ctx, span := util.StartSpan(ctx, "PooledQueryBehaviour.Perform") defer span.End() @@ -119,7 +118,7 @@ func (r *PooledQueryBehaviour) Perform(ctx context.Context) (DhtEvent, bool) { for { // drain queued events first. 
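All three behaviours in this patch (network, query, routing) share the same shape: Notify appends to a pending slice and signals a one-slot ready channel, while Perform drains one event per call. The condensed sketch below shows that pattern in one place; miniBehaviour and newMiniBehaviour are illustrative names, not part of the patch, and the sketch assumes the BehaviourEvent interface from this package plus the "context" and "sync" imports.

// Minimal sketch of the shared Notify/Ready/Perform behaviour shape.
type miniBehaviour struct {
	mu      sync.Mutex
	pending []BehaviourEvent
	ready   chan struct{} // one-slot buffer, created in newMiniBehaviour
}

func newMiniBehaviour() *miniBehaviour {
	return &miniBehaviour{ready: make(chan struct{}, 1)}
}

// Notify queues the event and signals the ready channel so the coordinator's
// event loop knows there is work to perform.
func (b *miniBehaviour) Notify(ctx context.Context, ev BehaviourEvent) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.pending = append(b.pending, ev)
	select {
	case b.ready <- struct{}{}:
	default: // already signalled
	}
}

// Ready returns the channel the coordinator's event loop selects on.
func (b *miniBehaviour) Ready() <-chan struct{} { return b.ready }

// Perform drains one queued event per call and re-signals readiness if more
// work remains.
func (b *miniBehaviour) Perform(ctx context.Context) (BehaviourEvent, bool) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.pending) == 0 {
		return nil, false
	}
	var ev BehaviourEvent
	ev, b.pending = b.pending[0], b.pending[1:]
	if len(b.pending) > 0 {
		select {
		case b.ready <- struct{}{}:
		default:
		}
	}
	return ev, true
}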
if len(r.pending) > 0 { - var ev DhtEvent + var ev BehaviourEvent ev, r.pending = r.pending[0], r.pending[1:] if len(r.pending) > 0 { @@ -143,14 +142,14 @@ func (r *PooledQueryBehaviour) Perform(ctx context.Context) (DhtEvent, bool) { } } -func (r *PooledQueryBehaviour) advancePool(ctx context.Context, ev query.PoolEvent) (DhtEvent, bool) { +func (r *PooledQueryBehaviour) advancePool(ctx context.Context, ev query.PoolEvent) (BehaviourEvent, bool) { ctx, span := util.StartSpan(ctx, "PooledQueryBehaviour.advancePool") defer span.End() pstate := r.pool.Advance(ctx, ev) switch st := pstate.(type) { - case *query.StatePoolQueryMessage[key.Key256, ma.Multiaddr]: - return &EventOutboundGetClosestNodes{ + case *query.StatePoolQueryMessage[KadKey, ma.Multiaddr]: + return &EventOutboundGetCloserNodes{ QueryID: st.QueryID, To: NodeIDToAddrInfo(st.NodeID), Target: st.Message.Target(), diff --git a/v2/coord/routing.go b/v2/coord/routing.go index f36801b2..11e70c41 100644 --- a/v2/coord/routing.go +++ b/v2/coord/routing.go @@ -7,7 +7,6 @@ import ( "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" - "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/routing" "github.com/plprobelab/go-kademlia/util" "go.opentelemetry.io/otel/attribute" @@ -16,8 +15,9 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) +// A RoutingBehaviour provides the behaviours for bootstrapping and maintaining a DHT's routing table. type RoutingBehaviour struct { - // self is the node id of the system the dht is running on + // self is the peer id of the system the dht is running on self peer.ID // bootstrap is the bootstrap state machine, responsible for bootstrapping the routing table bootstrap SM[routing.BootstrapEvent, routing.BootstrapState] @@ -29,7 +29,7 @@ type RoutingBehaviour struct { probe SM[routing.ProbeEvent, routing.ProbeState] pendingMu sync.Mutex - pending []DhtEvent + pending []BehaviourEvent ready chan struct{} logger *slog.Logger @@ -47,7 +47,7 @@ func NewRoutingBehaviour(self peer.ID, bootstrap SM[routing.BootstrapEvent, rout return r } -func (r *RoutingBehaviour) Notify(ctx context.Context, ev DhtEvent) { +func (r *RoutingBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { ctx, span := util.StartSpan(ctx, "RoutingBehaviour.Notify") defer span.End() @@ -57,13 +57,13 @@ func (r *RoutingBehaviour) Notify(ctx context.Context, ev DhtEvent) { } // notify must only be called while r.pendingMu is held -func (r *RoutingBehaviour) notify(ctx context.Context, ev DhtEvent) { +func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { ctx, span := util.StartSpan(ctx, "RoutingBehaviour.notify") defer span.End() switch ev := ev.(type) { - case *EventDhtStartBootstrap: - span.SetAttributes(attribute.String("event", "EventDhtStartBootstrap")) - cmd := &routing.EventBootstrapStart[key.Key256, ma.Multiaddr]{ + case *EventStartBootstrap: + span.SetAttributes(attribute.String("event", "EventStartBootstrap")) + cmd := &routing.EventBootstrapStart[KadKey, ma.Multiaddr]{ ProtocolID: ev.ProtocolID, Message: ev.Message, KnownClosestNodes: SliceOfPeerIDToSliceOfNodeID(ev.SeedNodes), @@ -74,13 +74,13 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev DhtEvent) { r.pending = append(r.pending, next) } - case *EventDhtAddNodeInfo: - span.SetAttributes(attribute.String("event", "EventDhtAddNodeInfo")) + case *EventAddAddrInfo: + span.SetAttributes(attribute.String("event", "EventAddAddrInfo")) // Ignore self if ev.NodeInfo.ID == r.self { break } - 
cmd := &routing.EventIncludeAddCandidate[key.Key256, ma.Multiaddr]{ + cmd := &routing.EventIncludeAddCandidate[KadKey, ma.Multiaddr]{ NodeInfo: kadt.AddrInfo{Info: ev.NodeInfo}, } // attempt to advance the include @@ -91,7 +91,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev DhtEvent) { case *EventRoutingUpdated: span.SetAttributes(attribute.String("event", "EventRoutingUpdated")) - cmd := &routing.EventProbeAdd[key.Key256]{ + cmd := &routing.EventProbeAdd[KadKey]{ NodeID: ev.NodeInfo.ID(), } // attempt to advance the probe state machine @@ -101,16 +101,16 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev DhtEvent) { } case *EventGetCloserNodesSuccess: - span.SetAttributes(attribute.String("event", "EventGetClosestNodesFailure"), attribute.String("queryid", string(ev.QueryID)), attribute.String("nodeid", ev.To.String())) + span.SetAttributes(attribute.String("event", "EventGetCloserNodesSuccess"), attribute.String("queryid", string(ev.QueryID)), attribute.String("nodeid", ev.To.String())) switch ev.QueryID { case "bootstrap": for _, info := range ev.CloserNodes { // TODO: do this after advancing bootstrap - r.pending = append(r.pending, &EventDhtAddNodeInfo{ + r.pending = append(r.pending, &EventAddAddrInfo{ NodeInfo: info, }) } - cmd := &routing.EventBootstrapMessageResponse[key.Key256, ma.Multiaddr]{ + cmd := &routing.EventBootstrapMessageResponse[KadKey, ma.Multiaddr]{ NodeID: kadt.PeerID(ev.To.ID), Response: CloserNodesResponse(ev.Target, ev.CloserNodes), } @@ -121,7 +121,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev DhtEvent) { } case "include": - cmd := &routing.EventIncludeMessageResponse[key.Key256, ma.Multiaddr]{ + cmd := &routing.EventIncludeMessageResponse[KadKey, ma.Multiaddr]{ NodeInfo: kadt.AddrInfo{Info: ev.To}, Response: CloserNodesResponse(ev.Target, ev.CloserNodes), } @@ -132,7 +132,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev DhtEvent) { } case "probe": - cmd := &routing.EventProbeMessageResponse[key.Key256, ma.Multiaddr]{ + cmd := &routing.EventProbeMessageResponse[KadKey, ma.Multiaddr]{ NodeInfo: kadt.AddrInfo{Info: ev.To}, Response: CloserNodesResponse(ev.Target, ev.CloserNodes), } @@ -146,11 +146,11 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev DhtEvent) { panic(fmt.Sprintf("unexpected query id: %s", ev.QueryID)) } case *EventGetCloserNodesFailure: - span.SetAttributes(attribute.String("event", "EventGetClosestNodesFailure"), attribute.String("queryid", string(ev.QueryID)), attribute.String("nodeid", ev.To.String())) + span.SetAttributes(attribute.String("event", "EventGetCloserNodesFailure"), attribute.String("queryid", string(ev.QueryID)), attribute.String("nodeid", ev.To.String())) span.RecordError(ev.Err) switch ev.QueryID { case "bootstrap": - cmd := &routing.EventBootstrapMessageFailure[key.Key256]{ + cmd := &routing.EventBootstrapMessageFailure[KadKey]{ NodeID: kadt.PeerID(ev.To.ID), Error: ev.Err, } @@ -160,7 +160,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev DhtEvent) { r.pending = append(r.pending, next) } case "include": - cmd := &routing.EventIncludeMessageFailure[key.Key256, ma.Multiaddr]{ + cmd := &routing.EventIncludeMessageFailure[KadKey, ma.Multiaddr]{ NodeInfo: kadt.AddrInfo{Info: ev.To}, Error: ev.Err, } @@ -170,7 +170,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev DhtEvent) { r.pending = append(r.pending, next) } case "probe": - cmd := &routing.EventProbeMessageFailure[key.Key256, ma.Multiaddr]{ + cmd := 
&routing.EventProbeMessageFailure[KadKey, ma.Multiaddr]{ NodeInfo: kadt.AddrInfo{Info: ev.To}, Error: ev.Err, } @@ -199,7 +199,7 @@ func (r *RoutingBehaviour) Ready() <-chan struct{} { return r.ready } -func (r *RoutingBehaviour) Perform(ctx context.Context) (DhtEvent, bool) { +func (r *RoutingBehaviour) Perform(ctx context.Context) (BehaviourEvent, bool) { ctx, span := util.StartSpan(ctx, "RoutingBehaviour.Perform") defer span.End() @@ -210,7 +210,7 @@ func (r *RoutingBehaviour) Perform(ctx context.Context) (DhtEvent, bool) { for { // drain queued events first. if len(r.pending) > 0 { - var ev DhtEvent + var ev BehaviourEvent ev, r.pending = r.pending[0], r.pending[1:] if len(r.pending) > 0 { @@ -246,14 +246,14 @@ func (r *RoutingBehaviour) Perform(ctx context.Context) (DhtEvent, bool) { } } -func (r *RoutingBehaviour) advanceBootstrap(ctx context.Context, ev routing.BootstrapEvent) (DhtEvent, bool) { +func (r *RoutingBehaviour) advanceBootstrap(ctx context.Context, ev routing.BootstrapEvent) (BehaviourEvent, bool) { ctx, span := util.StartSpan(ctx, "RoutingBehaviour.advanceBootstrap") defer span.End() bstate := r.bootstrap.Advance(ctx, ev) switch st := bstate.(type) { - case *routing.StateBootstrapMessage[key.Key256, ma.Multiaddr]: - return &EventOutboundGetClosestNodes{ + case *routing.StateBootstrapMessage[KadKey, ma.Multiaddr]: + return &EventOutboundGetCloserNodes{ QueryID: "bootstrap", To: NodeIDToAddrInfo(st.NodeID), Target: st.Message.Target(), @@ -275,21 +275,21 @@ func (r *RoutingBehaviour) advanceBootstrap(ctx context.Context, ev routing.Boot return nil, false } -func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.IncludeEvent) (DhtEvent, bool) { +func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.IncludeEvent) (BehaviourEvent, bool) { ctx, span := util.StartSpan(ctx, "RoutingBehaviour.advanceInclude") defer span.End() istate := r.include.Advance(ctx, ev) switch st := istate.(type) { - case *routing.StateIncludeFindNodeMessage[key.Key256, ma.Multiaddr]: + case *routing.StateIncludeFindNodeMessage[KadKey, ma.Multiaddr]: // include wants to send a find node message to a node - return &EventOutboundGetClosestNodes{ + return &EventOutboundGetCloserNodes{ QueryID: "include", To: NodeInfoToAddrInfo(st.NodeInfo), Target: st.NodeInfo.ID().Key(), Notify: r, }, true - case *routing.StateIncludeRoutingUpdated[key.Key256, ma.Multiaddr]: + case *routing.StateIncludeRoutingUpdated[KadKey, ma.Multiaddr]: // a node has been included in the routing table // notify other routing state machines that there is a new node in the routing table @@ -316,23 +316,23 @@ func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.Includ return nil, false } -func (r *RoutingBehaviour) advanceProbe(ctx context.Context, ev routing.ProbeEvent) (DhtEvent, bool) { +func (r *RoutingBehaviour) advanceProbe(ctx context.Context, ev routing.ProbeEvent) (BehaviourEvent, bool) { ctx, span := util.StartSpan(ctx, "RoutingBehaviour.advanceProbe") defer span.End() st := r.probe.Advance(ctx, ev) switch st := st.(type) { - case *routing.StateProbeConnectivityCheck[key.Key256]: + case *routing.StateProbeConnectivityCheck[KadKey]: // include wants to send a find node message to a node - return &EventOutboundGetClosestNodes{ + return &EventOutboundGetCloserNodes{ QueryID: "probe", To: NodeIDToAddrInfo(st.NodeID), Target: st.NodeID.Key(), Notify: r, }, true - case *routing.StateProbeNodeFailure[key.Key256]: + case *routing.StateProbeNodeFailure[KadKey]: // a node has 
failed a connectivity check been removed from the routing table and the probe list // add the node to the inclusion list for a second chance - r.notify(ctx, &EventDhtAddNodeInfo{ + r.notify(ctx, &EventAddAddrInfo{ NodeInfo: NodeIDToAddrInfo(st.NodeID), }) case *routing.StateProbeWaitingAtCapacity: diff --git a/v2/coord/routing_test.go b/v2/coord/routing_test.go index 58f56bac..6b6e0498 100644 --- a/v2/coord/routing_test.go +++ b/v2/coord/routing_test.go @@ -8,7 +8,6 @@ import ( "github.com/benbjohnson/clock" "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" - "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/network/address" "github.com/plprobelab/go-kademlia/query" "github.com/plprobelab/go-kademlia/routing" @@ -43,7 +42,7 @@ func TestRoutingStartBootstrapSendsEvent(t *testing.T) { Key: []byte(self), } - ev := &EventDhtStartBootstrap{ + ev := &EventStartBootstrap{ ProtocolID: address.ProtocolID("test"), Message: req, SeedNodes: []peer.ID{nodes[1].NodeInfo.ID}, @@ -52,7 +51,7 @@ func TestRoutingStartBootstrapSendsEvent(t *testing.T) { routingBehaviour.Notify(ctx, ev) // the event that should be passed to the bootstrap state machine - expected := &routing.EventBootstrapStart[key.Key256, ma.Multiaddr]{ + expected := &routing.EventBootstrapStart[KadKey, ma.Multiaddr]{ ProtocolID: ev.ProtocolID, Message: ev.Message, KnownClosestNodes: SliceOfPeerIDToSliceOfNodeID(ev.SeedNodes), @@ -78,18 +77,18 @@ func TestRoutingBootstrapGetClosestNodesSuccess(t *testing.T) { routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default()) ev := &EventGetCloserNodesSuccess{ - QueryID: query.QueryID("bootstrap"), - To: nodes[1].NodeInfo, - Target: kadt.PeerID(nodes[0].NodeInfo.ID).Key(), + QueryID: query.QueryID("bootstrap"), + To: nodes[1].NodeInfo, + Target: kadt.PeerID(nodes[0].NodeInfo.ID).Key(), CloserNodes: []peer.AddrInfo{nodes[2].NodeInfo}, } routingBehaviour.Notify(ctx, ev) // bootstrap should receive message response event - require.IsType(t, &routing.EventBootstrapMessageResponse[key.Key256, ma.Multiaddr]{}, bootstrap.Received) + require.IsType(t, &routing.EventBootstrapMessageResponse[KadKey, ma.Multiaddr]{}, bootstrap.Received) - rev := bootstrap.Received.(*routing.EventBootstrapMessageResponse[key.Key256, ma.Multiaddr]) + rev := bootstrap.Received.(*routing.EventBootstrapMessageResponse[KadKey, ma.Multiaddr]) require.Equal(t, nodes[1].NodeInfo.ID, NodeIDToPeerID(rev.NodeID)) require.Equal(t, ev.CloserNodes, SliceOfNodeInfoToSliceOfAddrInfo(rev.Response.CloserNodes())) } @@ -122,9 +121,9 @@ func TestRoutingBootstrapGetClosestNodesFailure(t *testing.T) { routingBehaviour.Notify(ctx, ev) // bootstrap should receive message response event - require.IsType(t, &routing.EventBootstrapMessageFailure[key.Key256]{}, bootstrap.Received) + require.IsType(t, &routing.EventBootstrapMessageFailure[KadKey]{}, bootstrap.Received) - rev := bootstrap.Received.(*routing.EventBootstrapMessageFailure[key.Key256]) + rev := bootstrap.Received.(*routing.EventBootstrapMessageFailure[KadKey]) require.Equal(t, nodes[1].NodeInfo.ID, NodeIDToPeerID(rev.NodeID)) require.Equal(t, failure, rev.Error) } @@ -147,14 +146,14 @@ func TestRoutingAddNodeInfoSendsEvent(t *testing.T) { routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default()) - ev := &EventDhtAddNodeInfo{ + ev := &EventAddAddrInfo{ NodeInfo: nodes[2].NodeInfo, } routingBehaviour.Notify(ctx, ev) // the event that should be passed to the include state machine - 
expected := &routing.EventIncludeAddCandidate[key.Key256, ma.Multiaddr]{ + expected := &routing.EventIncludeAddCandidate[KadKey, ma.Multiaddr]{ NodeInfo: kadt.AddrInfo{Info: ev.NodeInfo}, } require.Equal(t, expected, include.Received) @@ -179,18 +178,18 @@ func TestRoutingIncludeGetClosestNodesSuccess(t *testing.T) { routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default()) ev := &EventGetCloserNodesSuccess{ - QueryID: query.QueryID("include"), - To: nodes[1].NodeInfo, - Target: kadt.PeerID(nodes[0].NodeInfo.ID).Key(), + QueryID: query.QueryID("include"), + To: nodes[1].NodeInfo, + Target: kadt.PeerID(nodes[0].NodeInfo.ID).Key(), CloserNodes: []peer.AddrInfo{nodes[2].NodeInfo}, } routingBehaviour.Notify(ctx, ev) // include should receive message response event - require.IsType(t, &routing.EventIncludeMessageResponse[key.Key256, ma.Multiaddr]{}, include.Received) + require.IsType(t, &routing.EventIncludeMessageResponse[KadKey, ma.Multiaddr]{}, include.Received) - rev := include.Received.(*routing.EventIncludeMessageResponse[key.Key256, ma.Multiaddr]) + rev := include.Received.(*routing.EventIncludeMessageResponse[KadKey, ma.Multiaddr]) require.Equal(t, nodes[1].NodeInfo, NodeInfoToAddrInfo(rev.NodeInfo)) require.Equal(t, ev.CloserNodes, SliceOfNodeInfoToSliceOfAddrInfo(rev.Response.CloserNodes())) } @@ -224,9 +223,9 @@ func TestRoutingIncludeGetClosestNodesFailure(t *testing.T) { routingBehaviour.Notify(ctx, ev) // include should receive message response event - require.IsType(t, &routing.EventIncludeMessageFailure[key.Key256, ma.Multiaddr]{}, include.Received) + require.IsType(t, &routing.EventIncludeMessageFailure[KadKey, ma.Multiaddr]{}, include.Received) - rev := include.Received.(*routing.EventIncludeMessageFailure[key.Key256, ma.Multiaddr]) + rev := include.Received.(*routing.EventIncludeMessageFailure[KadKey, ma.Multiaddr]) require.Equal(t, nodes[1].NodeInfo, NodeInfoToAddrInfo(rev.NodeInfo)) require.Equal(t, failure, rev.Error) } @@ -244,13 +243,13 @@ func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { includeCfg := routing.DefaultIncludeConfig() includeCfg.Clock = clk - include, err := routing.NewInclude[key.Key256, ma.Multiaddr](rt, includeCfg) + include, err := routing.NewInclude[KadKey, ma.Multiaddr](rt, includeCfg) require.NoError(t, err) probeCfg := routing.DefaultProbeConfig() probeCfg.Clock = clk probeCfg.CheckInterval = 5 * time.Minute - probe, err := routing.NewProbe[key.Key256, ma.Multiaddr](rt, probeCfg) + probe, err := routing.NewProbe[KadKey, ma.Multiaddr](rt, probeCfg) require.NoError(t, err) // ensure bootstrap is always idle @@ -266,7 +265,7 @@ func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { require.False(t, intable) // notify that there is a new node to be included - routingBehaviour.Notify(ctx, &EventDhtAddNodeInfo{ + routingBehaviour.Notify(ctx, &EventAddAddrInfo{ NodeInfo: candidate, }) @@ -275,18 +274,18 @@ func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { require.True(t, ok) // include should be asking to send a message to the node - require.IsType(t, &EventOutboundGetClosestNodes{}, dev) + require.IsType(t, &EventOutboundGetCloserNodes{}, dev) - oev := dev.(*EventOutboundGetClosestNodes) + oev := dev.(*EventOutboundGetCloserNodes) // advance time a little clk.Add(time.Second) // notify a successful response back (best to use the notify included in the event even though it will be the behaviour's Notify method) oev.Notify.Notify(ctx, &EventGetCloserNodesSuccess{ - QueryID: oev.QueryID, - To: oev.To, - 
Target: oev.Target, + QueryID: oev.QueryID, + To: oev.To, + Target: oev.Target, CloserNodes: []peer.AddrInfo{nodes[1].NodeInfo}, // must include one for include check to pass }) @@ -305,10 +304,10 @@ func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { // routing update event should be emitted from the include state machine dev, ok = routingBehaviour.Perform(ctx) require.True(t, ok) - require.IsType(t, &EventOutboundGetClosestNodes{}, dev) + require.IsType(t, &EventOutboundGetCloserNodes{}, dev) // confirm that the message is for the correct node - oev = dev.(*EventOutboundGetClosestNodes) + oev = dev.(*EventOutboundGetCloserNodes) require.Equal(t, query.QueryID("probe"), oev.QueryID) require.Equal(t, candidate.ID, oev.To.ID) } From fbe3f447a2238b8f3061f8f4f054db29acea5b8a Mon Sep 17 00:00:00 2001 From: Ian Davis <18375+iand@users.noreply.github.com> Date: Mon, 4 Sep 2023 16:47:42 +0100 Subject: [PATCH 12/26] Minor naming cleanup --- v2/coord/coordinator.go | 2 +- v2/coord/coordinator_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/v2/coord/coordinator.go b/v2/coord/coordinator.go index ee9a2a52..9d33066f 100644 --- a/v2/coord/coordinator.go +++ b/v2/coord/coordinator.go @@ -117,7 +117,7 @@ func DefaultConfig() *CoordinatorConfig { QueryTimeout: 5 * time.Minute, RequestConcurrency: 3, RequestTimeout: time.Minute, - Logger: slog.New(zapslog.NewHandler(logging.Logger("dht").Desugar().Core())), + Logger: slog.New(zapslog.NewHandler(logging.Logger("coord").Desugar().Core())), } } diff --git a/v2/coord/coordinator_test.go b/v2/coord/coordinator_test.go index 4fb221da..213c3588 100644 --- a/v2/coord/coordinator_test.go +++ b/v2/coord/coordinator_test.go @@ -142,7 +142,7 @@ func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { self := nodes[0].NodeInfo.ID c, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg) if err != nil { - log.Fatalf("unexpected error creating dht: %v", err) + log.Fatalf("unexpected error creating coordinator: %v", err) } buffer := make(chan RoutingNotification, 5) From dca5c5dfa6a245ccdab7c3b0a845d7187a266472 Mon Sep 17 00:00:00 2001 From: Ian Davis <18375+iand@users.noreply.github.com> Date: Tue, 5 Sep 2023 12:16:45 +0100 Subject: [PATCH 13/26] Change maintainers for v2 while being developed --- CODEOWNERS | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CODEOWNERS b/CODEOWNERS index 43a81df8..1ee0652f 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -13,3 +13,6 @@ # records are IPFS specific /records.go @libp2p/kubo-maintainers @guillaumemichel /records_test.go @libp2p/kubo-maintainers @guillaumemichel + + +/v2/ @dennis-tra @iand From afa8051b04604222542e53eb981f13a8fefbc397 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Tue, 5 Sep 2023 14:18:29 +0200 Subject: [PATCH 14/26] remove Zikade dependency --- v2/config.go | 5 ++- v2/coord/internal/kadtest/context.go | 1 + v2/dht.go | 10 ++--- v2/go.mod | 3 -- v2/go.sum | 2 - v2/kadt/kadt.go | 20 ++++----- v2/pb/msg.aux.go | 4 +- v2/router.go | 62 +++++++--------------------- v2/routing.go | 15 ++++--- v2/routing_test.go | 52 ----------------------- 10 files changed, 44 insertions(+), 130 deletions(-) delete mode 100644 v2/routing_test.go diff --git a/v2/config.go b/v2/config.go index e1e18347..c52425b2 100644 --- a/v2/config.go +++ b/v2/config.go @@ -13,6 +13,7 @@ import ( "github.com/plprobelab/go-kademlia/coord" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-kademlia/routing" 
"github.com/plprobelab/go-kademlia/routing/triert" "go.opentelemetry.io/otel" "go.uber.org/zap/exp/zapslog" @@ -123,7 +124,7 @@ type Config struct { // [triert.TrieRT] routing table will be used. This field will be nil // in the default configuration because a routing table requires information // about the local node. - RoutingTable kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]] + RoutingTable routing.RoutingTableCpl[key.Key256, kad.NodeID[key.Key256]] // The Backends field holds a map of key namespaces to their corresponding // backend implementation. For example, if we received an IPNS record, the @@ -184,7 +185,7 @@ func DefaultConfig() *Config { // DefaultRoutingTable returns a triert.TrieRT routing table. This routing table // cannot be initialized in [DefaultConfig] because it requires information // about the local peer. -func DefaultRoutingTable(nodeID kad.NodeID[key.Key256]) (kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]], error) { +func DefaultRoutingTable(nodeID kad.NodeID[key.Key256]) (routing.RoutingTableCpl[key.Key256, kad.NodeID[key.Key256]], error) { rtCfg := triert.DefaultConfig[key.Key256, kad.NodeID[key.Key256]]() rt, err := triert.New[key.Key256, kad.NodeID[key.Key256]](nodeID, rtCfg) if err != nil { diff --git a/v2/coord/internal/kadtest/context.go b/v2/coord/internal/kadtest/context.go index 659d328f..1ef31f40 100644 --- a/v2/coord/internal/kadtest/context.go +++ b/v2/coord/internal/kadtest/context.go @@ -26,5 +26,6 @@ func CtxShort(t *testing.T) (context.Context, context.CancelFunc) { deadline = goal } } + return context.WithDeadline(context.Background(), deadline) } diff --git a/v2/dht.go b/v2/dht.go index 6d8451a6..675c83bc 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -6,16 +6,16 @@ import ( "io" "sync" - "github.com/iand/zikade/kademlia" "github.com/ipfs/go-datastore/trace" "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" - ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" + "github.com/plprobelab/go-kademlia/routing" "golang.org/x/exp/slog" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) @@ -35,11 +35,11 @@ type DHT struct { mode mode // kad is a reference to the go-kademlia coordinator - kad *kademlia.Dht[key.Key256, ma.Multiaddr] + kad *coord.Coordinator // rt holds a reference to the routing table implementation. This can be // configured via the Config struct. - rt kad.RoutingTable[key.Key256, kad.NodeID[key.Key256]] + rt routing.RoutingTableCpl[key.Key256, kad.NodeID[key.Key256]] // backends backends map[string]Backend @@ -121,7 +121,7 @@ func New(h host.Host, cfg *Config) (*DHT, error) { } // instantiate a new Kademlia DHT coordinator. 
- d.kad, err = kademlia.NewDht[key.Key256, ma.Multiaddr](nid, &Router{host: h}, d.rt, nil) + d.kad, err = coord.NewCoordinator(d.host.ID(), &Router{host: h}, d.rt, nil) if err != nil { return nil, fmt.Errorf("new coordinator: %w", err) } diff --git a/v2/go.mod b/v2/go.mod index dba7a727..6300136c 100644 --- a/v2/go.mod +++ b/v2/go.mod @@ -5,7 +5,6 @@ go 1.20 require ( github.com/benbjohnson/clock v1.3.5 github.com/hashicorp/golang-lru/v2 v2.0.5 - github.com/iand/zikade v0.0.0-20230824143824-d11f2132b4fb github.com/ipfs/boxo v0.12.0 github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-datastore v0.6.0 @@ -117,5 +116,3 @@ require ( ) replace github.com/ipfs/go-datastore v0.6.0 => github.com/dennis-tra/go-datastore v0.0.0-20230823171431-12ac85aaa42a - -replace github.com/iand/zikade v0.0.0-20230824143824-d11f2132b4fb => github.com/dennis-tra/zikade v0.0.0-20230830153809-e6af3ad31acd diff --git a/v2/go.sum b/v2/go.sum index c5ae0a1a..a7ac15d9 100644 --- a/v2/go.sum +++ b/v2/go.sum @@ -43,8 +43,6 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etly github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/dennis-tra/go-datastore v0.0.0-20230823171431-12ac85aaa42a h1:YnrW4Kcy7kTIJRfL3Xg7+fIMS17izs0WWH2GdYwYhNs= github.com/dennis-tra/go-datastore v0.0.0-20230823171431-12ac85aaa42a/go.mod h1:3Et7HSjOA8tPu9OjYuDZxLAgBLfvlNMD4r8BIuri9eo= -github.com/dennis-tra/zikade v0.0.0-20230830153809-e6af3ad31acd h1:EGfJ0TEVP3z99zFuMOztW7wTb/60nHpEzLcAmS+eknA= -github.com/dennis-tra/zikade v0.0.0-20230830153809-e6af3ad31acd/go.mod h1:k5AXGe5qXg7d/pUBDNQvtmTvsnXRehzGNf1XC04+qBM= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= diff --git a/v2/kadt/kadt.go b/v2/kadt/kadt.go index 9db79bad..d71ecc39 100644 --- a/v2/kadt/kadt.go +++ b/v2/kadt/kadt.go @@ -13,23 +13,23 @@ import ( "github.com/plprobelab/go-kademlia/key" ) -// PeerID is a type alias for peer.ID that implements the kad.NodeID interface. -// This means we can use PeerID for any operation that interfaces with -// go-kademlia. +// PeerID is a type alias for [peer.ID] that implements the [kad.NodeID] +// interface. This means we can use PeerID for any operation that interfaces +// with go-kademlia. type PeerID peer.ID // assertion that PeerID implements the kad.NodeID interface var _ kad.NodeID[key.Key256] = PeerID("") -// Key returns the Kademlia key of PeerID. The amino DHT operates on SHA256 -// hashes of, in this case, peer.IDs. This means this Key method takes -// the peer.ID, hashes it and constructs a 256-bit key. +// Key returns the Kademlia [key.Key256] of PeerID. The amino DHT operates on +// SHA256 hashes of, in this case, peer.IDs. This means this Key method takes +// the [peer.ID], hashes it and constructs a 256-bit key. func (p PeerID) Key() key.Key256 { h := sha256.Sum256([]byte(p)) return key.NewKey256(h[:]) } -// String calls String on the underlying peer.ID and returns a string like +// String calls String on the underlying [peer.ID] and returns a string like // QmFoo or 12D3KooBar. 
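The kadt comments above describe how a PeerID maps onto the 256-bit keyspace: the key is the SHA256 digest of the raw peer ID bytes. Here is a small, self-contained sketch of that derivation; keyForPeer and main are illustrative names only, and the sample peer ID is the bootstrap ID used in the routing test removed later in this patch.

// Minimal sketch: derive the Kademlia key for a peer, mirroring kadt.PeerID.Key.
package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/plprobelab/go-kademlia/key"
)

// keyForPeer hashes the raw peer ID bytes with SHA256 and wraps the digest as
// a 256-bit Kademlia key.
func keyForPeer(p peer.ID) key.Key256 {
	h := sha256.Sum256([]byte(p))
	return key.NewKey256(h[:])
}

func main() {
	p, err := peer.Decode("12D3KooWGjgvfDkpuVAoNhd7PRRvMTEG4ZgzHBFURqDe1mqEzAMS")
	if err != nil {
		panic(err)
	}
	fmt.Println(key.HexString(keyForPeer(p)))
}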
func (p PeerID) String() string { return peer.ID(p).String() @@ -45,15 +45,15 @@ type AddrInfo struct { Info peer.AddrInfo } -// assertion that AddrInfo implements the kad.NodeInfo interface +// assertion that AddrInfo implements the [kad.NodeInfo] interface var _ kad.NodeInfo[key.Key256, ma.Multiaddr] = (*AddrInfo)(nil) -// ID returns the kad.NodeID of this peer's information struct. +// ID returns the [kad.NodeID] of this peer's information struct. func (ai AddrInfo) ID() kad.NodeID[key.Key256] { return PeerID(ai.Info.ID) } -// ID returns the peer ID of this peer's information struct as a PeerID +// PeerID returns the peer ID of this peer's information struct as a PeerID. func (ai AddrInfo) PeerID() PeerID { return PeerID(ai.Info.ID) } diff --git a/v2/pb/msg.aux.go b/v2/pb/msg.aux.go index cd9f5588..78f896a3 100644 --- a/v2/pb/msg.aux.go +++ b/v2/pb/msg.aux.go @@ -4,13 +4,13 @@ import ( "bytes" "crypto/sha256" + "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "golang.org/x/exp/slog" - - "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) // this file contains auxiliary methods to augment the protobuf generated types. diff --git a/v2/router.go b/v2/router.go index 2e046226..8c40471d 100644 --- a/v2/router.go +++ b/v2/router.go @@ -5,21 +5,19 @@ import ( "fmt" "time" - "github.com/iand/zikade/kademlia" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-msgio" "github.com/libp2p/go-msgio/pbio" - ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/network/address" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) @@ -27,7 +25,7 @@ type Router struct { host host.Host } -var _ kademlia.Router[key.Key256, ma.Multiaddr] = (*Router)(nil) +var _ coord.Router = (*Router)(nil) func WriteMsg(s network.Stream, msg protoreflect.ProtoMessage) error { w := pbio.NewDelimitedWriter(s) @@ -53,26 +51,13 @@ type ProtoKadResponseMessage[K kad.Key[K], A kad.Address[A]] interface { kad.Response[K, A] } -func (r *Router) SendMessage(ctx context.Context, to kad.NodeInfo[key.Key256, ma.Multiaddr], protoID address.ProtocolID, req kad.Request[key.Key256, ma.Multiaddr]) (kad.Response[key.Key256, ma.Multiaddr], error) { +func (r *Router) SendMessage(ctx context.Context, to peer.AddrInfo, protoID address.ProtocolID, req *pb.Message) (*pb.Message, error) { if err := r.AddNodeInfo(ctx, to, time.Hour); err != nil { return nil, fmt.Errorf("add node info: %w", err) } - protoReq, ok := req.(ProtoKadMessage) - if !ok { - return nil, fmt.Errorf("aaah ProtoKadMessage") - } - - var p peer.ID - nid, ok := to.ID().(kadt.PeerID) - if !ok { - naddr := to.(*kademlia.NodeAddr[key.Key256, ma.Multiaddr]) - p = peer.ID(naddr.ID().(kadt.PeerID)) - } else { - p = peer.ID(nid) - } - - if len(r.host.Peerstore().Addrs(p)) == 0 { + // TODO: what to do with addresses in peer.AddrInfo? 
+ if len(r.host.Peerstore().Addrs(to.ID)) == 0 { return nil, fmt.Errorf("aaah ProtoKadMessage") } @@ -83,7 +68,7 @@ func (r *Router) SendMessage(ctx context.Context, to kad.NodeInfo[key.Key256, ma var err error var s network.Stream - s, err = r.host.NewStream(ctx, p, protocol.ID(protoID)) + s, err = r.host.NewStream(ctx, to.ID, protocol.ID(protoID)) if err != nil { return nil, fmt.Errorf("stream creation: %w", err) } @@ -92,7 +77,7 @@ func (r *Router) SendMessage(ctx context.Context, to kad.NodeInfo[key.Key256, ma w := pbio.NewDelimitedWriter(s) reader := msgio.NewVarintReaderSize(s, network.MessageSizeMax) - err = w.WriteMsg(protoReq) + err = w.WriteMsg(req) if err != nil { return nil, fmt.Errorf("write message: %w", err) } @@ -107,48 +92,33 @@ func (r *Router) SendMessage(ctx context.Context, to kad.NodeInfo[key.Key256, ma } for _, info := range protoResp.CloserPeersAddrInfos() { - _ = r.AddNodeInfo(ctx, kadt.AddrInfo{ - Info: info, - }, time.Hour) + _ = r.AddNodeInfo(ctx, info, time.Hour) } return &protoResp, err } -func (r *Router) AddNodeInfo(ctx context.Context, info kad.NodeInfo[key.Key256, ma.Multiaddr], ttl time.Duration) error { - var p peer.ID - nid, ok := info.ID().(kadt.PeerID) - if !ok { - naddr := info.(*kademlia.NodeAddr[key.Key256, ma.Multiaddr]) - p = peer.ID(naddr.ID().(kadt.PeerID)) - } else { - p = peer.ID(nid) - } - ai := peer.AddrInfo{ - ID: p, - Addrs: info.Addresses(), - } - +func (r *Router) AddNodeInfo(ctx context.Context, ai peer.AddrInfo, ttl time.Duration) error { // Don't add addresses for self or our connected peers. We have better ones. - if ai.ID == r.host.ID() || - r.host.Network().Connectedness(ai.ID) == network.Connected { + if ai.ID == r.host.ID() || r.host.Network().Connectedness(ai.ID) == network.Connected { return nil } + r.host.Peerstore().AddAddrs(ai.ID, ai.Addrs, ttl) return nil } -func (r *Router) GetNodeInfo(ctx context.Context, id kad.NodeID[key.Key256]) (kad.NodeInfo[key.Key256, ma.Multiaddr], error) { - pid := peer.ID(id.(kadt.PeerID)) - return kadt.AddrInfo{Info: r.host.Peerstore().PeerInfo(pid)}, nil +func (r *Router) GetNodeInfo(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { + return r.host.Peerstore().PeerInfo(id), nil } -func (r *Router) GetClosestNodes(ctx context.Context, to kad.NodeInfo[key.Key256, ma.Multiaddr], target key.Key256) ([]kad.NodeInfo[key.Key256, ma.Multiaddr], error) { +func (r *Router) GetClosestNodes(ctx context.Context, to peer.AddrInfo, target key.Key256) ([]peer.AddrInfo, error) { resp, err := r.SendMessage(ctx, to, address.ProtocolID(ProtocolIPFS), FindKeyRequest(target)) if err != nil { return nil, err } - return resp.CloserNodes(), nil + + return resp.CloserPeersAddrInfos(), nil } func FindKeyRequest(k key.Key256) *pb.Message { diff --git a/v2/routing.go b/v2/routing.go index c2e84aa3..c14258e3 100644 --- a/v2/routing.go +++ b/v2/routing.go @@ -6,7 +6,8 @@ import ( "fmt" "time" - "github.com/iand/zikade/core" + "github.com/libp2p/go-libp2p-kad-dht/v2/coord" + "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" @@ -15,8 +16,6 @@ import ( "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/routing" - ma "github.com/multiformats/go-multiaddr" - "github.com/plprobelab/go-kademlia/key" "go.opentelemetry.io/otel/attribute" otel "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" @@ -43,12 +42,12 @@ func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { target 
:= kadt.PeerID(id) - var foundNode core.Node[key.Key256, ma.Multiaddr] - fn := func(ctx context.Context, node core.Node[key.Key256, ma.Multiaddr], stats core.QueryStats) error { + var foundNode coord.Node + fn := func(ctx context.Context, node coord.Node, stats coord.QueryStats) error { slog.Info("visiting node", "id", node.ID()) - if key.Equal(node.ID().Key(), target.Key()) { + if node.ID() == id { foundNode = node - return core.SkipRemaining + return coord.SkipRemaining } return nil } @@ -63,7 +62,7 @@ func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { } return peer.AddrInfo{ - ID: peer.ID(foundNode.ID().(kadt.PeerID)), + ID: foundNode.ID(), Addrs: foundNode.Addresses(), }, nil } diff --git a/v2/routing_test.go b/v2/routing_test.go deleted file mode 100644 index 7406c401..00000000 --- a/v2/routing_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package dht - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/multiformats/go-multiaddr" - "github.com/stretchr/testify/require" -) - -func TestRouting_FindPeer(t *testing.T) { - d := newTestDHT(t) - ctx := context.Background() - - // friend is the first peer we know in the IPFS DHT network (bootstrap node) - friendID, err := peer.Decode("12D3KooWGjgvfDkpuVAoNhd7PRRvMTEG4ZgzHBFURqDe1mqEzAMS") - require.NoError(t, err) - - // multiaddress of friend - friendAddr, err := multiaddr.NewMultiaddr("/ip4/45.32.75.236/tcp/4001") - require.NoError(t, err) - - t.Log("connecting...") - friendInfo := peer.AddrInfo{ID: friendID, Addrs: []multiaddr.Multiaddr{friendAddr}} - err = d.host.Connect(ctx, friendInfo) - require.NoError(t, err) - t.Log("connected") - - // target is the peer we want to find - target, err := peer.Decode("12D3KooWGWcyxn3JfihYiu2HspbE5XHzfgZiLwihVCeyXQQU8yC1") - require.NoError(t, err) - - // Error -> delay between AddNodes and added to routing table - //err = d.kad.AddNodes(ctx, []kad.NodeInfo[key.Key256, multiaddr.Multiaddr]{ - // kadt.AddrInfo{Info: friendInfo}, - //}) - //require.NoError(t, err) - //time.Sleep(100 * time.Millisecond) - - d.rt.AddNode(kadt.PeerID(friendInfo.ID)) - - targetInfo, err := d.FindPeer(ctx, target) - require.NoError(t, err) - t.Log(targetInfo.ID) - t.Log(targetInfo.Addrs) - - assert.Greater(t, len(targetInfo.Addrs), 0) -} From 79c7009ad45794f53d84aeb72af887c2aa12887d Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Tue, 5 Sep 2023 14:29:05 +0200 Subject: [PATCH 15/26] Consolidate type parameters --- v2/coord/behaviour.go | 14 ++++++++------ v2/coord/coretypes.go | 14 +++++++++----- v2/pb/msg.aux.go | 1 - 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/v2/coord/behaviour.go b/v2/coord/behaviour.go index 4077d347..2f9f9e33 100644 --- a/v2/coord/behaviour.go +++ b/v2/coord/behaviour.go @@ -6,18 +6,20 @@ import ( "sync/atomic" ) -type Notify[C BehaviourEvent] interface { - Notify(ctx context.Context, ev C) +// Notify is the interface that a components to implement to be notified of +// [BehaviourEvent]'s. 
+type Notify[E BehaviourEvent] interface { + Notify(ctx context.Context, ev E) } -type NotifyCloser[C BehaviourEvent] interface { - Notify[C] +type NotifyCloser[E BehaviourEvent] interface { + Notify[E] Close() } -type NotifyFunc[C BehaviourEvent] func(ctx context.Context, ev C) +type NotifyFunc[E BehaviourEvent] func(ctx context.Context, ev E) -func (f NotifyFunc[C]) Notify(ctx context.Context, ev C) { +func (f NotifyFunc[E]) Notify(ctx context.Context, ev E) { f(ctx, ev) } diff --git a/v2/coord/coretypes.go b/v2/coord/coretypes.go index ad7036b1..ebadf442 100644 --- a/v2/coord/coretypes.go +++ b/v2/coord/coretypes.go @@ -12,6 +12,8 @@ import ( "github.com/plprobelab/go-kademlia/network/address" ) +// KadKey is a type alias for the type of key that's used with this DHT +// implementation. type KadKey = key.Key256 // Value is a value that may be stored in the DHT. @@ -20,19 +22,21 @@ type Value interface { MarshalBinary() ([]byte, error) } -// Node represent a remote node, a participant in the DHT. +// Node represents the local or a remote node participating in the DHT. type Node interface { + // ID returns the peer ID identifying this node. ID() peer.ID // Addresses returns the network addresses associated with the given node. Addresses() []ma.Multiaddr - // GetClosestNodes requests the n closest nodes to the key from the node's local routing table. - // The node may return fewer nodes than requested. + // GetClosestNodes requests the n closest nodes to the key from the node's + // local routing table. The node may return fewer nodes than requested. GetClosestNodes(ctx context.Context, key KadKey, n int) ([]Node, error) - // GetValue requests that the node return any value associated with the supplied key. - // If the node does not have a value for the key it returns ErrValueNotFound. + // GetValue requests that the node return any value associated with the + // supplied key. If the node does not have a value for the key it returns + // ErrValueNotFound. GetValue(ctx context.Context, key KadKey) (Value, error) // PutValue requests that the node stores a value to be associated with the supplied key. 
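The Node interface documented in the coretypes.go hunk above is the surface that query logic such as FindPeer builds on. As a rough illustration only (not part of this patch series; the example package name, the closerLookup helper, and the request size of 20 are assumptions), a caller of the coord package could use it like this:

package example

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p-kad-dht/v2/coord"
	"golang.org/x/exp/slog"
)

// closerLookup asks a single node for the closest peers it knows to a target
// key and logs them. It exists only to illustrate the coord.Node interface.
func closerLookup(ctx context.Context, n coord.Node, target coord.KadKey) error {
	// GetClosestNodes may return fewer nodes than requested.
	closer, err := n.GetClosestNodes(ctx, target, 20)
	if err != nil {
		return fmt.Errorf("get closest nodes from %s: %w", n.ID(), err)
	}

	for _, c := range closer {
		slog.Info("closer node", "id", c.ID(), "addrs", len(c.Addresses()))
	}

	return nil
}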
diff --git a/v2/pb/msg.aux.go b/v2/pb/msg.aux.go index 78f896a3..14b7f6d0 100644 --- a/v2/pb/msg.aux.go +++ b/v2/pb/msg.aux.go @@ -5,7 +5,6 @@ import ( "crypto/sha256" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" - "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" From 1d7cdea47dea09b1dd9dbd36b7a2b25d3b3e948d Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Tue, 5 Sep 2023 15:04:09 +0200 Subject: [PATCH 16/26] Change config test structure --- v2/config_test.go | 186 +++++++++++++++++++--------------------------- 1 file changed, 76 insertions(+), 110 deletions(-) diff --git a/v2/config_test.go b/v2/config_test.go index 670dcc84..5caeaa2c 100644 --- a/v2/config_test.go +++ b/v2/config_test.go @@ -3,117 +3,83 @@ package dht import ( "testing" "time" + + "github.com/stretchr/testify/assert" ) func TestConfig_Validate(t *testing.T) { - tests := []struct { - name string - mutate func(*Config) *Config - wantErr bool - }{ - { - name: "happy path", - wantErr: false, - mutate: func(c *Config) *Config { return c }, - }, - { - name: "invalid mode", - wantErr: true, - mutate: func(c *Config) *Config { - c.Mode = "invalid" - return c - }, - }, - { - name: "nil Kademlia configuration", - wantErr: true, - mutate: func(c *Config) *Config { - c.Kademlia = nil - return c - }, - }, - { - name: "invalid Kademlia configuration", - wantErr: true, - mutate: func(c *Config) *Config { - c.Kademlia.Clock = nil - return c - }, - }, - { - name: "empty protocol", - wantErr: true, - mutate: func(c *Config) *Config { - c.ProtocolID = "" - return c - }, - }, - { - name: "nil logger", - wantErr: true, - mutate: func(c *Config) *Config { - c.Logger = nil - return c - }, - }, - { - name: "0 stream idle timeout", - wantErr: true, - mutate: func(c *Config) *Config { - c.TimeoutStreamIdle = time.Duration(0) - return c - }, - }, - { - name: "negative stream idle timeout", - wantErr: true, - mutate: func(c *Config) *Config { - c.TimeoutStreamIdle = time.Duration(-1) - return c - }, - }, - { - // When we're using the IPFS protocol, we always require support - // for ipns, pk, and provider records. - // If the Backends map is empty and the IPFS protocol is configured, - // we automatically populate the DHT backends for these record - // types. 
- name: "incompatible backends with ipfs protocol", - wantErr: true, - mutate: func(c *Config) *Config { - c.ProtocolID = ProtocolIPFS - c.Backends["another"] = &RecordBackend{} - return c - }, - }, - { - name: "additional backends for ipfs protocol", - wantErr: true, - mutate: func(c *Config) *Config { - c.ProtocolID = ProtocolIPFS - c.Backends[namespaceProviders] = &RecordBackend{} - c.Backends[namespaceIPNS] = &RecordBackend{} - c.Backends[namespacePublicKey] = &RecordBackend{} - c.Backends["another"] = &RecordBackend{} - return c - }, - }, - { - name: "nil address filter", - wantErr: true, - mutate: func(c *Config) *Config { - c.AddressFilter = nil - return c - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := DefaultConfig() - c = tt.mutate(c) - if err := c.Validate(); (err != nil) != tt.wantErr { - t.Errorf("Validate() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } + t.Run("happy path", func(t *testing.T) { + cfg := DefaultConfig() + assert.NoError(t, cfg.Validate()) + }) + + t.Run("invalid mode", func(t *testing.T) { + cfg := DefaultConfig() + cfg.Mode = "invalid" + assert.Error(t, cfg.Validate()) + }) + + t.Run("nil Kademlia configuration", func(t *testing.T) { + cfg := DefaultConfig() + cfg.Kademlia = nil + assert.Error(t, cfg.Validate()) + }) + + t.Run("invalid Kademlia configuration", func(t *testing.T) { + cfg := DefaultConfig() + cfg.Kademlia.Clock = nil + assert.Error(t, cfg.Validate()) + }) + + t.Run("empty protocol", func(t *testing.T) { + cfg := DefaultConfig() + cfg.ProtocolID = "" + assert.Error(t, cfg.Validate()) + }) + + t.Run("nil logger", func(t *testing.T) { + cfg := DefaultConfig() + cfg.Logger = nil + assert.Error(t, cfg.Validate()) + }) + + t.Run("0 stream idle timeout", func(t *testing.T) { + cfg := DefaultConfig() + cfg.TimeoutStreamIdle = time.Duration(0) + assert.Error(t, cfg.Validate()) + }) + + t.Run("negative stream idle timeout", func(t *testing.T) { + cfg := DefaultConfig() + cfg.TimeoutStreamIdle = time.Duration(-1) + assert.Error(t, cfg.Validate()) + }) + + t.Run("incompatible backends with ipfs protocol", func(t *testing.T) { + // When we're using the IPFS protocol, we always require support + // for ipns, pk, and provider records. + // If the Backends map is empty and the IPFS protocol is configured, + // we automatically populate the DHT backends for these record + // types. 
+ cfg := DefaultConfig() + cfg.ProtocolID = ProtocolIPFS + cfg.Backends["another"] = &RecordBackend{} + assert.Error(t, cfg.Validate()) + }) + + t.Run("additional backends for ipfs protocol", func(t *testing.T) { + cfg := DefaultConfig() + cfg.ProtocolID = ProtocolIPFS + cfg.Backends[namespaceProviders] = &RecordBackend{} + cfg.Backends[namespaceIPNS] = &RecordBackend{} + cfg.Backends[namespacePublicKey] = &RecordBackend{} + cfg.Backends["another"] = &RecordBackend{} + assert.Error(t, cfg.Validate()) + }) + + t.Run("nil address filter", func(t *testing.T) { + cfg := DefaultConfig() + cfg.AddressFilter = nil + assert.Error(t, cfg.Validate()) + }) } From e8d82f1e486ee92c3d4578910a96286955ddd692 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Wed, 6 Sep 2023 11:50:02 +0200 Subject: [PATCH 17/26] use opentelemetry --- v2/backend.go | 23 +++-- v2/backend_provider.go | 31 ++++--- v2/backend_provider_test.go | 10 +- v2/backend_record.go | 15 ++- v2/backend_trace.go | 10 +- v2/config.go | 21 ++++- v2/config_test.go | 12 +++ v2/dht.go | 45 +++++++-- v2/go.mod | 13 ++- v2/go.sum | 69 +++----------- v2/handlers.go | 2 +- v2/handlers_test.go | 5 +- v2/metrics/metrics.go | 118 ------------------------ v2/routing.go | 14 +-- v2/stream.go | 73 ++++++++------- v2/tele/tele.go | 179 ++++++++++++++++++++++++++++++++++++ 16 files changed, 374 insertions(+), 266 deletions(-) delete mode 100644 v2/metrics/metrics.go create mode 100644 v2/tele/tele.go diff --git a/v2/backend.go b/v2/backend.go index 48dab7a4..a2cd2121 100644 --- a/v2/backend.go +++ b/v2/backend.go @@ -2,6 +2,7 @@ package dht import ( "context" + "fmt" lru "github.com/hashicorp/golang-lru/v2" "github.com/ipfs/boxo/ipns" @@ -60,9 +61,11 @@ type Backend interface { // store and fetch IPNS records from the given datastore. The stored and // returned records must be of type [*recpb.Record]. The cfg parameter can be // nil, in which case the [DefaultRecordBackendConfig] will be used. -func NewBackendIPNS(ds ds.TxnDatastore, kb peerstore.KeyBook, cfg *RecordBackendConfig) *RecordBackend { +func NewBackendIPNS(ds ds.TxnDatastore, kb peerstore.KeyBook, cfg *RecordBackendConfig) (be *RecordBackend, err error) { if cfg == nil { - cfg = DefaultRecordBackendConfig() + if cfg, err = DefaultRecordBackendConfig(); err != nil { + return nil, fmt.Errorf("default ipns backend config: %w", err) + } } return &RecordBackend{ @@ -71,16 +74,18 @@ func NewBackendIPNS(ds ds.TxnDatastore, kb peerstore.KeyBook, cfg *RecordBackend namespace: namespaceIPNS, datastore: ds, validator: ipns.Validator{KeyBook: kb}, - } + }, nil } // NewBackendPublicKey initializes a new backend for the "pk" namespace that can // store and fetch public key records from the given datastore. The stored and // returned records must be of type [*recpb.Record]. The cfg parameter can be // nil, in which case the [DefaultRecordBackendConfig] will be used. 
-func NewBackendPublicKey(ds ds.TxnDatastore, cfg *RecordBackendConfig) *RecordBackend { +func NewBackendPublicKey(ds ds.TxnDatastore, cfg *RecordBackendConfig) (be *RecordBackend, err error) { if cfg == nil { - cfg = DefaultRecordBackendConfig() + if cfg, err = DefaultRecordBackendConfig(); err != nil { + return nil, fmt.Errorf("default public key backend config: %w", err) + } } return &RecordBackend{ @@ -89,7 +94,7 @@ func NewBackendPublicKey(ds ds.TxnDatastore, cfg *RecordBackendConfig) *RecordBa namespace: namespacePublicKey, datastore: ds, validator: record.PublicKeyValidator{}, - } + }, nil } // NewBackendProvider initializes a new backend for the "providers" namespace @@ -98,9 +103,11 @@ func NewBackendPublicKey(ds ds.TxnDatastore, cfg *RecordBackendConfig) *RecordBa // The values returned from [ProvidersBackend.Fetch] will be of type // [*providerSet] (unexported). The cfg parameter can be nil, in which case the // [DefaultProviderBackendConfig] will be used. -func NewBackendProvider(pstore peerstore.Peerstore, dstore ds.Batching, cfg *ProvidersBackendConfig) (*ProvidersBackend, error) { +func NewBackendProvider(pstore peerstore.Peerstore, dstore ds.Batching, cfg *ProvidersBackendConfig) (be *ProvidersBackend, err error) { if cfg == nil { - cfg = DefaultProviderBackendConfig() + if cfg, err = DefaultProviderBackendConfig(); err != nil { + return nil, fmt.Errorf("default provider backend config: %w", err) + } } cache, err := lru.New[string, providerSet](cfg.CacheSize) diff --git a/v2/backend_provider.go b/v2/backend_provider.go index be318b59..cd33772a 100644 --- a/v2/backend_provider.go +++ b/v2/backend_provider.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "path" - "strconv" "strings" "sync" "time" @@ -19,11 +18,11 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" "github.com/multiformats/go-base32" - "go.opencensus.io/stats" - "go.opencensus.io/tag" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" "golang.org/x/exp/slog" - "github.com/libp2p/go-libp2p-kad-dht/v2/metrics" + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) // ProvidersBackend implements the [Backend] interface and handles provider @@ -95,6 +94,10 @@ type ProvidersBackendConfig struct { // Logger is the logger to use Logger *slog.Logger + // Tele holds a reference to the telemetry struct to capture metrics and + // traces. + Tele *tele.Telemetry + // AddressFilter is a filter function that any addresses that we attempt to // store or fetch from the peerstore's address book need to pass through. // If you're manually configuring this backend, make sure to align the @@ -106,7 +109,12 @@ type ProvidersBackendConfig struct { // configuration. Use this as a starting point and modify it. If a nil // configuration is passed to [NewBackendProvider], this default configuration // here is used. 
-func DefaultProviderBackendConfig() *ProvidersBackendConfig { +func DefaultProviderBackendConfig() (*ProvidersBackendConfig, error) { + telemetry, err := tele.New(nil, nil) + if err != nil { + return nil, fmt.Errorf("new telemetry: %w", err) + } + return &ProvidersBackendConfig{ clk: clock.New(), ProvideValidity: 48 * time.Hour, // empirically measured in: https://github.com/plprobelab/network-measurements/blob/master/results/rfm17-provider-record-liveness.md @@ -115,8 +123,9 @@ func DefaultProviderBackendConfig() *ProvidersBackendConfig { CacheSize: 256, // MAGIC GCInterval: time.Hour, // MAGIC Logger: slog.Default(), + Tele: telemetry, AddressFilter: AddrFilterIdentity, // verify alignment with [Config.AddressFilter] - } + }, nil } // Store implements the [Backend] interface. In the case of a [ProvidersBackend] @@ -346,13 +355,11 @@ func (p *ProvidersBackend) collectGarbage(ctx context.Context) { // trackCacheQuery updates the prometheus metrics about cache hit/miss performance func (p *ProvidersBackend) trackCacheQuery(ctx context.Context, hit bool) { - _ = stats.RecordWithTags(ctx, - []tag.Mutator{ - tag.Upsert(metrics.KeyCacheHit, strconv.FormatBool(hit)), - tag.Upsert(metrics.KeyRecordType, "provider"), - }, - metrics.LRUCache.M(1), + set := tele.FromContext(ctx, + attribute.Bool(tele.AttrKeyCacheHit, hit), + attribute.String(tele.AttrKeyRecordType, "provider"), ) + p.cfg.Tele.LRUCache.Add(ctx, 1, metric.WithAttributeSet(set)) } // delete is a convenience method to delete the record at the given datastore diff --git a/v2/backend_provider_test.go b/v2/backend_provider_test.go index fab0a6cd..66425653 100644 --- a/v2/backend_provider_test.go +++ b/v2/backend_provider_test.go @@ -42,7 +42,9 @@ func newBackendProvider(t testing.TB, cfg *ProvidersBackendConfig) *ProvidersBac func TestProvidersBackend_GarbageCollection(t *testing.T) { mockClock := clock.NewMock() - cfg := DefaultProviderBackendConfig() + cfg, err := DefaultProviderBackendConfig() + require.NoError(t, err) + cfg.clk = mockClock cfg.Logger = devnull @@ -58,7 +60,7 @@ func TestProvidersBackend_GarbageCollection(t *testing.T) { // write to datastore dsKey := newDatastoreKey(namespaceProviders, "random-key", string(p.ID)) rec := expiryRecord{expiry: mockClock.Now()} - err := b.datastore.Put(ctx, dsKey, rec.MarshalBinary()) + err = b.datastore.Put(ctx, dsKey, rec.MarshalBinary()) require.NoError(t, err) // write to peerstore @@ -90,7 +92,9 @@ func TestProvidersBackend_GarbageCollection(t *testing.T) { } func TestProvidersBackend_GarbageCollection_lifecycle_thread_safe(t *testing.T) { - cfg := DefaultProviderBackendConfig() + cfg, err := DefaultProviderBackendConfig() + require.NoError(t, err) + cfg.Logger = devnull b := newBackendProvider(t, cfg) diff --git a/v2/backend_record.go b/v2/backend_record.go index 072afaf9..3f350115 100644 --- a/v2/backend_record.go +++ b/v2/backend_record.go @@ -10,6 +10,8 @@ import ( record "github.com/libp2p/go-libp2p-record" recpb "github.com/libp2p/go-libp2p-record/pb" "golang.org/x/exp/slog" + + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) type RecordBackend struct { @@ -25,13 +27,20 @@ var _ Backend = (*RecordBackend)(nil) type RecordBackendConfig struct { MaxRecordAge time.Duration Logger *slog.Logger + Tele *tele.Telemetry } -func DefaultRecordBackendConfig() *RecordBackendConfig { +func DefaultRecordBackendConfig() (*RecordBackendConfig, error) { + telemetry, err := tele.New(nil, nil) + if err != nil { + return nil, fmt.Errorf("new telemetry: %w", err) + } + return &RecordBackendConfig{ 
Logger: slog.Default(), + Tele: telemetry, MaxRecordAge: 48 * time.Hour, // empirically measured in: https://github.com/plprobelab/network-measurements/blob/master/results/rfm17-provider-record-liveness.md - } + }, nil } func (r *RecordBackend) Store(ctx context.Context, key string, value any) (any, error) { @@ -128,7 +137,7 @@ func (r *RecordBackend) Fetch(ctx context.Context, key string) (any, error) { // If unmarshalling or validation fails, this function (alongside an error) also // returns true because the existing record should be replaced. func (r *RecordBackend) shouldReplaceExistingRecord(ctx context.Context, txn ds.Read, dsKey ds.Key, value []byte) (bool, error) { - ctx, span := tracer.Start(ctx, "DHT.shouldReplaceExistingRecord") + ctx, span := r.cfg.Tele.Tracer.Start(ctx, "RecordBackend.shouldReplaceExistingRecord") defer span.End() existingBytes, err := txn.Get(ctx, dsKey) diff --git a/v2/backend_trace.go b/v2/backend_trace.go index e8ad77e5..6a2bdf3a 100644 --- a/v2/backend_trace.go +++ b/v2/backend_trace.go @@ -5,25 +5,27 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - otel "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace" ) type tracedBackend struct { namespace string backend Backend + tracer trace.Tracer } var _ Backend = (*tracedBackend)(nil) -func traceWrapBackend(namespace string, backend Backend) Backend { +func traceWrapBackend(namespace string, backend Backend, tracer trace.Tracer) Backend { return &tracedBackend{ namespace: namespace, backend: backend, + tracer: tracer, } } func (t tracedBackend) Store(ctx context.Context, key string, value any) (any, error) { - ctx, span := tracer.Start(ctx, "Store", otel.WithAttributes(attribute.String("backend", t.namespace), attribute.String("key", key))) + ctx, span := t.tracer.Start(ctx, "Store", trace.WithAttributes(attribute.String("backend", t.namespace), attribute.String("key", key))) defer span.End() result, err := t.backend.Store(ctx, key, value) @@ -36,7 +38,7 @@ func (t tracedBackend) Store(ctx context.Context, key string, value any) (any, e } func (t tracedBackend) Fetch(ctx context.Context, key string) (any, error) { - ctx, span := tracer.Start(ctx, "Fetch", otel.WithAttributes(attribute.String("backend", t.namespace), attribute.String("key", key))) + ctx, span := t.tracer.Start(ctx, "Fetch", trace.WithAttributes(attribute.String("backend", t.namespace), attribute.String("key", key))) defer span.End() result, err := t.backend.Fetch(ctx, key) diff --git a/v2/config.go b/v2/config.go index c52425b2..9b7468db 100644 --- a/v2/config.go +++ b/v2/config.go @@ -16,6 +16,8 @@ import ( "github.com/plprobelab/go-kademlia/routing" "github.com/plprobelab/go-kademlia/routing/triert" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap/exp/zapslog" "golang.org/x/exp/slog" ) @@ -36,9 +38,6 @@ const ( ProtocolFilecoin protocol.ID = "/fil/kad/testnetnet/kad/1.0.0" ) -// tracer is an open telemetry tracing instance -var tracer = otel.Tracer("go-libp2p-kad-dht") - type ( // ModeOpt describes in which mode this [DHT] process should operate in. // Possible options are client, server, and any variant that switches @@ -161,6 +160,12 @@ type Config struct { // also fetch from the peer store and serve to other peers. It is mainly // used to filter out private addresses. AddressFilter AddressFilter + + // MeterProvider . + MeterProvider metric.MeterProvider + + // TracerProvider . 
+ TracerProvider trace.TracerProvider } // DefaultConfig returns a configuration struct that can be used as-is to @@ -179,6 +184,8 @@ func DefaultConfig() *Config { Logger: slog.New(zapslog.NewHandler(logging.Logger("dht").Desugar().Core())), TimeoutStreamIdle: time.Minute, // MAGIC AddressFilter: AddrFilterPrivate, + MeterProvider: otel.GetMeterProvider(), + TracerProvider: otel.GetTracerProvider(), } } @@ -254,6 +261,14 @@ func (c *Config) Validate() error { return fmt.Errorf("address filter must not be nil - use AddrFilterIdentity to disable filtering") } + if c.MeterProvider == nil { + return fmt.Errorf("opentelemetry meter provider must not be nil") + } + + if c.TracerProvider == nil { + return fmt.Errorf("opentelemetry tracer provider must not be nil") + } + return nil } diff --git a/v2/config_test.go b/v2/config_test.go index 5caeaa2c..d066ac99 100644 --- a/v2/config_test.go +++ b/v2/config_test.go @@ -82,4 +82,16 @@ func TestConfig_Validate(t *testing.T) { cfg.AddressFilter = nil assert.Error(t, cfg.Validate()) }) + + t.Run("nil meter provider", func(t *testing.T) { + cfg := DefaultConfig() + cfg.MeterProvider = nil + assert.Error(t, cfg.Validate()) + }) + + t.Run("nil tracer provider", func(t *testing.T) { + cfg := DefaultConfig() + cfg.TracerProvider = nil + assert.Error(t, cfg.Validate()) + }) } diff --git a/v2/dht.go b/v2/dht.go index 675c83bc..0ec8c189 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -17,6 +17,7 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/coord" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) // DHT is an implementation of Kademlia with S/Kademlia modifications. @@ -52,6 +53,9 @@ type DHT struct { // these events in networkEventsSubscription and consumes them // asynchronously in consumeNetworkEvents. 
sub event.Subscription + + // tele holds a reference to a telemetry struct + tele *tele.Telemetry } // New constructs a new [DHT] for the given underlying host and with the given @@ -79,6 +83,12 @@ func New(h host.Host, cfg *Config) (*DHT, error) { return nil, fmt.Errorf("new trie routing table: %w", err) } + // initialize a new telemetry struct + d.tele, err = tele.New(cfg.MeterProvider, cfg.TracerProvider) + if err != nil { + return nil, fmt.Errorf("init telemetry: %w", err) + } + if len(cfg.Backends) != 0 { d.backends = cfg.Backends } else if cfg.ProtocolID == ProtocolIPFS { @@ -91,33 +101,48 @@ func New(h host.Host, cfg *Config) (*DHT, error) { } // wrap datastore in open telemetry tracing - dstore = trace.New(dstore, tracer) + dstore = trace.New(dstore, d.tele.Tracer) - pbeCfg := DefaultProviderBackendConfig() + pbeCfg, err := DefaultProviderBackendConfig() + if err != nil { + return nil, fmt.Errorf("default provider config: %w", err) + } pbeCfg.Logger = cfg.Logger pbeCfg.AddressFilter = cfg.AddressFilter + pbeCfg.Tele = d.tele pbe, err := NewBackendProvider(h.Peerstore(), dstore, pbeCfg) if err != nil { return nil, fmt.Errorf("new provider backend: %w", err) } - rbeCfg := DefaultRecordBackendConfig() + rbeCfg, err := DefaultRecordBackendConfig() + if err != nil { + return nil, fmt.Errorf("default provider config: %w", err) + } rbeCfg.Logger = cfg.Logger + rbeCfg.Tele = d.tele + + ipnsBe, err := NewBackendIPNS(dstore, h.Peerstore(), rbeCfg) + if err != nil { + return nil, fmt.Errorf("new ipns backend: %w", err) + } + + pkBe, err := NewBackendPublicKey(dstore, rbeCfg) + if err != nil { + return nil, fmt.Errorf("new public key backend: %w", err) + } d.backends = map[string]Backend{ - "ipns": NewBackendIPNS(dstore, h.Peerstore(), rbeCfg), - "pk": NewBackendPublicKey(dstore, rbeCfg), + "ipns": ipnsBe, + "pk": pkBe, "providers": pbe, } } // wrap all backends with tracing - for ns, backend := range d.backends { - d.backends[ns] = &tracedBackend{ - namespace: ns, - backend: backend, - } + for ns, be := range d.backends { + d.backends[ns] = traceWrapBackend(ns, be, d.tele.Tracer) } // instantiate a new Kademlia DHT coordinator. 
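The dht.go changes above thread the new telemetry struct through backend construction, and together with the MeterProvider and TracerProvider fields added to Config (which default to the global otel providers) they let callers supply their own OpenTelemetry SDK instances. A minimal, hypothetical wiring sketch (the example package, the newTracedDHT helper, and the bare SDK constructors are assumptions; a real setup would attach metric readers and span exporters):

package example

import (
	"github.com/libp2p/go-libp2p"
	dht "github.com/libp2p/go-libp2p-kad-dht/v2"
	"go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/trace"
)

// newTracedDHT constructs a DHT whose metrics and traces go to dedicated SDK
// providers instead of the otel globals.
func newTracedDHT() (*dht.DHT, error) {
	h, err := libp2p.New()
	if err != nil {
		return nil, err
	}

	cfg := dht.DefaultConfig()
	cfg.MeterProvider = metric.NewMeterProvider()  // attach readers/exporters as needed
	cfg.TracerProvider = trace.NewTracerProvider() // attach span processors as needed

	return dht.New(h, cfg)
}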
diff --git a/v2/go.mod b/v2/go.mod index 6300136c..85b481ae 100644 --- a/v2/go.mod +++ b/v2/go.mod @@ -7,7 +7,7 @@ require ( github.com/hashicorp/golang-lru/v2 v2.0.5 github.com/ipfs/boxo v0.12.0 github.com/ipfs/go-cid v0.4.1 - github.com/ipfs/go-datastore v0.6.0 + github.com/ipfs/go-datastore v0.6.1-0.20230901172804-1caa2449ed7c github.com/ipfs/go-ds-leveldb v0.5.0 github.com/ipfs/go-log/v2 v2.5.1 github.com/libp2p/go-libp2p v0.30.0 @@ -17,9 +17,11 @@ require ( github.com/multiformats/go-multiaddr v0.11.0 github.com/plprobelab/go-kademlia v0.0.0-20230901130940-286ab4ceca60 github.com/stretchr/testify v1.8.4 - go.opencensus.io v0.24.0 - go.opentelemetry.io/otel v1.16.0 - go.opentelemetry.io/otel/trace v1.16.0 + go.opentelemetry.io/otel v1.17.0 + go.opentelemetry.io/otel/metric v1.17.0 + go.opentelemetry.io/otel/sdk v1.17.0 + go.opentelemetry.io/otel/sdk/metric v0.40.0 + go.opentelemetry.io/otel/trace v1.17.0 go.uber.org/zap/exp v0.1.0 golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 google.golang.org/protobuf v1.31.0 @@ -99,7 +101,6 @@ require ( github.com/raulk/go-watchdog v1.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/syndtr/goleveldb v1.0.0 // indirect - go.opentelemetry.io/otel/metric v1.16.0 // indirect go.uber.org/dig v1.17.0 // indirect go.uber.org/fx v1.20.0 // indirect go.uber.org/multierr v1.11.0 // indirect @@ -114,5 +115,3 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect ) - -replace github.com/ipfs/go-datastore v0.6.0 => github.com/dennis-tra/go-datastore v0.0.0-20230823171431-12ac85aaa42a diff --git a/v2/go.sum b/v2/go.sum index a7ac15d9..d392ea5d 100644 --- a/v2/go.sum +++ b/v2/go.sum @@ -18,12 +18,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= @@ -41,8 +39,6 @@ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6Uh github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= -github.com/dennis-tra/go-datastore v0.0.0-20230823171431-12ac85aaa42a h1:YnrW4Kcy7kTIJRfL3Xg7+fIMS17izs0WWH2GdYwYhNs= -github.com/dennis-tra/go-datastore 
v0.0.0-20230823171431-12ac85aaa42a/go.mod h1:3Et7HSjOA8tPu9OjYuDZxLAgBLfvlNMD4r8BIuri9eo= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= @@ -50,10 +46,6 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= @@ -83,8 +75,6 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -92,14 +82,6 @@ github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod 
h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -107,12 +89,7 @@ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pO github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= @@ -125,7 +102,6 @@ github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBB github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= @@ -146,6 +122,8 @@ github.com/ipfs/boxo v0.12.0/go.mod h1:xAnfiU6PtxWCnRqu7dcXQ10bB5/kvI1kXRotuGqGB github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.6.1-0.20230901172804-1caa2449ed7c h1:iSyhKydtSJiEkmf5O3KizuySDB0zgyWPth76NACTMVI= +github.com/ipfs/go-datastore v0.6.1-0.20230901172804-1caa2449ed7c/go.mod h1:3Et7HSjOA8tPu9OjYuDZxLAgBLfvlNMD4r8BIuri9eo= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= @@ -291,7 +269,6 @@ github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= @@ -348,16 +325,11 @@ github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= @@ -373,14 +345,16 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= -go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= -go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= -go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= -go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= -go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= +go.opentelemetry.io/otel v1.17.0 h1:MW+phZ6WZ5/uk2nd93ANk/6yJ+dVrvNWUjGhnnFU5jM= +go.opentelemetry.io/otel v1.17.0/go.mod h1:I2vmBGtFaODIVMBSTPVDlJSzBDNf93k60E6Ft0nyjo0= +go.opentelemetry.io/otel/metric v1.17.0 h1:iG6LGVz5Gh+IuO0jmgvpTB6YVrCGngi8QGm+pMd8Pdc= +go.opentelemetry.io/otel/metric v1.17.0/go.mod h1:h4skoxdZI17AxwITdmdZjjYJQH5nzijUUjm+wtPph5o= +go.opentelemetry.io/otel/sdk v1.17.0 h1:FLN2X66Ke/k5Sg3V623Q7h7nt3cHXaW1FOvKKrW0IpE= +go.opentelemetry.io/otel/sdk v1.17.0/go.mod h1:U87sE0f5vQB7hwUoW98pW5Rz4ZDuCFBZFNUBlSgmDFQ= +go.opentelemetry.io/otel/sdk/metric v0.40.0 h1:qOM29YaGcxipWjL5FzpyZDpCYrDREvX0mVlmXdOjCHU= 
+go.opentelemetry.io/otel/sdk/metric v0.40.0/go.mod h1:dWxHtdzdJvg+ciJUKLTKwrMe5P6Dv3FyDbh8UkfgkVs= +go.opentelemetry.io/otel/trace v1.17.0 h1:/SWhSRHmDPOImIAetP1QAeMnZYiQXrTy4fMMYOdSKWQ= +go.opentelemetry.io/otel/trace v1.17.0/go.mod h1:I/4vKTgFclIsXRVucpH25X0mpFSczM7aHeaz0ZBLWjY= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -419,7 +393,6 @@ golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMe golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -442,7 +415,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= @@ -500,7 +472,6 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -528,25 +499,10 @@ google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= @@ -573,7 +529,6 @@ grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJd honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= diff --git a/v2/handlers.go b/v2/handlers.go index 9a37c17b..ee360c27 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -234,7 +234,7 @@ func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Me // closerPeers returns the closest peers to the given target key this host knows // about. It doesn't return 1) itself 2) the peer that asked for closer peers. 
func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256) []*pb.Message_Peer { - ctx, span := tracer.Start(ctx, "DHT.closerPeers", otel.WithAttributes(attribute.String("remote", remote.String()), attribute.String("target", target.HexString()))) + ctx, span := d.tele.Tracer.Start(ctx, "DHT.closerPeers", otel.WithAttributes(attribute.String("remote", remote.String()), attribute.String("target", target.HexString()))) defer span.End() peers := d.rt.NearestNodes(target, d.cfg.BucketSize) diff --git a/v2/handlers_test.go b/v2/handlers_test.go index 5c9b0c89..b0de3168 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -803,8 +803,11 @@ func TestDHT_handlePutValue_moved_from_v1_atomic_operation(t *testing.T) { ds, err := InMemoryDatastore() require.NoError(t, err) + cfg, err := DefaultRecordBackendConfig() + require.NoError(t, err) + recBackend := &RecordBackend{ - cfg: DefaultRecordBackendConfig(), + cfg: cfg, log: devnull, namespace: "test", datastore: ds, diff --git a/v2/metrics/metrics.go b/v2/metrics/metrics.go deleted file mode 100644 index 3cee215d..00000000 --- a/v2/metrics/metrics.go +++ /dev/null @@ -1,118 +0,0 @@ -package metrics - -import ( - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -var ( - defaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) - defaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) -) - -// Keys -var ( - KeyCacheHit, _ = tag.NewKey("hit") - KeyRecordType, _ = tag.NewKey("record_type") // currently only used for the provider backend LRU cache - KeyMessageType, _ = tag.NewKey("message_type") - KeyPeerID, _ = tag.NewKey("peer_id") - // KeyInstanceID identifies a dht instance by the pointer address. - // Useful for differentiating between different dhts that have the same peer id. 
- KeyInstanceID, _ = tag.NewKey("instance_id") -) - -// Measures -var ( - ReceivedMessages = stats.Int64("libp2p.io/dht/kad/received_messages", "Total number of messages received per RPC", stats.UnitDimensionless) - ReceivedMessageErrors = stats.Int64("libp2p.io/dht/kad/received_message_errors", "Total number of errors for messages received per RPC", stats.UnitDimensionless) - ReceivedBytes = stats.Int64("libp2p.io/dht/kad/received_bytes", "Total received bytes per RPC", stats.UnitBytes) - InboundRequestLatency = stats.Float64("libp2p.io/dht/kad/inbound_request_latency", "Latency per RPC", stats.UnitMilliseconds) - OutboundRequestLatency = stats.Float64("libp2p.io/dht/kad/outbound_request_latency", "Latency per RPC", stats.UnitMilliseconds) - SentMessages = stats.Int64("libp2p.io/dht/kad/sent_messages", "Total number of messages sent per RPC", stats.UnitDimensionless) - SentMessageErrors = stats.Int64("libp2p.io/dht/kad/sent_message_errors", "Total number of errors for messages sent per RPC", stats.UnitDimensionless) - SentRequests = stats.Int64("libp2p.io/dht/kad/sent_requests", "Total number of requests sent per RPC", stats.UnitDimensionless) - SentRequestErrors = stats.Int64("libp2p.io/dht/kad/sent_request_errors", "Total number of errors for requests sent per RPC", stats.UnitDimensionless) - SentBytes = stats.Int64("libp2p.io/dht/kad/sent_bytes", "Total sent bytes per RPC", stats.UnitBytes) - LRUCache = stats.Int64("libp2p.io/dht/kad/lru_cache", "Cache hit or miss counter", stats.UnitDimensionless) - NetworkSize = stats.Int64("libp2p.io/dht/kad/network_size", "Network size estimation", stats.UnitDimensionless) -) - -// Views -var ( - ReceivedMessagesView = &view.View{ - Measure: ReceivedMessages, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: view.Count(), - } - ReceivedMessageErrorsView = &view.View{ - Measure: ReceivedMessageErrors, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: view.Count(), - } - ReceivedBytesView = &view.View{ - Measure: ReceivedBytes, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: defaultBytesDistribution, - } - InboundRequestLatencyView = &view.View{ - Measure: InboundRequestLatency, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: defaultMillisecondsDistribution, - } - OutboundRequestLatencyView = &view.View{ - Measure: OutboundRequestLatency, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: defaultMillisecondsDistribution, - } - SentMessagesView = &view.View{ - Measure: SentMessages, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: view.Count(), - } - SentMessageErrorsView = &view.View{ - Measure: SentMessageErrors, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: view.Count(), - } - SentRequestsView = &view.View{ - Measure: SentRequests, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: view.Count(), - } - SentRequestErrorsView = &view.View{ - Measure: SentRequestErrors, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: view.Count(), - } - SentBytesView = &view.View{ - Measure: SentBytes, - TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID}, - Aggregation: defaultBytesDistribution, - } - LRUCacheView = &view.View{ - Measure: LRUCache, - TagKeys: []tag.Key{KeyPeerID, KeyInstanceID}, - Aggregation: view.Count(), - } - NetworkSizeView = &view.View{ - Measure: NetworkSize, - TagKeys: 
[]tag.Key{KeyPeerID, KeyInstanceID}, - Aggregation: view.Count(), - } -) - -// DefaultViews with all views in it. -var DefaultViews = []*view.View{ - ReceivedMessagesView, - ReceivedMessageErrorsView, - ReceivedBytesView, - InboundRequestLatencyView, - OutboundRequestLatencyView, - SentMessagesView, - SentMessageErrorsView, - SentRequestsView, - SentRequestErrorsView, - SentBytesView, - NetworkSizeView, -} diff --git a/v2/routing.go b/v2/routing.go index c14258e3..6d641c32 100644 --- a/v2/routing.go +++ b/v2/routing.go @@ -24,7 +24,7 @@ import ( var _ routing.Routing = (*DHT)(nil) func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { - ctx, span := tracer.Start(ctx, "DHT.FindPeer") + ctx, span := d.tele.Tracer.Start(ctx, "DHT.FindPeer") defer span.End() // First check locally. If we are or were recently connected to the peer, @@ -68,7 +68,7 @@ func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) { } func (d *DHT) Provide(ctx context.Context, c cid.Cid, brdcst bool) error { - ctx, span := tracer.Start(ctx, "DHT.Provide", otel.WithAttributes(attribute.String("cid", c.String()))) + ctx, span := d.tele.Tracer.Start(ctx, "DHT.Provide", otel.WithAttributes(attribute.String("cid", c.String()))) defer span.End() // verify if this DHT supports provider records by checking if a "providers" @@ -99,7 +99,7 @@ func (d *DHT) Provide(ctx context.Context, c cid.Cid, brdcst bool) error { } func (d *DHT) FindProvidersAsync(ctx context.Context, c cid.Cid, count int) <-chan peer.AddrInfo { - ctx, span := tracer.Start(ctx, "DHT.FindProvidersAsync", otel.WithAttributes(attribute.String("cid", c.String()), attribute.Int("count", count))) + ctx, span := d.tele.Tracer.Start(ctx, "DHT.FindProvidersAsync", otel.WithAttributes(attribute.String("cid", c.String()), attribute.Int("count", count))) defer span.End() // verify if this DHT supports provider records by checking if a "providers" @@ -116,7 +116,7 @@ func (d *DHT) FindProvidersAsync(ctx context.Context, c cid.Cid, count int) <-ch } func (d *DHT) PutValue(ctx context.Context, key string, value []byte, option ...routing.Option) error { - ctx, span := tracer.Start(ctx, "DHT.PutValue") + ctx, span := d.tele.Tracer.Start(ctx, "DHT.PutValue") defer span.End() ns, path, err := record.SplitKey(key) @@ -142,7 +142,7 @@ func (d *DHT) PutValue(ctx context.Context, key string, value []byte, option ... 
} func (d *DHT) GetValue(ctx context.Context, key string, option ...routing.Option) ([]byte, error) { - ctx, span := tracer.Start(ctx, "DHT.GetValue") + ctx, span := d.tele.Tracer.Start(ctx, "DHT.GetValue") defer span.End() ns, path, err := record.SplitKey(key) @@ -173,14 +173,14 @@ func (d *DHT) GetValue(ctx context.Context, key string, option ...routing.Option } func (d *DHT) SearchValue(ctx context.Context, s string, option ...routing.Option) (<-chan []byte, error) { - ctx, span := tracer.Start(ctx, "DHT.SearchValue") + ctx, span := d.tele.Tracer.Start(ctx, "DHT.SearchValue") defer span.End() panic("implement me") } func (d *DHT) Bootstrap(ctx context.Context) error { - ctx, span := tracer.Start(ctx, "DHT.Bootstrap") + ctx, span := d.tele.Tracer.Start(ctx, "DHT.Bootstrap") defer span.End() panic("implement me") diff --git a/v2/stream.go b/v2/stream.go index 454e2fef..ce1f7c71 100644 --- a/v2/stream.go +++ b/v2/stream.go @@ -14,34 +14,42 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-msgio" "github.com/libp2p/go-msgio/protoio" - "go.opencensus.io/stats" - "go.opencensus.io/tag" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" "google.golang.org/protobuf/proto" - "github.com/libp2p/go-libp2p-kad-dht/v2/metrics" "github.com/libp2p/go-libp2p-kad-dht/v2/pb" + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) +func (d *DHT) commonTelemetryAttributes() []attribute.KeyValue { + return []attribute.KeyValue{ + attribute.String(tele.AttrKeyPeerID, d.host.ID().String()), + attribute.String(tele.AttrKeyInstanceID, fmt.Sprintf("%p", d)), + } +} + // streamHandler is the function that's registered with the libp2p host for // the DHT protocol ID. It sets up metrics and the resource manager scope. It // actually starts handling the stream and depending on the outcome resets or // closes it. func (d *DHT) streamHandler(s network.Stream) { - ctx, _ := tag.New(context.Background(), - tag.Upsert(metrics.KeyPeerID, d.host.ID().String()), - tag.Upsert(metrics.KeyInstanceID, fmt.Sprintf("%p", d)), - ) + ctx, span := d.tele.Tracer.Start(context.Background(), "DHT.streamHandler", trace.WithAttributes(d.commonTelemetryAttributes()...)) + defer span.End() if err := s.Scope().SetService(ServiceName); err != nil { d.log.LogAttrs(ctx, slog.LevelWarn, "error attaching stream to DHT service", slog.String("err", err.Error())) d.logErr(s.Reset(), "failed to reset stream") + span.RecordError(err) return } if err := d.handleNewStream(ctx, s); err != nil { // If we exited with an error, let the remote peer know. d.logErr(s.Reset(), "failed to reset stream") + span.RecordError(err) } else { // If we exited without an error, close gracefully. d.logErr(s.Close(), "failed to close stream") @@ -67,9 +75,11 @@ func (d *DHT) streamHandler(s network.Stream) { // it will return nil indicating the end of the stream or all messages have been // processed correctly. func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error { - ctx, span := tracer.Start(ctx, "DHT.handleNewStream") + ctx, span := d.tele.Tracer.Start(ctx, "DHT.handleNewStream") defer span.End() + ctx = tele.WithAttributes(ctx, d.commonTelemetryAttributes()...) 
+ // init structured logger that always contains the remote peers PeerID slogger := d.log.With(slog.String("from", s.Conn().RemotePeer().String())) @@ -109,27 +119,30 @@ func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error { return fmt.Errorf("reset stream deadline: %w", err) } + ctx = tele.WithAttributes(ctx, + attribute.String(tele.AttrKeyMessageType, req.GetType().String()), + attribute.String(tele.AttrKeyKey, base64.StdEncoding.EncodeToString(req.GetKey())), + ) + // extend metrics context and slogger with message information. // ctx must be overwritten because in the next iteration metrics.KeyMessageType // would already exist and tag.New would return an error. - ctx, _ := tag.New(ctx, tag.Upsert(metrics.KeyMessageType, req.GetType().String())) slogger = slogger.With( slog.String("type", req.GetType().String()), slog.String("key", base64.StdEncoding.EncodeToString(req.GetKey())), ) // track message metrics - stats.Record(ctx, - metrics.ReceivedMessages.M(1), - metrics.ReceivedBytes.M(int64(len(data))), - ) + mattrs := metric.WithAttributeSet(tele.FromContext(ctx)) + d.tele.ReceivedMessages.Add(ctx, 1, mattrs) + d.tele.ReceivedBytes.Record(ctx, int64(len(data)), mattrs) // 3. handle the message and gather response slogger.LogAttrs(ctx, slog.LevelDebug, "handling message") resp, err := d.handleMsg(ctx, s.Conn().RemotePeer(), req) if err != nil { slogger.LogAttrs(ctx, slog.LevelDebug, "error handling message", slog.Duration("time", time.Since(startTime)), slog.String("error", err.Error())) - stats.Record(ctx, metrics.ReceivedMessageErrors.M(1)) + d.tele.ReceivedMessageErrors.Add(ctx, 1, mattrs) return err } slogger.LogAttrs(ctx, slog.LevelDebug, "handled message", slog.Duration("time", time.Since(startTime))) @@ -148,7 +161,7 @@ func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error { // final logging, metrics tracking latency := time.Since(startTime) slogger.LogAttrs(ctx, slog.LevelDebug, "responded to message", slog.Duration("time", latency)) - stats.Record(ctx, metrics.InboundRequestLatency.M(float64(latency.Milliseconds()))) + d.tele.InboundRequestLatency.Record(ctx, float64(latency.Milliseconds()), mattrs) } } @@ -156,7 +169,7 @@ func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error { // corresponding bytes. If an error occurs it, logs it, and updates the metrics. // If the bytes are empty and the error is nil, the remote peer returned func (d *DHT) streamReadMsg(ctx context.Context, slogger *slog.Logger, r msgio.Reader) ([]byte, error) { - ctx, span := tracer.Start(ctx, "DHT.streamReadMsg") + ctx, span := d.tele.Tracer.Start(ctx, "DHT.streamReadMsg") defer span.End() data, err := r.ReadMsg() @@ -168,12 +181,10 @@ func (d *DHT) streamReadMsg(ctx context.Context, slogger *slog.Logger, r msgio.R // record any potential partial message we have received if len(data) > 0 { - _ = stats.RecordWithTags(ctx, - []tag.Mutator{tag.Upsert(metrics.KeyMessageType, "UNKNOWN")}, - metrics.ReceivedMessages.M(1), - metrics.ReceivedMessageErrors.M(1), - metrics.ReceivedBytes.M(int64(len(data))), - ) + mattrs := metric.WithAttributeSet(tele.FromContext(ctx, attribute.String(tele.AttrKeyMessageType, "UNKNOWN"))) + d.tele.ReceivedMessages.Add(ctx, 1, mattrs) + d.tele.ReceivedMessageErrors.Add(ctx, 1, mattrs) + d.tele.ReceivedBytes.Record(ctx, int64(len(data)), mattrs) } return nil, err @@ -186,19 +197,16 @@ func (d *DHT) streamReadMsg(ctx context.Context, slogger *slog.Logger, r msgio.R // protobuf message. 
If an error occurs, it will be logged and the metrics will // be updated. func (d *DHT) streamUnmarshalMsg(ctx context.Context, slogger *slog.Logger, data []byte) (*pb.Message, error) { - ctx, span := tracer.Start(ctx, "DHT.streamUnmarshalMsg") + ctx, span := d.tele.Tracer.Start(ctx, "DHT.streamUnmarshalMsg") defer span.End() var req pb.Message if err := proto.Unmarshal(data, &req); err != nil { slogger.LogAttrs(ctx, slog.LevelDebug, "error unmarshalling message", slog.String("err", err.Error())) - _ = stats.RecordWithTags(ctx, - []tag.Mutator{tag.Upsert(metrics.KeyMessageType, "UNKNOWN")}, - metrics.ReceivedMessages.M(1), - metrics.ReceivedMessageErrors.M(1), - metrics.ReceivedBytes.M(int64(len(data))), - ) + mattrs := metric.WithAttributeSet(tele.FromContext(ctx, attribute.String(tele.AttrKeyMessageType, "UNKNOWN"))) + d.tele.ReceivedMessageErrors.Add(ctx, 1, mattrs) + d.tele.ReceivedBytes.Record(ctx, int64(len(data)), mattrs) return nil, err } @@ -209,7 +217,7 @@ func (d *DHT) streamUnmarshalMsg(ctx context.Context, slogger *slog.Logger, data // handleMsg handles the give protobuf message based on its type from the // given remote peer. func (d *DHT) handleMsg(ctx context.Context, remote peer.ID, req *pb.Message) (*pb.Message, error) { - ctx, span := tracer.Start(ctx, "DHT.handle_"+req.GetType().String()) + ctx, span := d.tele.Tracer.Start(ctx, "DHT.handle_"+req.GetType().String(), trace.WithAttributes(attribute.String("remote_id", remote.String()))) defer span.End() switch req.GetType() { @@ -233,12 +241,13 @@ func (d *DHT) handleMsg(ctx context.Context, remote peer.ID, req *pb.Message) (* // streamWriteMsg sends the given message over the stream and handles traces // and telemetry. func (d *DHT) streamWriteMsg(ctx context.Context, slogger *slog.Logger, s network.Stream, msg *pb.Message) error { - ctx, span := tracer.Start(ctx, "DHT.streamWriteMsg") + ctx, span := d.tele.Tracer.Start(ctx, "DHT.streamWriteMsg") defer span.End() if err := writeMsg(s, msg); err != nil { slogger.LogAttrs(ctx, slog.LevelDebug, "error writing response", slog.String("err", err.Error())) - stats.Record(ctx, metrics.ReceivedMessageErrors.M(1)) + mattrs := metric.WithAttributeSet(tele.FromContext(ctx)) + d.tele.ReceivedMessageErrors.Add(ctx, 1, mattrs) return err } diff --git a/v2/tele/tele.go b/v2/tele/tele.go new file mode 100644 index 00000000..e22382bc --- /dev/null +++ b/v2/tele/tele.go @@ -0,0 +1,179 @@ +package tele + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk/instrumentation" + otelsdk "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/trace" +) + +type ctxKey struct{} + +var ( + meterName = "github.com/libp2p/go-libp2p-kad-dht/v2" + tracerName = "go-libp2p-kad-dht" + attrsCtxKey = ctxKey{} +) + +var MeterProviderOpts = func() []otelsdk.Option { + return []otelsdk.Option{ + otelsdk.WithView(otelsdk.NewView( + otelsdk.Instrument{Name: "*_bytes", Scope: instrumentation.Scope{Name: meterName}}, + otelsdk.Stream{ + Aggregation: otelsdk.AggregationExplicitBucketHistogram{ + Boundaries: []float64{1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296}, + }, + }, + )), + otelsdk.WithView(otelsdk.NewView( + otelsdk.Instrument{Name: "*_request_latency", Scope: instrumentation.Scope{Name: meterName}}, + otelsdk.Stream{ + Aggregation: otelsdk.AggregationExplicitBucketHistogram{ + Boundaries: []float64{0.01, 0.05, 0.1, 
0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000}, + }, + }, + )), + } +} + +type Telemetry struct { + Tracer trace.Tracer + ReceivedMessages metric.Int64Counter + ReceivedMessageErrors metric.Int64Counter + ReceivedBytes metric.Int64Histogram + InboundRequestLatency metric.Float64Histogram + OutboundRequestLatency metric.Float64Histogram + SentMessages metric.Int64Counter + SentMessageErrors metric.Int64Counter + SentRequests metric.Int64Counter + SentRequestErrors metric.Int64Counter + SentBytes metric.Int64Histogram + LRUCache metric.Int64Counter + NetworkSize metric.Int64Counter +} + +// New initializes a new opentelemetry meter provider with the given options. +// This function also registers custom views for certain histogram metrics. +// Probably the most important configuration option to pass into this function +// is [api.WithReader] to provide, e.g., the prometheus exporter. +func New(meterProvider metric.MeterProvider, tracerProvider trace.TracerProvider) (*Telemetry, error) { + var err error + + if meterProvider == nil { + meterProvider = otel.GetMeterProvider() + } + + if tracerProvider == nil { + tracerProvider = otel.GetTracerProvider() + } + + t := &Telemetry{ + Tracer: tracerProvider.Tracer(tracerName), + } + + meter := meterProvider.Meter(meterName) + t.ReceivedMessages, err = meter.Int64Counter("received_messages", metric.WithDescription("Total number of messages received per RPC"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("received_messages counter: %w", err) + } + + t.ReceivedMessageErrors, err = meter.Int64Counter("received_message_errors", metric.WithDescription("Total number of errors for messages received per RPC"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("received_message_errors counter: %w", err) + } + + t.ReceivedBytes, err = meter.Int64Histogram("received_bytes", metric.WithDescription("Total received bytes per RPC"), metric.WithUnit("By")) + if err != nil { + return nil, fmt.Errorf("received_bytes histogram: %w", err) + } + + t.InboundRequestLatency, err = meter.Float64Histogram("inbound_request_latency", metric.WithDescription("Latency per RPC"), metric.WithUnit("ms")) + if err != nil { + return nil, fmt.Errorf("inbound_request_latency histogram: %w", err) + } + + t.OutboundRequestLatency, err = meter.Float64Histogram("outbound_request_latency", metric.WithDescription("Latency per RPC"), metric.WithUnit("ms")) + if err != nil { + return nil, fmt.Errorf("outbound_request_latency histogram: %w", err) + } + + t.SentMessages, err = meter.Int64Counter("sent_messages", metric.WithDescription("Total number of messages sent per RPC"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("sent_messages counter: %w", err) + } + + t.SentMessageErrors, err = meter.Int64Counter("sent_message_errors", metric.WithDescription("Total number of errors for messages sent per RPC"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("sent_message_errors counter: %w", err) + } + + t.SentRequests, err = meter.Int64Counter("sent_requests", metric.WithDescription("Total number of requests sent per RPC"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("sent_requests counter: %w", err) + } + + t.SentRequestErrors, err = meter.Int64Counter("sent_request_errors", metric.WithDescription("Total number of errors for requests sent per RPC"), metric.WithUnit("1")) + if err != nil 
{ + return nil, fmt.Errorf("sent_request_errors counter: %w", err) + } + + t.SentBytes, err = meter.Int64Histogram("sent_bytes", metric.WithDescription("Total sent bytes per RPC"), metric.WithUnit("By")) + if err != nil { + return nil, fmt.Errorf("sent_bytes histogram: %w", err) + } + + t.LRUCache, err = meter.Int64Counter("lru_cache", metric.WithDescription("Cache hit or miss counter"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("lru_cache counter: %w", err) + } + + t.NetworkSize, err = meter.Int64Counter("network_size", metric.WithDescription("Network size estimation"), metric.WithUnit("1")) + if err != nil { + return nil, fmt.Errorf("network_size counter: %w", err) + } + + return t, nil +} + +// Keys +const ( + AttrKeyCacheHit = "hit" + AttrKeyRecordType = "record_type" // currently only used for the provider backend LRU cache + AttrKeyMessageType = "message_type" + AttrKeyPeerID = "peer_id" + AttrKeyKey = "key" + // AttrKeyInstanceID identifies a dht instance by the pointer address. + // Useful for differentiating between different dhts that have the same peer id. + AttrKeyInstanceID = "instance_id" +) + +func WithAttributes(ctx context.Context, attrs ...attribute.KeyValue) context.Context { + set := attribute.NewSet(attrs...) + val := ctx.Value(attrsCtxKey) + if val != nil { + existing, ok := val.(attribute.Set) + if ok { + set = attribute.NewSet(append(existing.ToSlice(), attrs...)...) + } + } + return context.WithValue(ctx, attrsCtxKey, set) +} + +func FromContext(ctx context.Context, attrs ...attribute.KeyValue) attribute.Set { + val := ctx.Value(attrsCtxKey) + if val == nil { + return attribute.NewSet(attrs...) + } + + set, ok := val.(attribute.Set) + if !ok { + return attribute.NewSet(attrs...) + } + + return attribute.NewSet(append(set.ToSlice(), attrs...)...) 
+} From 165b00b7417d403872933bba773ec4366bada3f7 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Wed, 6 Sep 2023 12:03:43 +0200 Subject: [PATCH 18/26] use convenience attribute methods --- v2/backend_provider.go | 5 ++--- v2/stream.go | 28 ++++++++++++++-------------- v2/tele/tele.go | 37 ++++++++++++++++++++++++++----------- 3 files changed, 42 insertions(+), 28 deletions(-) diff --git a/v2/backend_provider.go b/v2/backend_provider.go index cd33772a..50b92453 100644 --- a/v2/backend_provider.go +++ b/v2/backend_provider.go @@ -18,7 +18,6 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" "github.com/multiformats/go-base32" - "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "golang.org/x/exp/slog" @@ -356,8 +355,8 @@ func (p *ProvidersBackend) collectGarbage(ctx context.Context) { // trackCacheQuery updates the prometheus metrics about cache hit/miss performance func (p *ProvidersBackend) trackCacheQuery(ctx context.Context, hit bool) { set := tele.FromContext(ctx, - attribute.Bool(tele.AttrKeyCacheHit, hit), - attribute.String(tele.AttrKeyRecordType, "provider"), + tele.AttrCacheHit(hit), + tele.AttrRecordType("provider"), ) p.cfg.Tele.LRUCache.Add(ctx, 1, metric.WithAttributeSet(set)) } diff --git a/v2/stream.go b/v2/stream.go index ce1f7c71..e053c6eb 100644 --- a/v2/stream.go +++ b/v2/stream.go @@ -24,21 +24,23 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) -func (d *DHT) commonTelemetryAttributes() []attribute.KeyValue { - return []attribute.KeyValue{ - attribute.String(tele.AttrKeyPeerID, d.host.ID().String()), - attribute.String(tele.AttrKeyInstanceID, fmt.Sprintf("%p", d)), - } -} - // streamHandler is the function that's registered with the libp2p host for // the DHT protocol ID. It sets up metrics and the resource manager scope. It // actually starts handling the stream and depending on the outcome resets or // closes it. func (d *DHT) streamHandler(s network.Stream) { - ctx, span := d.tele.Tracer.Start(context.Background(), "DHT.streamHandler", trace.WithAttributes(d.commonTelemetryAttributes()...)) + attrs := []attribute.KeyValue{ + tele.AttrPeerID(d.host.ID().String()), + tele.AttrInstanceID(fmt.Sprintf("%p", d)), + } + + // start stream handler span + ctx, span := d.tele.Tracer.Start(context.Background(), "DHT.streamHandler", trace.WithAttributes(attrs...)) defer span.End() + // attach attribute to context to make them available to metrics below + ctx = tele.WithAttributes(ctx, attrs...) + if err := s.Scope().SetService(ServiceName); err != nil { d.log.LogAttrs(ctx, slog.LevelWarn, "error attaching stream to DHT service", slog.String("err", err.Error())) d.logErr(s.Reset(), "failed to reset stream") @@ -78,8 +80,6 @@ func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error { ctx, span := d.tele.Tracer.Start(ctx, "DHT.handleNewStream") defer span.End() - ctx = tele.WithAttributes(ctx, d.commonTelemetryAttributes()...) 
- // init structured logger that always contains the remote peers PeerID slogger := d.log.With(slog.String("from", s.Conn().RemotePeer().String())) @@ -120,8 +120,8 @@ func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error { } ctx = tele.WithAttributes(ctx, - attribute.String(tele.AttrKeyMessageType, req.GetType().String()), - attribute.String(tele.AttrKeyKey, base64.StdEncoding.EncodeToString(req.GetKey())), + tele.AttrMessageType(req.GetType().String()), + tele.AttrKey(base64.StdEncoding.EncodeToString(req.GetKey())), ) // extend metrics context and slogger with message information. @@ -181,7 +181,7 @@ func (d *DHT) streamReadMsg(ctx context.Context, slogger *slog.Logger, r msgio.R // record any potential partial message we have received if len(data) > 0 { - mattrs := metric.WithAttributeSet(tele.FromContext(ctx, attribute.String(tele.AttrKeyMessageType, "UNKNOWN"))) + mattrs := metric.WithAttributeSet(tele.FromContext(ctx, tele.AttrMessageType("UNKNOWN"))) d.tele.ReceivedMessages.Add(ctx, 1, mattrs) d.tele.ReceivedMessageErrors.Add(ctx, 1, mattrs) d.tele.ReceivedBytes.Record(ctx, int64(len(data)), mattrs) @@ -204,7 +204,7 @@ func (d *DHT) streamUnmarshalMsg(ctx context.Context, slogger *slog.Logger, data if err := proto.Unmarshal(data, &req); err != nil { slogger.LogAttrs(ctx, slog.LevelDebug, "error unmarshalling message", slog.String("err", err.Error())) - mattrs := metric.WithAttributeSet(tele.FromContext(ctx, attribute.String(tele.AttrKeyMessageType, "UNKNOWN"))) + mattrs := metric.WithAttributeSet(tele.FromContext(ctx, tele.AttrMessageType("UNKNOWN"))) d.tele.ReceivedMessageErrors.Add(ctx, 1, mattrs) d.tele.ReceivedBytes.Record(ctx, int64(len(data)), mattrs) diff --git a/v2/tele/tele.go b/v2/tele/tele.go index e22382bc..b0ce8925 100644 --- a/v2/tele/tele.go +++ b/v2/tele/tele.go @@ -140,17 +140,32 @@ func New(meterProvider metric.MeterProvider, tracerProvider trace.TracerProvider return t, nil } -// Keys -const ( - AttrKeyCacheHit = "hit" - AttrKeyRecordType = "record_type" // currently only used for the provider backend LRU cache - AttrKeyMessageType = "message_type" - AttrKeyPeerID = "peer_id" - AttrKeyKey = "key" - // AttrKeyInstanceID identifies a dht instance by the pointer address. - // Useful for differentiating between different dhts that have the same peer id. - AttrKeyInstanceID = "instance_id" -) +// AttrInstanceID identifies a dht instance by the pointer address. +// Useful for differentiating between different DHTs that have the same peer id. +func AttrInstanceID(instanceID string) attribute.KeyValue { + return attribute.String("instance_id", instanceID) +} + +func AttrPeerID(pid string) attribute.KeyValue { + return attribute.String("peer_id", pid) +} + +func AttrCacheHit(hit bool) attribute.KeyValue { + return attribute.Bool("hit", hit) +} + +// AttrRecordType is currently only used for the provider backend LRU cache +func AttrRecordType(val string) attribute.KeyValue { + return attribute.String("record_type", val) +} + +func AttrMessageType(val string) attribute.KeyValue { + return attribute.String("message_type", val) +} + +func AttrKey(val string) attribute.KeyValue { + return attribute.String("key", val) +} func WithAttributes(ctx context.Context, attrs ...attribute.KeyValue) context.Context { set := attribute.NewSet(attrs...) 
From 413aefd4767a0f8e637b11d4f3d380cd2b710c89 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Wed, 6 Sep 2023 12:44:31 +0200 Subject: [PATCH 19/26] let coord package use tele --- v2/backend_provider.go | 2 +- v2/backend_record.go | 2 +- v2/coord/coordinator.go | 111 +++++++++++++++++++++-------------- v2/coord/coordinator_test.go | 52 ++++++++++++---- v2/coord/query.go | 62 +++++++++---------- v2/coord/routing.go | 18 +++--- v2/coord/routing_test.go | 16 ++--- v2/tele/tele.go | 87 ++++++++++++++++++--------- 8 files changed, 219 insertions(+), 131 deletions(-) diff --git a/v2/backend_provider.go b/v2/backend_provider.go index 50b92453..8c967a59 100644 --- a/v2/backend_provider.go +++ b/v2/backend_provider.go @@ -109,7 +109,7 @@ type ProvidersBackendConfig struct { // configuration is passed to [NewBackendProvider], this default configuration // here is used. func DefaultProviderBackendConfig() (*ProvidersBackendConfig, error) { - telemetry, err := tele.New(nil, nil) + telemetry, err := tele.NewWithGlobalProviders() if err != nil { return nil, fmt.Errorf("new telemetry: %w", err) } diff --git a/v2/backend_record.go b/v2/backend_record.go index 3f350115..5ad3bfdc 100644 --- a/v2/backend_record.go +++ b/v2/backend_record.go @@ -31,7 +31,7 @@ type RecordBackendConfig struct { } func DefaultRecordBackendConfig() (*RecordBackendConfig, error) { - telemetry, err := tele.New(nil, nil) + telemetry, err := tele.NewWithGlobalProviders() if err != nil { return nil, fmt.Errorf("new telemetry: %w", err) } diff --git a/v2/coord/coordinator.go b/v2/coord/coordinator.go index 9d33066f..c594b206 100644 --- a/v2/coord/coordinator.go +++ b/v2/coord/coordinator.go @@ -15,11 +15,13 @@ import ( "github.com/plprobelab/go-kademlia/network/address" "github.com/plprobelab/go-kademlia/query" "github.com/plprobelab/go-kademlia/routing" - "github.com/plprobelab/go-kademlia/util" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap/exp/zapslog" "golang.org/x/exp/slog" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) // A Coordinator coordinates the state machines that comprise a Kademlia DHT @@ -61,7 +63,8 @@ type CoordinatorConfig struct { RequestConcurrency int // the maximum number of concurrent requests that each query may have in flight RequestTimeout time.Duration // the timeout queries should use for contacting a single node - Logger *slog.Logger // a structured logger that should be used when logging. + Logger *slog.Logger // a structured logger that should be used when logging. + Tele *tele.Telemetry // a struct holding a reference to various metric counters/histograms and a tracer } // Validate checks the configuration options and returns an error if any have invalid values. 
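A minimal sketch of how the coordinator is wired up after this change, assuming the caller relies on the global OpenTelemetry providers (the variables self, rtr and rt are the parameters NewCoordinator already takes; nothing else here is introduced by the patch):

    cfg, err := DefaultConfig() // builds cfg.Tele via tele.NewWithGlobalProviders()
    if err != nil {
        return nil, fmt.Errorf("default config: %w", err)
    }
    // Alternatively, inject telemetry bound to custom providers:
    // cfg.Tele, err = tele.New(meterProvider, tracerProvider)
    c, err := NewCoordinator(self, rtr, rt, cfg)
    if err != nil {
        return nil, fmt.Errorf("new coordinator: %w", err)
    }
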
@@ -106,10 +109,23 @@ func (cfg *CoordinatorConfig) Validate() error { Err: fmt.Errorf("logger must not be nil"), } } + + if cfg.Tele == nil { + return &kaderr.ConfigurationError{ + Component: "CoordinatorConfig", + Err: fmt.Errorf("telemetry must not be nil"), + } + } + return nil } -func DefaultConfig() *CoordinatorConfig { +func DefaultConfig() (*CoordinatorConfig, error) { + telemetry, err := tele.NewWithGlobalProviders() + if err != nil { + return nil, fmt.Errorf("new telemetry: %w", err) + } + return &CoordinatorConfig{ Clock: clock.New(), // use standard time PeerstoreTTL: 10 * time.Minute, @@ -118,12 +134,17 @@ func DefaultConfig() *CoordinatorConfig { RequestConcurrency: 3, RequestTimeout: time.Minute, Logger: slog.New(zapslog.NewHandler(logging.Logger("coord").Desugar().Core())), - } + Tele: telemetry, + }, nil } func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[KadKey, kad.NodeID[KadKey]], cfg *CoordinatorConfig) (*Coordinator, error) { if cfg == nil { - cfg = DefaultConfig() + c, err := DefaultConfig() + if err != nil { + return nil, fmt.Errorf("default config: %w", err) + } + cfg = c } else if err := cfg.Validate(); err != nil { return nil, err } @@ -139,7 +160,7 @@ func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[KadKey, if err != nil { return nil, fmt.Errorf("query pool: %w", err) } - queryBehaviour := NewPooledQueryBehaviour(qp, cfg.Logger) + queryBehaviour := NewPooledQueryBehaviour(qp, cfg.Logger, cfg.Tele.Tracer) bootstrapCfg := routing.DefaultBootstrapConfig[KadKey, ma.Multiaddr]() bootstrapCfg.Clock = cfg.Clock @@ -177,7 +198,7 @@ func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[KadKey, return nil, fmt.Errorf("probe: %w", err) } - routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, cfg.Logger) + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, cfg.Logger, cfg.Tele.Tracer) networkBehaviour := NewNetworkBehaviour(rtr, cfg.Logger) @@ -198,13 +219,13 @@ func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[KadKey, return d, nil } -func (d *Coordinator) ID() peer.ID { - return d.self +func (c *Coordinator) ID() peer.ID { + return c.self } -func (d *Coordinator) Addresses() []ma.Multiaddr { +func (c *Coordinator) Addresses() []ma.Multiaddr { // TODO: return configured listen addresses - info, err := d.rtr.GetNodeInfo(context.TODO(), d.self) + info, err := c.rtr.GetNodeInfo(context.TODO(), c.self) if err != nil { return nil } @@ -212,33 +233,33 @@ func (d *Coordinator) Addresses() []ma.Multiaddr { } // RoutingNotifications returns a channel that may be read to be notified of routing updates -func (d *Coordinator) RoutingNotifications() <-chan RoutingNotification { - return d.routingNotifications +func (c *Coordinator) RoutingNotifications() <-chan RoutingNotification { + return c.routingNotifications } -func (d *Coordinator) eventLoop() { +func (c *Coordinator) eventLoop() { ctx := context.Background() for { var ev BehaviourEvent var ok bool select { - case <-d.networkBehaviour.Ready(): - ev, ok = d.networkBehaviour.Perform(ctx) - case <-d.routingBehaviour.Ready(): - ev, ok = d.routingBehaviour.Perform(ctx) - case <-d.queryBehaviour.Ready(): - ev, ok = d.queryBehaviour.Perform(ctx) + case <-c.networkBehaviour.Ready(): + ev, ok = c.networkBehaviour.Perform(ctx) + case <-c.routingBehaviour.Ready(): + ev, ok = c.routingBehaviour.Perform(ctx) + case <-c.queryBehaviour.Ready(): + ev, ok = c.queryBehaviour.Perform(ctx) } if ok { - 
d.dispatchEvent(ctx, ev) + c.dispatchEvent(ctx, ev) } } } func (c *Coordinator) dispatchEvent(ctx context.Context, ev BehaviourEvent) { - ctx, span := util.StartSpan(ctx, "Coordinator.dispatchEvent") + ctx, span := c.cfg.Tele.Tracer.Start(ctx, "Coordinator.dispatchEvent", trace.WithAttributes(attribute.String("event_type", fmt.Sprintf("%T", ev)))) defer span.End() switch ev := ev.(type) { @@ -261,12 +282,12 @@ func (c *Coordinator) dispatchEvent(ctx context.Context, ev BehaviourEvent) { // GetNode retrieves the node associated with the given node id from the DHT's local routing table. // If the node isn't found in the table, it returns ErrNodeNotFound. -func (d *Coordinator) GetNode(ctx context.Context, id peer.ID) (Node, error) { - if _, exists := d.rt.GetNode(kadt.PeerID(id).Key()); !exists { +func (c *Coordinator) GetNode(ctx context.Context, id peer.ID) (Node, error) { + if _, exists := c.rt.GetNode(kadt.PeerID(id).Key()); !exists { return nil, ErrNodeNotFound } - nh, err := d.networkBehaviour.getNodeHandler(ctx, id) + nh, err := c.networkBehaviour.getNodeHandler(ctx, id) if err != nil { return nil, err } @@ -274,11 +295,11 @@ func (d *Coordinator) GetNode(ctx context.Context, id peer.ID) (Node, error) { } // GetClosestNodes requests the n closest nodes to the key from the node's local routing table. -func (d *Coordinator) GetClosestNodes(ctx context.Context, k KadKey, n int) ([]Node, error) { - closest := d.rt.NearestNodes(k, n) +func (c *Coordinator) GetClosestNodes(ctx context.Context, k KadKey, n int) ([]Node, error) { + closest := c.rt.NearestNodes(k, n) nodes := make([]Node, 0, len(closest)) for _, id := range closest { - nh, err := d.networkBehaviour.getNodeHandler(ctx, NodeIDToPeerID(id)) + nh, err := c.networkBehaviour.getNodeHandler(ctx, NodeIDToPeerID(id)) if err != nil { return nil, err } @@ -289,25 +310,25 @@ func (d *Coordinator) GetClosestNodes(ctx context.Context, k KadKey, n int) ([]N // GetValue requests that the node return any value associated with the supplied key. // If the node does not have a value for the key it returns ErrValueNotFound. -func (d *Coordinator) GetValue(ctx context.Context, k KadKey) (Value, error) { +func (c *Coordinator) GetValue(ctx context.Context, k KadKey) (Value, error) { panic("not implemented") } // PutValue requests that the node stores a value to be associated with the supplied key. // If the node cannot or chooses not to store the value for the key it returns ErrValueNotAccepted. -func (d *Coordinator) PutValue(ctx context.Context, r Value, q int) error { +func (c *Coordinator) PutValue(ctx context.Context, r Value, q int) error { panic("not implemented") } // Query traverses the DHT calling fn for each node visited. 
-func (d *Coordinator) Query(ctx context.Context, target KadKey, fn QueryFunc) (QueryStats, error) { - ctx, span := util.StartSpan(ctx, "Coordinator.Query") +func (c *Coordinator) Query(ctx context.Context, target KadKey, fn QueryFunc) (QueryStats, error) { + ctx, span := c.cfg.Tele.Tracer.Start(ctx, "Coordinator.Query") defer span.End() ctx, cancel := context.WithCancel(ctx) defer cancel() - seeds, err := d.GetClosestNodes(ctx, target, 20) + seeds, err := c.GetClosestNodes(ctx, target, 20) if err != nil { return QueryStats{}, err } @@ -330,7 +351,7 @@ func (d *Coordinator) Query(ctx context.Context, target KadKey, fn QueryFunc) (Q } // queue the start of the query - d.queryBehaviour.Notify(ctx, cmd) + c.queryBehaviour.Notify(ctx, cmd) var lastStats QueryStats for { @@ -347,7 +368,7 @@ func (d *Coordinator) Query(ctx context.Context, target KadKey, fn QueryFunc) (Q Success: ev.Stats.Success, Failure: ev.Stats.Failure, } - nh, err := d.networkBehaviour.getNodeHandler(ctx, ev.NodeID) + nh, err := c.networkBehaviour.getNodeHandler(ctx, ev.NodeID) if err != nil { // ignore unknown node break @@ -356,7 +377,7 @@ func (d *Coordinator) Query(ctx context.Context, target KadKey, fn QueryFunc) (Q err = fn(ctx, nh, lastStats) if errors.Is(err, SkipRemaining) { // done - d.queryBehaviour.Notify(ctx, &EventStopQuery{QueryID: queryID}) + c.queryBehaviour.Notify(ctx, &EventStopQuery{QueryID: queryID}) return lastStats, nil } if errors.Is(err, SkipNode) { @@ -365,7 +386,7 @@ func (d *Coordinator) Query(ctx context.Context, target KadKey, fn QueryFunc) (Q } if err != nil { // user defined error that terminates the query - d.queryBehaviour.Notify(ctx, &EventStopQuery{QueryID: queryID}) + c.queryBehaviour.Notify(ctx, &EventStopQuery{QueryID: queryID}) return lastStats, err } @@ -384,16 +405,16 @@ func (d *Coordinator) Query(ctx context.Context, target KadKey, fn QueryFunc) (Q // AddNodes suggests new DHT nodes and their associated addresses to be added to the routing table. // If the routing table is updated as a result of this operation an EventRoutingUpdated notification // is emitted on the routing notification channel. -func (d *Coordinator) AddNodes(ctx context.Context, infos []peer.AddrInfo) error { - ctx, span := util.StartSpan(ctx, "Coordinator.AddNodes") +func (c *Coordinator) AddNodes(ctx context.Context, infos []peer.AddrInfo) error { + ctx, span := c.cfg.Tele.Tracer.Start(ctx, "Coordinator.AddNodes") defer span.End() for _, info := range infos { - if info.ID == d.self { + if info.ID == c.self { // skip self continue } - d.routingBehaviour.Notify(ctx, &EventAddAddrInfo{ + c.routingBehaviour.Notify(ctx, &EventAddAddrInfo{ NodeInfo: info, }) @@ -403,12 +424,12 @@ func (d *Coordinator) AddNodes(ctx context.Context, infos []peer.AddrInfo) error } // Bootstrap instructs the dht to begin bootstrapping the routing table. 
-func (d *Coordinator) Bootstrap(ctx context.Context, seeds []peer.ID) error { - ctx, span := util.StartSpan(ctx, "Coordinator.Bootstrap") +func (c *Coordinator) Bootstrap(ctx context.Context, seeds []peer.ID) error { + ctx, span := c.cfg.Tele.Tracer.Start(ctx, "Coordinator.Bootstrap") defer span.End() - d.routingBehaviour.Notify(ctx, &EventStartBootstrap{ + c.routingBehaviour.Notify(ctx, &EventStartBootstrap{ // Bootstrap state machine uses the message - Message: &fakeMessage{key: kadt.PeerID(d.self).Key()}, + Message: &fakeMessage{key: kadt.PeerID(c.self).Key()}, SeedNodes: seeds, }) diff --git a/v2/coord/coordinator_test.go b/v2/coord/coordinator_test.go index 213c3588..3d22c9d3 100644 --- a/v2/coord/coordinator_test.go +++ b/v2/coord/coordinator_test.go @@ -37,18 +37,24 @@ func expectEventType(t *testing.T, ctx context.Context, events <-chan RoutingNot func TestConfigValidate(t *testing.T) { t.Run("default is valid", func(t *testing.T) { - cfg := DefaultConfig() + cfg, err := DefaultConfig() + require.NoError(t, err) + require.NoError(t, cfg.Validate()) }) t.Run("clock is not nil", func(t *testing.T) { - cfg := DefaultConfig() + cfg, err := DefaultConfig() + require.NoError(t, err) + cfg.Clock = nil require.Error(t, cfg.Validate()) }) t.Run("query concurrency positive", func(t *testing.T) { - cfg := DefaultConfig() + cfg, err := DefaultConfig() + require.NoError(t, err) + cfg.QueryConcurrency = 0 require.Error(t, cfg.Validate()) cfg.QueryConcurrency = -1 @@ -56,7 +62,9 @@ func TestConfigValidate(t *testing.T) { }) t.Run("query timeout positive", func(t *testing.T) { - cfg := DefaultConfig() + cfg, err := DefaultConfig() + require.NoError(t, err) + cfg.QueryTimeout = 0 require.Error(t, cfg.Validate()) cfg.QueryTimeout = -1 @@ -64,7 +72,9 @@ func TestConfigValidate(t *testing.T) { }) t.Run("request concurrency positive", func(t *testing.T) { - cfg := DefaultConfig() + cfg, err := DefaultConfig() + require.NoError(t, err) + cfg.RequestConcurrency = 0 require.Error(t, cfg.Validate()) cfg.QueryConcurrency = -1 @@ -72,7 +82,9 @@ func TestConfigValidate(t *testing.T) { }) t.Run("request timeout positive", func(t *testing.T) { - cfg := DefaultConfig() + cfg, err := DefaultConfig() + require.NoError(t, err) + cfg.RequestTimeout = 0 require.Error(t, cfg.Validate()) cfg.RequestTimeout = -1 @@ -80,10 +92,20 @@ func TestConfigValidate(t *testing.T) { }) t.Run("logger not nil", func(t *testing.T) { - cfg := DefaultConfig() + cfg, err := DefaultConfig() + require.NoError(t, err) + cfg.Logger = nil require.Error(t, cfg.Validate()) }) + + t.Run("telemetry not nil", func(t *testing.T) { + cfg, err := DefaultConfig() + require.NoError(t, err) + + cfg.Tele = nil + require.Error(t, cfg.Validate()) + }) } func TestExhaustiveQuery(t *testing.T) { @@ -93,7 +115,9 @@ func TestExhaustiveQuery(t *testing.T) { clk := clock.NewMock() _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - ccfg := DefaultConfig() + ccfg, err := DefaultConfig() + require.NoError(t, err) + ccfg.Clock = clk ccfg.PeerstoreTTL = peerstoreTTL @@ -132,7 +156,9 @@ func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - ccfg := DefaultConfig() + ccfg, err := DefaultConfig() + require.NoError(t, err) + ccfg.Clock = clk ccfg.PeerstoreTTL = peerstoreTTL @@ -195,7 +221,9 @@ func TestBootstrap(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - ccfg := DefaultConfig() + ccfg, err := DefaultConfig() + 
require.NoError(t, err) + ccfg.Clock = clk ccfg.PeerstoreTTL = peerstoreTTL @@ -254,7 +282,9 @@ func TestIncludeNode(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - ccfg := DefaultConfig() + ccfg, err := DefaultConfig() + require.NoError(t, err) + ccfg.Clock = clk ccfg.PeerstoreTTL = peerstoreTTL diff --git a/v2/coord/query.go b/v2/coord/query.go index 69766bb4..8ef2bdfc 100644 --- a/v2/coord/query.go +++ b/v2/coord/query.go @@ -8,7 +8,7 @@ import ( "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/query" - "github.com/plprobelab/go-kademlia/util" + "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" ) @@ -21,24 +21,26 @@ type PooledQueryBehaviour struct { ready chan struct{} logger *slog.Logger + tracer trace.Tracer } -func NewPooledQueryBehaviour(pool *query.Pool[KadKey, ma.Multiaddr], logger *slog.Logger) *PooledQueryBehaviour { +func NewPooledQueryBehaviour(pool *query.Pool[KadKey, ma.Multiaddr], logger *slog.Logger, tracer trace.Tracer) *PooledQueryBehaviour { h := &PooledQueryBehaviour{ pool: pool, waiters: make(map[query.QueryID]NotifyCloser[BehaviourEvent]), ready: make(chan struct{}, 1), logger: logger, + tracer: tracer, } return h } -func (r *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { - ctx, span := util.StartSpan(ctx, "PooledQueryBehaviour.Notify") +func (p *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { + ctx, span := p.tracer.Start(ctx, "PooledQueryBehaviour.Notify") defer span.End() - r.pendingMu.Lock() - defer r.pendingMu.Unlock() + p.pendingMu.Lock() + defer p.pendingMu.Unlock() var cmd query.PoolEvent switch ev := ev.(type) { @@ -51,7 +53,7 @@ func (r *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { KnownClosestNodes: SliceOfPeerIDToSliceOfNodeID(ev.KnownClosestNodes), } if ev.Notify != nil { - r.waiters[ev.QueryID] = ev.Notify + p.waiters[ev.QueryID] = ev.Notify } case *EventStopQuery: @@ -62,11 +64,11 @@ func (r *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { case *EventGetCloserNodesSuccess: for _, info := range ev.CloserNodes { // TODO: do this after advancing pool - r.pending = append(r.pending, &EventAddAddrInfo{ + p.pending = append(p.pending, &EventAddAddrInfo{ NodeInfo: info, }) } - waiter, ok := r.waiters[ev.QueryID] + waiter, ok := p.waiters[ev.QueryID] if ok { waiter.Notify(ctx, &EventQueryProgressed{ NodeID: ev.To.ID, @@ -91,39 +93,39 @@ func (r *PooledQueryBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { } // attempt to advance the query pool - ev, ok := r.advancePool(ctx, cmd) + ev, ok := p.advancePool(ctx, cmd) if ok { - r.pending = append(r.pending, ev) + p.pending = append(p.pending, ev) } - if len(r.pending) > 0 { + if len(p.pending) > 0 { select { - case r.ready <- struct{}{}: + case p.ready <- struct{}{}: default: } } } -func (r *PooledQueryBehaviour) Ready() <-chan struct{} { - return r.ready +func (p *PooledQueryBehaviour) Ready() <-chan struct{} { + return p.ready } -func (r *PooledQueryBehaviour) Perform(ctx context.Context) (BehaviourEvent, bool) { - ctx, span := util.StartSpan(ctx, "PooledQueryBehaviour.Perform") +func (p *PooledQueryBehaviour) Perform(ctx context.Context) (BehaviourEvent, bool) { + ctx, span := p.tracer.Start(ctx, "PooledQueryBehaviour.Perform") defer span.End() // No inbound work can be done until Perform is complete - r.pendingMu.Lock() - defer r.pendingMu.Unlock() + p.pendingMu.Lock() + defer 
p.pendingMu.Unlock() for { // drain queued events first. - if len(r.pending) > 0 { + if len(p.pending) > 0 { var ev BehaviourEvent - ev, r.pending = r.pending[0], r.pending[1:] + ev, p.pending = p.pending[0], p.pending[1:] - if len(r.pending) > 0 { + if len(p.pending) > 0 { select { - case r.ready <- struct{}{}: + case p.ready <- struct{}{}: default: } } @@ -131,36 +133,36 @@ func (r *PooledQueryBehaviour) Perform(ctx context.Context) (BehaviourEvent, boo } // attempt to advance the query pool - ev, ok := r.advancePool(ctx, &query.EventPoolPoll{}) + ev, ok := p.advancePool(ctx, &query.EventPoolPoll{}) if ok { return ev, true } - if len(r.pending) == 0 { + if len(p.pending) == 0 { return nil, false } } } -func (r *PooledQueryBehaviour) advancePool(ctx context.Context, ev query.PoolEvent) (BehaviourEvent, bool) { - ctx, span := util.StartSpan(ctx, "PooledQueryBehaviour.advancePool") +func (p *PooledQueryBehaviour) advancePool(ctx context.Context, ev query.PoolEvent) (BehaviourEvent, bool) { + ctx, span := p.tracer.Start(ctx, "PooledQueryBehaviour.advancePool") defer span.End() - pstate := r.pool.Advance(ctx, ev) + pstate := p.pool.Advance(ctx, ev) switch st := pstate.(type) { case *query.StatePoolQueryMessage[KadKey, ma.Multiaddr]: return &EventOutboundGetCloserNodes{ QueryID: st.QueryID, To: NodeIDToAddrInfo(st.NodeID), Target: st.Message.Target(), - Notify: r, + Notify: p, }, true case *query.StatePoolWaitingAtCapacity: // nothing to do except wait for message response or timeout case *query.StatePoolWaitingWithCapacity: // nothing to do except wait for message response or timeout case *query.StatePoolQueryFinished: - waiter, ok := r.waiters[st.QueryID] + waiter, ok := p.waiters[st.QueryID] if ok { waiter.Notify(ctx, &EventQueryFinished{ QueryID: st.QueryID, diff --git a/v2/coord/routing.go b/v2/coord/routing.go index 11e70c41..a78a1cb0 100644 --- a/v2/coord/routing.go +++ b/v2/coord/routing.go @@ -8,8 +8,8 @@ import ( "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/routing" - "github.com/plprobelab/go-kademlia/util" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" @@ -33,9 +33,10 @@ type RoutingBehaviour struct { ready chan struct{} logger *slog.Logger + tracer trace.Tracer } -func NewRoutingBehaviour(self peer.ID, bootstrap SM[routing.BootstrapEvent, routing.BootstrapState], include SM[routing.IncludeEvent, routing.IncludeState], probe SM[routing.ProbeEvent, routing.ProbeState], logger *slog.Logger) *RoutingBehaviour { +func NewRoutingBehaviour(self peer.ID, bootstrap SM[routing.BootstrapEvent, routing.BootstrapState], include SM[routing.IncludeEvent, routing.IncludeState], probe SM[routing.ProbeEvent, routing.ProbeState], logger *slog.Logger, tracer trace.Tracer) *RoutingBehaviour { r := &RoutingBehaviour{ self: self, bootstrap: bootstrap, @@ -43,12 +44,13 @@ func NewRoutingBehaviour(self peer.ID, bootstrap SM[routing.BootstrapEvent, rout probe: probe, ready: make(chan struct{}, 1), logger: logger, + tracer: tracer, } return r } func (r *RoutingBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { - ctx, span := util.StartSpan(ctx, "RoutingBehaviour.Notify") + ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.Notify") defer span.End() r.pendingMu.Lock() @@ -58,7 +60,7 @@ func (r *RoutingBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { // notify must only be called while r.pendingMu is held func (r 
*RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { - ctx, span := util.StartSpan(ctx, "RoutingBehaviour.notify") + ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.notify") defer span.End() switch ev := ev.(type) { case *EventStartBootstrap: @@ -200,7 +202,7 @@ func (r *RoutingBehaviour) Ready() <-chan struct{} { } func (r *RoutingBehaviour) Perform(ctx context.Context) (BehaviourEvent, bool) { - ctx, span := util.StartSpan(ctx, "RoutingBehaviour.Perform") + ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.Perform") defer span.End() // No inbound work can be done until Perform is complete @@ -247,7 +249,7 @@ func (r *RoutingBehaviour) Perform(ctx context.Context) (BehaviourEvent, bool) { } func (r *RoutingBehaviour) advanceBootstrap(ctx context.Context, ev routing.BootstrapEvent) (BehaviourEvent, bool) { - ctx, span := util.StartSpan(ctx, "RoutingBehaviour.advanceBootstrap") + ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.advanceBootstrap") defer span.End() bstate := r.bootstrap.Advance(ctx, ev) switch st := bstate.(type) { @@ -276,7 +278,7 @@ func (r *RoutingBehaviour) advanceBootstrap(ctx context.Context, ev routing.Boot } func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.IncludeEvent) (BehaviourEvent, bool) { - ctx, span := util.StartSpan(ctx, "RoutingBehaviour.advanceInclude") + ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.advanceInclude") defer span.End() istate := r.include.Advance(ctx, ev) switch st := istate.(type) { @@ -317,7 +319,7 @@ func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.Includ } func (r *RoutingBehaviour) advanceProbe(ctx context.Context, ev routing.ProbeEvent) (BehaviourEvent, bool) { - ctx, span := util.StartSpan(ctx, "RoutingBehaviour.advanceProbe") + ctx, span := r.tracer.Start(ctx, "RoutingBehaviour.advanceProbe") defer span.End() st := r.probe.Advance(ctx, ev) switch st := st.(type) { diff --git a/v2/coord/routing_test.go b/v2/coord/routing_test.go index 6b6e0498..ecaeef2b 100644 --- a/v2/coord/routing_test.go +++ b/v2/coord/routing_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + "go.opentelemetry.io/otel" + "github.com/benbjohnson/clock" "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" @@ -35,7 +37,7 @@ func TestRoutingStartBootstrapSendsEvent(t *testing.T) { include := new(NullSM[routing.IncludeEvent, routing.IncludeState]) probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) - routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default()) + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) req := &pb.Message{ Type: pb.Message_FIND_NODE, @@ -74,7 +76,7 @@ func TestRoutingBootstrapGetClosestNodesSuccess(t *testing.T) { include := new(NullSM[routing.IncludeEvent, routing.IncludeState]) probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) - routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default()) + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) ev := &EventGetCloserNodesSuccess{ QueryID: query.QueryID("bootstrap"), @@ -108,7 +110,7 @@ func TestRoutingBootstrapGetClosestNodesFailure(t *testing.T) { include := new(NullSM[routing.IncludeEvent, routing.IncludeState]) probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) - routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default()) + routingBehaviour := 
NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) failure := errors.New("failed") ev := &EventGetCloserNodesFailure{ @@ -144,7 +146,7 @@ func TestRoutingAddNodeInfoSendsEvent(t *testing.T) { bootstrap := new(NullSM[routing.BootstrapEvent, routing.BootstrapState]) probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) - routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default()) + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) ev := &EventAddAddrInfo{ NodeInfo: nodes[2].NodeInfo, @@ -175,7 +177,7 @@ func TestRoutingIncludeGetClosestNodesSuccess(t *testing.T) { bootstrap := new(NullSM[routing.BootstrapEvent, routing.BootstrapState]) probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) - routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default()) + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) ev := &EventGetCloserNodesSuccess{ QueryID: query.QueryID("include"), @@ -210,7 +212,7 @@ func TestRoutingIncludeGetClosestNodesFailure(t *testing.T) { bootstrap := new(NullSM[routing.BootstrapEvent, routing.BootstrapState]) probe := new(NullSM[routing.ProbeEvent, routing.ProbeState]) - routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default()) + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) failure := errors.New("failed") ev := &EventGetCloserNodesFailure{ @@ -255,7 +257,7 @@ func TestRoutingIncludedNodeAddToProbeList(t *testing.T) { // ensure bootstrap is always idle bootstrap := NewRecordingSM[routing.BootstrapEvent, routing.BootstrapState](&routing.StateBootstrapIdle{}) - routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default()) + routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, slog.Default(), otel.Tracer("test")) // a new node to be included candidate := nodes[len(nodes)-1].NodeInfo diff --git a/v2/tele/tele.go b/v2/tele/tele.go index b0ce8925..acfc68a2 100644 --- a/v2/tele/tele.go +++ b/v2/tele/tele.go @@ -8,39 +8,27 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk/instrumentation" - otelsdk "go.opentelemetry.io/otel/sdk/metric" + motel "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/trace" ) +// ctxKey is an unexported type alias for the value of a context key. This is +// used to attach metric values to a context and get them out of a context. type ctxKey struct{} var ( - meterName = "github.com/libp2p/go-libp2p-kad-dht/v2" - tracerName = "go-libp2p-kad-dht" + meterName = "github.com/libp2p/go-libp2p-kad-dht/v2" + tracerName = "go-libp2p-kad-dht" + + // attrsCtxKey is the actual context key value that's used as a key for + // metric values that are attached to a context. 
attrsCtxKey = ctxKey{} ) -var MeterProviderOpts = func() []otelsdk.Option { - return []otelsdk.Option{ - otelsdk.WithView(otelsdk.NewView( - otelsdk.Instrument{Name: "*_bytes", Scope: instrumentation.Scope{Name: meterName}}, - otelsdk.Stream{ - Aggregation: otelsdk.AggregationExplicitBucketHistogram{ - Boundaries: []float64{1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296}, - }, - }, - )), - otelsdk.WithView(otelsdk.NewView( - otelsdk.Instrument{Name: "*_request_latency", Scope: instrumentation.Scope{Name: meterName}}, - otelsdk.Stream{ - Aggregation: otelsdk.AggregationExplicitBucketHistogram{ - Boundaries: []float64{0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000}, - }, - }, - )), - } -} - +// Telemetry is the struct that holds a reference to all metrics and the tracer. +// Initialize this struct with [New] or [NewWithGlobalProviders]. Make sure +// to also register the [MeterProviderOpts] with your custom or the global +// [metric.MeterProvider]. type Telemetry struct { Tracer trace.Tracer ReceivedMessages metric.Int64Counter @@ -57,10 +45,17 @@ type Telemetry struct { NetworkSize metric.Int64Counter } -// New initializes a new opentelemetry meter provider with the given options. -// This function also registers custom views for certain histogram metrics. -// Probably the most important configuration option to pass into this function -// is [api.WithReader] to provide, e.g., the prometheus exporter. +// NewWithGlobalProviders uses the global meter and tracer providers from +// opentelemetry. Check out the documentation of [MeterProviderOpts] for +// implications of using this constructor. +func NewWithGlobalProviders() (*Telemetry, error) { + return New(otel.GetMeterProvider(), otel.GetTracerProvider()) +} + +// New initializes a Telemetry struct with the given meter and tracer providers. +// It constructs the different metric counters and histograms. The histograms +// have custom boundaries. Therefore, the given [metric.MeterProvider] should +// have the custom view registered that [MeterProviderOpts] returns. func New(meterProvider metric.MeterProvider, tracerProvider trace.TracerProvider) (*Telemetry, error) { var err error @@ -140,6 +135,42 @@ func New(meterProvider metric.MeterProvider, tracerProvider trace.TracerProvider return t, nil } +// MeterProviderOpts is a method that returns metric options. Make sure +// to register these options to your [metric.MeterProvider]. Unfortunately, +// attaching these options to an already existing [metric.MeterProvider] +// is not possible. Therefore, you can't just register the options with the +// global MeterProvider that is returned by [otel.GetMeterProvider]. +// One example to register a new [metric.MeterProvider] would be: +// +// provider := metric.NewMeterProvider(tele.MeterProviderOpts()...) // <-- also add your options, like a metric reader +// otel.SetMeterProvider(provider) +// +// Then you can use [NewWithGlobalProviders] and it will use a correctly +// configured meter provider. +// +// The options that MeterProviderOpts returns are just custom histogram +// boundaries for a few metrics. In the future, we could reconsider these +// boundaries because we just blindly ported them from v1 to v2 of +// go-libp2p-kad-dht. 
+var MeterProviderOpts = []motel.Option{ + motel.WithView(motel.NewView( + motel.Instrument{Name: "*_bytes", Scope: instrumentation.Scope{Name: meterName}}, + motel.Stream{ + Aggregation: motel.AggregationExplicitBucketHistogram{ + Boundaries: []float64{1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296}, + }, + }, + )), + motel.WithView(motel.NewView( + motel.Instrument{Name: "*_request_latency", Scope: instrumentation.Scope{Name: meterName}}, + motel.Stream{ + Aggregation: motel.AggregationExplicitBucketHistogram{ + Boundaries: []float64{0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000}, + }, + }, + )), +} + // AttrInstanceID identifies a dht instance by the pointer address. // Useful for differentiating between different DHTs that have the same peer id. func AttrInstanceID(instanceID string) attribute.KeyValue { From 53821ddd57d139b104f8907865a59bb0000036b8 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Wed, 6 Sep 2023 12:49:21 +0200 Subject: [PATCH 20/26] fix golint warnings --- v2/coord/coretypes.go | 4 ++-- v2/handlers.go | 2 +- v2/handlers_test.go | 2 +- v2/routing.go | 6 +++--- v2/stream.go | 6 +++--- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/v2/coord/coretypes.go b/v2/coord/coretypes.go index ebadf442..660b503a 100644 --- a/v2/coord/coretypes.go +++ b/v2/coord/coretypes.go @@ -71,10 +71,10 @@ type QueryStats struct { var ( // SkipNode is used as a return value from a QueryFunc to indicate that the node is to be skipped. - SkipNode = errors.New("skip node") + SkipNode = errors.New("skip node") //nolint:all // SkipRemaining is used as a return value a QueryFunc to indicate that all remaining nodes are to be skipped. - SkipRemaining = errors.New("skip remaining nodes") + SkipRemaining = errors.New("skip remaining nodes") //nolint:all ) // Router its a work in progress diff --git a/v2/handlers.go b/v2/handlers.go index ee360c27..5339c3fa 100644 --- a/v2/handlers.go +++ b/v2/handlers.go @@ -234,7 +234,7 @@ func (d *DHT) handleGetProviders(ctx context.Context, remote peer.ID, req *pb.Me // closerPeers returns the closest peers to the given target key this host knows // about. It doesn't return 1) itself 2) the peer that asked for closer peers. 
func (d *DHT) closerPeers(ctx context.Context, remote peer.ID, target key.Key256) []*pb.Message_Peer { - ctx, span := d.tele.Tracer.Start(ctx, "DHT.closerPeers", otel.WithAttributes(attribute.String("remote", remote.String()), attribute.String("target", target.HexString()))) + _, span := d.tele.Tracer.Start(ctx, "DHT.closerPeers", otel.WithAttributes(attribute.String("remote", remote.String()), attribute.String("target", target.HexString()))) defer span.End() peers := d.rt.NearestNodes(target, d.cfg.BucketSize) diff --git a/v2/handlers_test.go b/v2/handlers_test.go index b0de3168..58d613de 100644 --- a/v2/handlers_test.go +++ b/v2/handlers_test.go @@ -104,7 +104,7 @@ func TestMessage_noKey(t *testing.T) { pb.Message_ADD_PROVIDER, pb.Message_GET_PROVIDERS, } { - t.Run(fmt.Sprintf("%s", typ), func(t *testing.T) { + t.Run(typ.String(), func(t *testing.T) { msg := &pb.Message{Type: typ} // no key _, err := d.handleMsg(context.Background(), peer.ID(""), msg) if err == nil { diff --git a/v2/routing.go b/v2/routing.go index 6d641c32..eacc9d8d 100644 --- a/v2/routing.go +++ b/v2/routing.go @@ -99,7 +99,7 @@ func (d *DHT) Provide(ctx context.Context, c cid.Cid, brdcst bool) error { } func (d *DHT) FindProvidersAsync(ctx context.Context, c cid.Cid, count int) <-chan peer.AddrInfo { - ctx, span := d.tele.Tracer.Start(ctx, "DHT.FindProvidersAsync", otel.WithAttributes(attribute.String("cid", c.String()), attribute.Int("count", count))) + _, span := d.tele.Tracer.Start(ctx, "DHT.FindProvidersAsync", otel.WithAttributes(attribute.String("cid", c.String()), attribute.Int("count", count))) defer span.End() // verify if this DHT supports provider records by checking if a "providers" @@ -173,14 +173,14 @@ func (d *DHT) GetValue(ctx context.Context, key string, option ...routing.Option } func (d *DHT) SearchValue(ctx context.Context, s string, option ...routing.Option) (<-chan []byte, error) { - ctx, span := d.tele.Tracer.Start(ctx, "DHT.SearchValue") + _, span := d.tele.Tracer.Start(ctx, "DHT.SearchValue") defer span.End() panic("implement me") } func (d *DHT) Bootstrap(ctx context.Context) error { - ctx, span := d.tele.Tracer.Start(ctx, "DHT.Bootstrap") + _, span := d.tele.Tracer.Start(ctx, "DHT.Bootstrap") defer span.End() panic("implement me") diff --git a/v2/stream.go b/v2/stream.go index e053c6eb..f7eb7c25 100644 --- a/v2/stream.go +++ b/v2/stream.go @@ -13,7 +13,7 @@ import ( "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-msgio" - "github.com/libp2p/go-msgio/protoio" + "github.com/libp2p/go-msgio/pbio" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" @@ -259,7 +259,7 @@ func (d *DHT) streamWriteMsg(ctx context.Context, slogger *slog.Logger, s networ // packet for every single write. 
type bufferedDelimitedWriter struct { *bufio.Writer - protoio.WriteCloser + pbio.WriteCloser } var writerPool = sync.Pool{ @@ -267,7 +267,7 @@ var writerPool = sync.Pool{ w := bufio.NewWriter(nil) return &bufferedDelimitedWriter{ Writer: w, - WriteCloser: protoio.NewDelimitedWriter(w), + WriteCloser: pbio.NewDelimitedWriter(w), } }, } From 05e2d9071fa438097199f3de22b78887b046277f Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Wed, 6 Sep 2023 14:52:33 +0200 Subject: [PATCH 21/26] use clock.Clock --- v2/backend_provider_test.go | 8 ++++---- v2/backend_record.go | 7 +++++-- v2/config.go | 9 +++++++++ v2/config_test.go | 6 ++++++ v2/coord/coordinator_test.go | 8 ++------ v2/dht.go | 2 ++ v2/stream.go | 13 ++++++------- 7 files changed, 34 insertions(+), 19 deletions(-) diff --git a/v2/backend_provider_test.go b/v2/backend_provider_test.go index 66425653..b87cf488 100644 --- a/v2/backend_provider_test.go +++ b/v2/backend_provider_test.go @@ -66,22 +66,22 @@ func TestProvidersBackend_GarbageCollection(t *testing.T) { // write to peerstore b.addrBook.AddAddrs(p.ID, p.Addrs, time.Hour) - // advance clock half the gc time and check if record is still there + // advance clock half the validity time and check if record is still there mockClock.Add(cfg.ProvideValidity / 2) // sync autobatching datastore to have all put/deletes visible - err = b.datastore.Sync(ctx, ds.NewKey(namespaceProviders)) + err = b.datastore.Sync(ctx, ds.NewKey("")) require.NoError(t, err) // we expect the record to still be there after half the ProvideValidity _, err = b.datastore.Get(ctx, dsKey) require.NoError(t, err) - // advance clock another gc time and check if record was GC'd now + // advance clock another time and check if the record was GC'd now mockClock.Add(cfg.ProvideValidity + cfg.GCInterval) // sync autobatching datastore to have all put/deletes visible - err = b.datastore.Sync(ctx, ds.NewKey(namespaceProviders)) + err = b.datastore.Sync(ctx, ds.NewKey("")) require.NoError(t, err) // we expect the record to be GC'd now diff --git a/v2/backend_record.go b/v2/backend_record.go index 5ad3bfdc..9655d2b7 100644 --- a/v2/backend_record.go +++ b/v2/backend_record.go @@ -6,6 +6,7 @@ import ( "fmt" "time" + "github.com/benbjohnson/clock" ds "github.com/ipfs/go-datastore" record "github.com/libp2p/go-libp2p-record" recpb "github.com/libp2p/go-libp2p-record/pb" @@ -25,6 +26,7 @@ type RecordBackend struct { var _ Backend = (*RecordBackend)(nil) type RecordBackendConfig struct { + clk clock.Clock MaxRecordAge time.Duration Logger *slog.Logger Tele *tele.Telemetry @@ -37,6 +39,7 @@ func DefaultRecordBackendConfig() (*RecordBackendConfig, error) { } return &RecordBackendConfig{ + clk: clock.New(), Logger: slog.Default(), Tele: telemetry, MaxRecordAge: 48 * time.Hour, // empirically measured in: https://github.com/plprobelab/network-measurements/blob/master/results/rfm17-provider-record-liveness.md @@ -68,7 +71,7 @@ func (r *RecordBackend) Store(ctx context.Context, key string, value any) (any, } // avoid storing arbitrary data, so overwrite that field - rec.TimeReceived = time.Now().UTC().Format(time.RFC3339Nano) + rec.TimeReceived = r.cfg.clk.Now().UTC().Format(time.RFC3339Nano) data, err := rec.Marshal() if err != nil { @@ -110,7 +113,7 @@ func (r *RecordBackend) Fetch(ctx context.Context, key string) (any, error) { // validate that we don't serve stale records. 
receivedAt, err := time.Parse(time.RFC3339Nano, rec.GetTimeReceived()) - if err != nil || time.Since(receivedAt) > r.cfg.MaxRecordAge { + if err != nil || r.cfg.clk.Since(receivedAt) > r.cfg.MaxRecordAge { errStr := "" if err != nil { errStr = err.Error() diff --git a/v2/config.go b/v2/config.go index 9b7468db..58ff52da 100644 --- a/v2/config.go +++ b/v2/config.go @@ -4,6 +4,7 @@ import ( "fmt" "time" + "github.com/benbjohnson/clock" ds "github.com/ipfs/go-datastore" leveldb "github.com/ipfs/go-ds-leveldb" logging "github.com/ipfs/go-log/v2" @@ -103,6 +104,9 @@ const ( // to build up your own configuration struct. The [DHT] constructor [New] uses the // below method [*Config.Validate] to test for violations of configuration invariants. type Config struct { + // Clock + Clock clock.Clock + // Mode defines if the DHT should operate as a server or client or switch // between both automatically (see ModeOpt). Mode ModeOpt @@ -174,6 +178,7 @@ type Config struct { // fields come from separate top-level methods prefixed with Default. func DefaultConfig() *Config { return &Config{ + Clock: clock.New(), Mode: ModeOptAutoClient, Kademlia: coord.DefaultConfig(), BucketSize: 20, // MAGIC @@ -210,6 +215,10 @@ func InMemoryDatastore() (Datastore, error) { // an error if any configuration issue was detected and nil if this is // a valid configuration. func (c *Config) Validate() error { + if c.Clock == nil { + return fmt.Errorf("clock must not be nil") + } + switch c.Mode { case ModeOptClient: case ModeOptServer: diff --git a/v2/config_test.go b/v2/config_test.go index d066ac99..892f9f8a 100644 --- a/v2/config_test.go +++ b/v2/config_test.go @@ -94,4 +94,10 @@ func TestConfig_Validate(t *testing.T) { cfg.TracerProvider = nil assert.Error(t, cfg.Validate()) }) + + t.Run("nil clock", func(t *testing.T) { + cfg := DefaultConfig() + cfg.Clock = nil + assert.Error(t, cfg.Validate()) + }) } diff --git a/v2/coord/coordinator_test.go b/v2/coord/coordinator_test.go index 3d22c9d3..992337df 100644 --- a/v2/coord/coordinator_test.go +++ b/v2/coord/coordinator_test.go @@ -229,9 +229,7 @@ func TestBootstrap(t *testing.T) { self := nodes[0].NodeInfo.ID d, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg) - if err != nil { - log.Fatalf("unexpected error creating dht: %v", err) - } + require.NoError(t, err) buffer := make(chan RoutingNotification, 5) go func() { @@ -245,9 +243,7 @@ func TestBootstrap(t *testing.T) { } }() - seeds := []peer.ID{ - nodes[1].NodeInfo.ID, - } + seeds := []peer.ID{nodes[1].NodeInfo.ID} err = d.Bootstrap(ctx, seeds) require.NoError(t, err) diff --git a/v2/dht.go b/v2/dht.go index 0ec8c189..4a2b3cf2 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -110,6 +110,7 @@ func New(h host.Host, cfg *Config) (*DHT, error) { pbeCfg.Logger = cfg.Logger pbeCfg.AddressFilter = cfg.AddressFilter pbeCfg.Tele = d.tele + pbeCfg.clk = d.cfg.Clock pbe, err := NewBackendProvider(h.Peerstore(), dstore, pbeCfg) if err != nil { @@ -122,6 +123,7 @@ func New(h host.Host, cfg *Config) (*DHT, error) { } rbeCfg.Logger = cfg.Logger rbeCfg.Tele = d.tele + rbeCfg.clk = d.cfg.Clock ipnsBe, err := NewBackendIPNS(dstore, h.Peerstore(), rbeCfg) if err != nil { diff --git a/v2/stream.go b/v2/stream.go index f7eb7c25..ea8c3a8b 100644 --- a/v2/stream.go +++ b/v2/stream.go @@ -8,7 +8,6 @@ import ( "fmt" "io" "sync" - "time" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" @@ -84,7 +83,7 @@ func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error { slogger := 
d.log.With(slog.String("from", s.Conn().RemotePeer().String())) // reset the stream after it was idle for too long - if err := s.SetDeadline(time.Now().Add(d.cfg.TimeoutStreamIdle)); err != nil { + if err := s.SetDeadline(d.cfg.Clock.Now().Add(d.cfg.TimeoutStreamIdle)); err != nil { return fmt.Errorf("set initial stream deadline: %w", err) } @@ -103,7 +102,7 @@ func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error { // we have received a message, start the timer to // track inbound request latency - startTime := time.Now() + startTime := d.cfg.Clock.Now() // 2. unmarshal message into something usable req, err := d.streamUnmarshalMsg(ctx, slogger, data) @@ -115,7 +114,7 @@ func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error { reader.ReleaseMsg(data) // reset stream deadline - if err = s.SetDeadline(time.Now().Add(d.cfg.TimeoutStreamIdle)); err != nil { + if err = s.SetDeadline(d.cfg.Clock.Now().Add(d.cfg.TimeoutStreamIdle)); err != nil { return fmt.Errorf("reset stream deadline: %w", err) } @@ -141,11 +140,11 @@ func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error { slogger.LogAttrs(ctx, slog.LevelDebug, "handling message") resp, err := d.handleMsg(ctx, s.Conn().RemotePeer(), req) if err != nil { - slogger.LogAttrs(ctx, slog.LevelDebug, "error handling message", slog.Duration("time", time.Since(startTime)), slog.String("error", err.Error())) + slogger.LogAttrs(ctx, slog.LevelDebug, "error handling message", slog.Duration("time", d.cfg.Clock.Since(startTime)), slog.String("error", err.Error())) d.tele.ReceivedMessageErrors.Add(ctx, 1, mattrs) return err } - slogger.LogAttrs(ctx, slog.LevelDebug, "handled message", slog.Duration("time", time.Since(startTime))) + slogger.LogAttrs(ctx, slog.LevelDebug, "handled message", slog.Duration("time", d.cfg.Clock.Since(startTime))) // if the handler didn't return a response, continue reading the stream if resp == nil { @@ -159,7 +158,7 @@ func (d *DHT) handleNewStream(ctx context.Context, s network.Stream) error { } // final logging, metrics tracking - latency := time.Since(startTime) + latency := d.cfg.Clock.Since(startTime) slogger.LogAttrs(ctx, slog.LevelDebug, "responded to message", slog.Duration("time", latency)) d.tele.InboundRequestLatency.Record(ctx, float64(latency.Milliseconds()), mattrs) } From 5d7df1cf00457e3c5c471f75e68090a6700b7849 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Wed, 6 Sep 2023 16:21:07 +0200 Subject: [PATCH 22/26] add telemetry context tests --- v2/tele/tele_test.go | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 v2/tele/tele_test.go diff --git a/v2/tele/tele_test.go b/v2/tele/tele_test.go new file mode 100644 index 00000000..d7c98d93 --- /dev/null +++ b/v2/tele/tele_test.go @@ -0,0 +1,41 @@ +package tele + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/attribute" +) + +func TestWithAttributes(t *testing.T) { + ctx := context.Background() + + set := FromContext(ctx) + assert.Equal(t, 0, set.Len()) + + ctx = WithAttributes(ctx, attribute.Int("A", 1)) + ctx = WithAttributes(ctx, attribute.Int("B", 1)) + ctx = WithAttributes(ctx, attribute.Int("A", 1)) + ctx = WithAttributes(ctx, attribute.Int("B", 2)) + ctx = WithAttributes(ctx, attribute.Int("C", 1)) + + set = FromContext(ctx, attribute.Int("A", 2)) + + val, found := set.Value("A") + require.True(t, found) + assert.EqualValues(t, 2, val.AsInt64()) + + val, 
found = set.Value("B") + require.True(t, found) + assert.EqualValues(t, 2, val.AsInt64()) + + val, found = set.Value("C") + require.True(t, found) + assert.EqualValues(t, 1, val.AsInt64()) + + ctx = context.WithValue(ctx, attrsCtxKey, "not an attribute set") + set = FromContext(ctx) + assert.Equal(t, 0, set.Len()) +} From a749dcf2f4c10c87b801339917457d164c89a164 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Wed, 6 Sep 2023 16:25:38 +0200 Subject: [PATCH 23/26] Improve telemetry documentation --- v2/tele/tele.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/v2/tele/tele.go b/v2/tele/tele.go index acfc68a2..a3571ffb 100644 --- a/v2/tele/tele.go +++ b/v2/tele/tele.go @@ -29,6 +29,9 @@ var ( // Initialize this struct with [New] or [NewWithGlobalProviders]. Make sure // to also register the [MeterProviderOpts] with your custom or the global // [metric.MeterProvider]. +// +// To see the documentation for each metric below, check out [New] and the +// metric.WithDescription() calls when initializing each metric. type Telemetry struct { Tracer trace.Tracer ReceivedMessages metric.Int64Counter @@ -198,6 +201,8 @@ func AttrKey(val string) attribute.KeyValue { return attribute.String("key", val) } +// WithAttributes is a function that attaches the provided attributes to the +// given context. The given attributes will overwrite any already existing ones. func WithAttributes(ctx context.Context, attrs ...attribute.KeyValue) context.Context { set := attribute.NewSet(attrs...) val := ctx.Value(attrsCtxKey) @@ -210,6 +215,10 @@ func WithAttributes(ctx context.Context, attrs ...attribute.KeyValue) context.Co return context.WithValue(ctx, attrsCtxKey, set) } +// FromContext returns the attributes that were previously associated with the +// given context via [WithAttributes] plus any attributes that are also passed +// into this function. The given attributes will take precedence over any +// attributes stored in the context. 
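+//
+// A minimal sketch of the intended call pattern (the attribute key and values
+// are made up for illustration):
+//
+//	ctx = WithAttributes(ctx, attribute.String("key", "original"))
+//	// ... later, attributes passed directly take precedence over stored ones:
+//	set := FromContext(ctx, attribute.String("key", "override"))
+//	// set.Value("key") now yields "override".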
func FromContext(ctx context.Context, attrs ...attribute.KeyValue) attribute.Set { val := ctx.Value(attrsCtxKey) if val == nil { From 5fa755b46fb04046b9ea9b7f30ece3633914ba82 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Thu, 7 Sep 2023 17:15:49 +0200 Subject: [PATCH 24/26] fix test race --- v2/backend_provider_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/v2/backend_provider_test.go b/v2/backend_provider_test.go index b87cf488..ebd3b449 100644 --- a/v2/backend_provider_test.go +++ b/v2/backend_provider_test.go @@ -9,6 +9,7 @@ import ( "github.com/benbjohnson/clock" ds "github.com/ipfs/go-datastore" + syncds "github.com/ipfs/go-datastore/sync" "github.com/libp2p/go-libp2p" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -34,7 +35,7 @@ func newBackendProvider(t testing.TB, cfg *ProvidersBackendConfig) *ProvidersBac } }) - b, err := NewBackendProvider(h.Peerstore(), dstore, cfg) + b, err := NewBackendProvider(h.Peerstore(), syncds.MutexWrap(dstore), cfg) require.NoError(t, err) return b From 6170d5ddd2e11ff5090970333dafa6a18ad764c2 Mon Sep 17 00:00:00 2001 From: Dennis Trautwein Date: Thu, 7 Sep 2023 17:50:09 +0200 Subject: [PATCH 25/26] fix garbage collection race --- v2/backend.go | 3 +-- v2/backend_provider.go | 10 +++------- v2/backend_provider_test.go | 3 +-- v2/config.go | 2 ++ 4 files changed, 7 insertions(+), 11 deletions(-) diff --git a/v2/backend.go b/v2/backend.go index a2cd2121..a8c7775a 100644 --- a/v2/backend.go +++ b/v2/backend.go @@ -7,7 +7,6 @@ import ( lru "github.com/hashicorp/golang-lru/v2" "github.com/ipfs/boxo/ipns" ds "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/autobatch" record "github.com/libp2p/go-libp2p-record" "github.com/libp2p/go-libp2p/core/peerstore" ) @@ -121,7 +120,7 @@ func NewBackendProvider(pstore peerstore.Peerstore, dstore ds.Batching, cfg *Pro cache: cache, namespace: namespaceProviders, addrBook: pstore, - datastore: autobatch.NewAutoBatching(dstore, cfg.BatchSize), + datastore: dstore, } return p, nil diff --git a/v2/backend_provider.go b/v2/backend_provider.go index 8c967a59..aee03322 100644 --- a/v2/backend_provider.go +++ b/v2/backend_provider.go @@ -13,7 +13,6 @@ import ( "github.com/benbjohnson/clock" lru "github.com/hashicorp/golang-lru/v2" ds "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/autobatch" dsq "github.com/ipfs/go-datastore/query" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" @@ -46,8 +45,9 @@ type ProvidersBackend struct { // fetch peer multiaddresses from (we don't save them in the datastore). addrBook peerstore.AddrBook - // datastore is where we save the peer IDs providing a certain multihash - datastore *autobatch.Datastore + // datastore is where we save the peer IDs providing a certain multihash. + // The datastore must be thread-safe. + datastore ds.Datastore // gcSkip is a sync map that marks records as to-be-skipped by the garbage // collection process. TODO: this is a sub-optimal pattern. @@ -81,9 +81,6 @@ type ProvidersBackendConfig struct { // requesting peers' side. 
AddressTTL time.Duration - // BatchSize specifies how many provider record writes should be batched - BatchSize int - // CacheSize specifies the LRU cache size CacheSize int @@ -118,7 +115,6 @@ func DefaultProviderBackendConfig() (*ProvidersBackendConfig, error) { clk: clock.New(), ProvideValidity: 48 * time.Hour, // empirically measured in: https://github.com/plprobelab/network-measurements/blob/master/results/rfm17-provider-record-liveness.md AddressTTL: 24 * time.Hour, // MAGIC - BatchSize: 256, // MAGIC CacheSize: 256, // MAGIC GCInterval: time.Hour, // MAGIC Logger: slog.Default(), diff --git a/v2/backend_provider_test.go b/v2/backend_provider_test.go index ebd3b449..b87cf488 100644 --- a/v2/backend_provider_test.go +++ b/v2/backend_provider_test.go @@ -9,7 +9,6 @@ import ( "github.com/benbjohnson/clock" ds "github.com/ipfs/go-datastore" - syncds "github.com/ipfs/go-datastore/sync" "github.com/libp2p/go-libp2p" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -35,7 +34,7 @@ func newBackendProvider(t testing.TB, cfg *ProvidersBackendConfig) *ProvidersBac } }) - b, err := NewBackendProvider(h.Peerstore(), syncds.MutexWrap(dstore), cfg) + b, err := NewBackendProvider(h.Peerstore(), dstore, cfg) require.NoError(t, err) return b diff --git a/v2/config.go b/v2/config.go index 58ff52da..98136cca 100644 --- a/v2/config.go +++ b/v2/config.go @@ -149,6 +149,8 @@ type Config struct { // construct them individually and register them with the above Backends // map. Note that if you configure the DHT to use [ProtocolIPFS] it is // required to register backends for the ipns, pk, and providers namespaces. + // + // This datastore must be thread-safe. Datastore Datastore // Logger can be used to configure a custom structured logger instance. 
From 46ff62134d72bf83ef7826d1d3c2d713a70a723d Mon Sep 17 00:00:00 2001 From: Ian Davis <18375+iand@users.noreply.github.com> Date: Thu, 7 Sep 2023 17:10:05 +0100 Subject: [PATCH 26/26] Add AddAddresses method to DHT (#879) * Add AddAddresses method to DHT * Add AddAddresses method to DHT * go mod tidy * Rename Query Skip errors * go fmt coordinator.go * Fix test flakes * Fix lint errors --- v2/coord/behaviour.go | 4 +- v2/coord/conversion.go | 5 + v2/coord/coordinator.go | 60 +++++--- v2/coord/coordinator_test.go | 165 +++++++++++++-------- v2/coord/coretypes.go | 8 +- v2/coord/event.go | 5 +- v2/coord/internal/nettest/layouts.go | 6 +- v2/coord/internal/nettest/topology.go | 4 +- v2/coord/network.go | 26 +++- v2/coord/network_test.go | 5 +- v2/coord/routing.go | 12 +- v2/coord/routing_test.go | 2 +- v2/dht.go | 29 +++- v2/dht_test.go | 62 ++++++++ v2/go.mod | 1 + v2/go.sum | 3 + v2/handlers_test.go | 28 ++-- v2/{coord => }/internal/kadtest/context.go | 0 v2/internal/kadtest/tracing.go | 33 +++++ v2/query_test.go | 74 +++++++++ v2/routing.go | 2 +- v2/stream_test.go | 2 +- 22 files changed, 410 insertions(+), 126 deletions(-) rename v2/{coord => }/internal/kadtest/context.go (100%) create mode 100644 v2/internal/kadtest/tracing.go create mode 100644 v2/query_test.go diff --git a/v2/coord/behaviour.go b/v2/coord/behaviour.go index 2f9f9e33..aa69917f 100644 --- a/v2/coord/behaviour.go +++ b/v2/coord/behaviour.go @@ -56,7 +56,7 @@ type WorkQueue[E BehaviourEvent] struct { func NewWorkQueue[E BehaviourEvent](fn WorkQueueFunc[E]) *WorkQueue[E] { w := &WorkQueue[E]{ - pending: make(chan pendingEvent[E], 16), + pending: make(chan pendingEvent[E], 1), fn: fn, } return w @@ -113,7 +113,7 @@ var _ Notify[BehaviourEvent] = (*Waiter[BehaviourEvent])(nil) func NewWaiter[E BehaviourEvent]() *Waiter[E] { w := &Waiter[E]{ - pending: make(chan WaiterEvent[E], 16), + pending: make(chan WaiterEvent[E], 1), } return w } diff --git a/v2/coord/conversion.go b/v2/coord/conversion.go index dadc0bcc..3a6b0ba8 100644 --- a/v2/coord/conversion.go +++ b/v2/coord/conversion.go @@ -27,6 +27,11 @@ func NodeIDToAddrInfo(id kad.NodeID[KadKey]) peer.AddrInfo { } } +// AddrInfoToNodeID converts a peer.AddrInfo to a kad.NodeID. +func AddrInfoToNodeID(ai peer.AddrInfo) kad.NodeID[KadKey] { + return kadt.PeerID(ai.ID) +} + // SliceOfNodeInfoToSliceOfAddrInfo converts a kad.NodeInfo to a peer.AddrInfo. 
// This function will panic if any info.ID() does not return a kadt.PeerID func SliceOfNodeInfoToSliceOfAddrInfo(infos []kad.NodeInfo[KadKey, ma.Multiaddr]) []peer.AddrInfo { diff --git a/v2/coord/coordinator.go b/v2/coord/coordinator.go index c594b206..579701a4 100644 --- a/v2/coord/coordinator.go +++ b/v2/coord/coordinator.go @@ -29,6 +29,9 @@ type Coordinator struct { // self is the peer id of the system the dht is running on self peer.ID + // cancel is used to cancel all running goroutines when the coordinator is cleaning up + cancel context.CancelFunc + // cfg is a copy of the optional configuration supplied to the dht cfg CoordinatorConfig @@ -50,8 +53,6 @@ type Coordinator struct { queryBehaviour Behaviour[BehaviourEvent, BehaviourEvent] } -const DefaultChanqueueCapacity = 1024 - type CoordinatorConfig struct { PeerstoreTTL time.Duration // duration for which a peer is kept in the peerstore @@ -120,14 +121,14 @@ func (cfg *CoordinatorConfig) Validate() error { return nil } -func DefaultConfig() (*CoordinatorConfig, error) { +func DefaultCoordinatorConfig() (*CoordinatorConfig, error) { telemetry, err := tele.NewWithGlobalProviders() if err != nil { return nil, fmt.Errorf("new telemetry: %w", err) } return &CoordinatorConfig{ - Clock: clock.New(), // use standard time + Clock: clock.New(), PeerstoreTTL: 10 * time.Minute, QueryConcurrency: 3, QueryTimeout: 5 * time.Minute, @@ -140,7 +141,7 @@ func DefaultConfig() (*CoordinatorConfig, error) { func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[KadKey, kad.NodeID[KadKey]], cfg *CoordinatorConfig) (*Coordinator, error) { if cfg == nil { - c, err := DefaultConfig() + c, err := DefaultCoordinatorConfig() if err != nil { return nil, fmt.Errorf("default config: %w", err) } @@ -200,25 +201,34 @@ func NewCoordinator(self peer.ID, rtr Router, rt routing.RoutingTableCpl[KadKey, routingBehaviour := NewRoutingBehaviour(self, bootstrap, include, probe, cfg.Logger, cfg.Tele.Tracer) - networkBehaviour := NewNetworkBehaviour(rtr, cfg.Logger) + networkBehaviour := NewNetworkBehaviour(rtr, cfg.Logger, cfg.Tele.Tracer) + + ctx, cancel := context.WithCancel(context.Background()) d := &Coordinator{ - self: self, - cfg: *cfg, - rtr: rtr, - rt: rt, + self: self, + cfg: *cfg, + rtr: rtr, + rt: rt, + cancel: cancel, networkBehaviour: networkBehaviour, routingBehaviour: routingBehaviour, queryBehaviour: queryBehaviour, - routingNotifications: make(chan RoutingNotification, 20), + routingNotifications: make(chan RoutingNotification, 20), // buffered mainly to allow tests to read the channel after running an operation } - go d.eventLoop() + go d.eventLoop(ctx) return d, nil } +// Close cleans up all resources associated with this Coordinator. 
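+// As implemented, calling Close more than once is harmless: it only cancels
+// the coordinator's internal context, and cancelling an already-cancelled
+// context is a no-op.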
+func (c *Coordinator) Close() error { + c.cancel() + return nil +} + func (c *Coordinator) ID() peer.ID { return c.self } @@ -237,13 +247,16 @@ func (c *Coordinator) RoutingNotifications() <-chan RoutingNotification { return c.routingNotifications } -func (c *Coordinator) eventLoop() { - ctx := context.Background() - +func (c *Coordinator) eventLoop(ctx context.Context) { + ctx, span := c.cfg.Tele.Tracer.Start(ctx, "Coordinator.eventLoop") + defer span.End() for { var ev BehaviourEvent var ok bool select { + case <-ctx.Done(): + // coordinator is closing + return case <-c.networkBehaviour.Ready(): ev, ok = c.networkBehaviour.Perform(ctx) case <-c.routingBehaviour.Ready(): @@ -339,7 +352,7 @@ func (c *Coordinator) Query(ctx context.Context, target KadKey, fn QueryFunc) (Q } waiter := NewWaiter[BehaviourEvent]() - queryID := query.QueryID("foo") + queryID := query.QueryID("foo") // TODO: choose query ID cmd := &EventStartQuery{ QueryID: queryID, @@ -375,12 +388,12 @@ func (c *Coordinator) Query(ctx context.Context, target KadKey, fn QueryFunc) (Q } err = fn(ctx, nh, lastStats) - if errors.Is(err, SkipRemaining) { + if errors.Is(err, ErrSkipRemaining) { // done c.queryBehaviour.Notify(ctx, &EventStopQuery{QueryID: queryID}) return lastStats, nil } - if errors.Is(err, SkipNode) { + if errors.Is(err, ErrSkipNode) { // TODO: don't add closer nodes from this node break } @@ -405,17 +418,20 @@ func (c *Coordinator) Query(ctx context.Context, target KadKey, fn QueryFunc) (Q // AddNodes suggests new DHT nodes and their associated addresses to be added to the routing table. // If the routing table is updated as a result of this operation an EventRoutingUpdated notification // is emitted on the routing notification channel. -func (c *Coordinator) AddNodes(ctx context.Context, infos []peer.AddrInfo) error { +func (c *Coordinator) AddNodes(ctx context.Context, ais []peer.AddrInfo, ttl time.Duration) error { ctx, span := c.cfg.Tele.Tracer.Start(ctx, "Coordinator.AddNodes") defer span.End() - for _, info := range infos { - if info.ID == c.self { + for _, ai := range ais { + if ai.ID == c.self { // skip self continue } + // TODO: apply address filter + c.routingBehaviour.Notify(ctx, &EventAddAddrInfo{ - NodeInfo: info, + NodeInfo: ai, + TTL: ttl, }) } diff --git a/v2/coord/coordinator_test.go b/v2/coord/coordinator_test.go index 992337df..235828cc 100644 --- a/v2/coord/coordinator_test.go +++ b/v2/coord/coordinator_test.go @@ -5,46 +5,95 @@ import ( "fmt" "log" "reflect" + "sync" "testing" "time" "github.com/benbjohnson/clock" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" + "github.com/libp2p/go-libp2p-kad-dht/v2/tele" ) const peerstoreTTL = 10 * time.Minute -// expectEventType selects on the event channel until an event of the expected type is sent. 
-func expectEventType(t *testing.T, ctx context.Context, events <-chan RoutingNotification, expected RoutingNotification) (RoutingNotification, error) { +type notificationWatcher struct { + mu sync.Mutex + buffered []RoutingNotification + signal chan struct{} +} + +func (w *notificationWatcher) Watch(t *testing.T, ctx context.Context, ch <-chan RoutingNotification) { t.Helper() + w.signal = make(chan struct{}, 1) + go func() { + for { + select { + case <-ctx.Done(): + return + case ev := <-ch: + w.mu.Lock() + t.Logf("buffered routing notification: %T\n", ev) + w.buffered = append(w.buffered, ev) + select { + case w.signal <- struct{}{}: + default: + } + w.mu.Unlock() + + } + } + }() +} + +func (w *notificationWatcher) Expect(ctx context.Context, expected RoutingNotification) (RoutingNotification, error) { for { - select { - case ev := <-events: - t.Logf("saw event: %T\n", ev) + // look in buffered events + w.mu.Lock() + for i, ev := range w.buffered { if reflect.TypeOf(ev) == reflect.TypeOf(expected) { + // remove first from buffer and return it + w.buffered = w.buffered[:i+copy(w.buffered[i:], w.buffered[i+1:])] + w.mu.Unlock() return ev, nil } + } + w.mu.Unlock() + + // wait to be signaled that there is a new event + select { case <-ctx.Done(): return nil, fmt.Errorf("test deadline exceeded while waiting for event %T", expected) + case <-w.signal: } } } +// TracingTelemetry may be used to create a Telemetry that traces a test +func TracingTelemetry(t *testing.T) *tele.Telemetry { + telemetry, err := tele.New(otel.GetMeterProvider(), kadtest.JaegerTracerProvider(t)) + if err != nil { + t.Fatalf("unexpected error creating telemetry: %v", err) + } + + return telemetry +} + func TestConfigValidate(t *testing.T) { t.Run("default is valid", func(t *testing.T) { - cfg, err := DefaultConfig() + cfg, err := DefaultCoordinatorConfig() require.NoError(t, err) require.NoError(t, cfg.Validate()) }) t.Run("clock is not nil", func(t *testing.T) { - cfg, err := DefaultConfig() + cfg, err := DefaultCoordinatorConfig() require.NoError(t, err) cfg.Clock = nil @@ -52,7 +101,7 @@ func TestConfigValidate(t *testing.T) { }) t.Run("query concurrency positive", func(t *testing.T) { - cfg, err := DefaultConfig() + cfg, err := DefaultCoordinatorConfig() require.NoError(t, err) cfg.QueryConcurrency = 0 @@ -62,7 +111,7 @@ func TestConfigValidate(t *testing.T) { }) t.Run("query timeout positive", func(t *testing.T) { - cfg, err := DefaultConfig() + cfg, err := DefaultCoordinatorConfig() require.NoError(t, err) cfg.QueryTimeout = 0 @@ -72,7 +121,7 @@ func TestConfigValidate(t *testing.T) { }) t.Run("request concurrency positive", func(t *testing.T) { - cfg, err := DefaultConfig() + cfg, err := DefaultCoordinatorConfig() require.NoError(t, err) cfg.RequestConcurrency = 0 @@ -82,7 +131,7 @@ func TestConfigValidate(t *testing.T) { }) t.Run("request timeout positive", func(t *testing.T) { - cfg, err := DefaultConfig() + cfg, err := DefaultCoordinatorConfig() require.NoError(t, err) cfg.RequestTimeout = 0 @@ -92,7 +141,7 @@ func TestConfigValidate(t *testing.T) { }) t.Run("logger not nil", func(t *testing.T) { - cfg, err := DefaultConfig() + cfg, err := DefaultCoordinatorConfig() require.NoError(t, err) cfg.Logger = nil @@ -100,7 +149,7 @@ func TestConfigValidate(t *testing.T) { }) t.Run("telemetry not nil", func(t *testing.T) { - cfg, err := DefaultConfig() + cfg, err := DefaultCoordinatorConfig() require.NoError(t, err) cfg.Tele = nil @@ -115,7 +164,7 @@ func TestExhaustiveQuery(t *testing.T) { clk := clock.NewMock() _, 
nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - ccfg, err := DefaultConfig() + ccfg, err := DefaultCoordinatorConfig() require.NoError(t, err) ccfg.Clock = clk @@ -156,7 +205,7 @@ func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - ccfg, err := DefaultConfig() + ccfg, err := DefaultCoordinatorConfig() require.NoError(t, err) ccfg.Clock = clk @@ -171,17 +220,8 @@ func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { log.Fatalf("unexpected error creating coordinator: %v", err) } - buffer := make(chan RoutingNotification, 5) - go func() { - for { - select { - case <-ctx.Done(): - return - case ev := <-c.RoutingNotifications(): - buffer <- ev - } - } - }() + w := new(notificationWatcher) + w.Watch(t, ctx, c.RoutingNotifications()) qfn := func(ctx context.Context, node Node, stats QueryStats) error { return nil @@ -195,22 +235,29 @@ func TestRoutingUpdatedEventEmittedForCloserNodes(t *testing.T) { // the query run by the dht should have received a response from nodes[1] with closer nodes // nodes[0] and nodes[2] which should trigger a routing table update since nodes[2] was // not in the dht's routing table. - ev, err := expectEventType(t, ctx, buffer, &EventRoutingUpdated{}) - require.NoError(t, err) - - tev := ev.(*EventRoutingUpdated) - require.Equal(t, nodes[2].NodeInfo.ID, NodeIDToPeerID(tev.NodeInfo.ID())) + // the query then continues and should have received a response from nodes[2] with closer nodes + // nodes[1] and nodes[3] which should trigger a routing table update since nodes[3] was + // not in the dht's routing table. // no EventRoutingUpdated is sent for the self node - // the query continues and should have received a response from nodes[2] with closer nodes - // nodes[1] and nodes[3] which should trigger a routing table update since nodes[3] was - // not in the dht's routing table. - ev, err = expectEventType(t, ctx, buffer, &EventRoutingUpdated{}) + // However the order in which these events are emitted may vary depending on timing. 
+ + ev1, err := w.Expect(ctx, &EventRoutingUpdated{}) require.NoError(t, err) + tev1 := ev1.(*EventRoutingUpdated) - tev = ev.(*EventRoutingUpdated) - require.Equal(t, nodes[3].NodeInfo.ID, NodeIDToPeerID(tev.NodeInfo.ID())) + ev2, err := w.Expect(ctx, &EventRoutingUpdated{}) + require.NoError(t, err) + tev2 := ev2.(*EventRoutingUpdated) + + if tev1.NodeInfo.ID == nodes[2].NodeInfo.ID { + require.Equal(t, nodes[3].NodeInfo.ID, tev2.NodeInfo.ID) + } else if tev2.NodeInfo.ID == nodes[2].NodeInfo.ID { + require.Equal(t, nodes[3].NodeInfo.ID, tev1.NodeInfo.ID) + } else { + require.Failf(t, "did not see routing updated event for %s", nodes[2].NodeInfo.ID.String()) + } } func TestBootstrap(t *testing.T) { @@ -221,7 +268,7 @@ func TestBootstrap(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - ccfg, err := DefaultConfig() + ccfg, err := DefaultCoordinatorConfig() require.NoError(t, err) ccfg.Clock = clk @@ -231,24 +278,15 @@ func TestBootstrap(t *testing.T) { d, err := NewCoordinator(self, nodes[0].Router, nodes[0].RoutingTable, ccfg) require.NoError(t, err) - buffer := make(chan RoutingNotification, 5) - go func() { - for { - select { - case <-ctx.Done(): - return - case ev := <-d.RoutingNotifications(): - buffer <- ev - } - } - }() + w := new(notificationWatcher) + w.Watch(t, ctx, d.RoutingNotifications()) seeds := []peer.ID{nodes[1].NodeInfo.ID} err = d.Bootstrap(ctx, seeds) require.NoError(t, err) // the query run by the dht should have completed - ev, err := expectEventType(t, ctx, buffer, &EventBootstrapFinished{}) + ev, err := w.Expect(ctx, &EventBootstrapFinished{}) require.NoError(t, err) require.IsType(t, &EventBootstrapFinished{}, ev) @@ -257,15 +295,21 @@ func TestBootstrap(t *testing.T) { require.Equal(t, 3, tevf.Stats.Success) require.Equal(t, 0, tevf.Stats.Failure) - // DHT should now have node1 in its routing table + _, err = w.Expect(ctx, &EventRoutingUpdated{}) + require.NoError(t, err) + + _, err = w.Expect(ctx, &EventRoutingUpdated{}) + require.NoError(t, err) + + // coordinator will have node1 in its routing table _, err = d.GetNode(ctx, nodes[1].NodeInfo.ID) require.NoError(t, err) - // DHT should now have node2 in its routing table + // coordinator should now have node2 in its routing table _, err = d.GetNode(ctx, nodes[2].NodeInfo.ID) require.NoError(t, err) - // DHT should now have node3 in its routing table + // coordinator should now have node3 in its routing table _, err = d.GetNode(ctx, nodes[3].NodeInfo.ID) require.NoError(t, err) } @@ -278,7 +322,7 @@ func TestIncludeNode(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - ccfg, err := DefaultConfig() + ccfg, err := DefaultCoordinatorConfig() require.NoError(t, err) ccfg.Clock = clk @@ -296,20 +340,21 @@ func TestIncludeNode(t *testing.T) { _, err = d.GetNode(ctx, candidate.ID) require.ErrorIs(t, err, ErrNodeNotFound) - events := d.RoutingNotifications() + w := new(notificationWatcher) + w.Watch(t, ctx, d.RoutingNotifications()) // inject a new node into the dht's includeEvents queue - err = d.AddNodes(ctx, []peer.AddrInfo{candidate}) + err = d.AddNodes(ctx, []peer.AddrInfo{candidate}, time.Minute) require.NoError(t, err) // the include state machine runs in the background and eventually should add the node to routing table - ev, err := expectEventType(t, ctx, events, &EventRoutingUpdated{}) + ev, err := w.Expect(ctx, &EventRoutingUpdated{}) require.NoError(t, err) tev := ev.(*EventRoutingUpdated) - require.Equal(t, candidate.ID, 
NodeIDToPeerID(tev.NodeInfo.ID())) + require.Equal(t, candidate.ID, tev.NodeInfo.ID) - // the routing table should not contain the node yet + // the routing table should now contain the node _, err = d.GetNode(ctx, candidate.ID) require.NoError(t, err) } diff --git a/v2/coord/coretypes.go b/v2/coord/coretypes.go index 660b503a..fe72d90f 100644 --- a/v2/coord/coretypes.go +++ b/v2/coord/coretypes.go @@ -70,11 +70,11 @@ type QueryStats struct { } var ( - // SkipNode is used as a return value from a QueryFunc to indicate that the node is to be skipped. - SkipNode = errors.New("skip node") //nolint:all + // ErrSkipNode is used as a return value from a QueryFunc to indicate that the node is to be skipped. + ErrSkipNode = errors.New("skip node") - // SkipRemaining is used as a return value a QueryFunc to indicate that all remaining nodes are to be skipped. - SkipRemaining = errors.New("skip remaining nodes") //nolint:all + // ErrSkipRemaining is used as a return value a QueryFunc to indicate that all remaining nodes are to be skipped. + ErrSkipRemaining = errors.New("skip remaining nodes") ) // Router its a work in progress diff --git a/v2/coord/event.go b/v2/coord/event.go index f4ecae4d..4d5790f7 100644 --- a/v2/coord/event.go +++ b/v2/coord/event.go @@ -1,6 +1,8 @@ package coord import ( + "time" + "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" "github.com/plprobelab/go-kademlia/kad" @@ -86,6 +88,7 @@ func (*EventStopQuery) queryCommand() {} type EventAddAddrInfo struct { NodeInfo peer.AddrInfo + TTL time.Duration } func (*EventAddAddrInfo) behaviourEvent() {} @@ -133,7 +136,7 @@ func (*EventQueryFinished) behaviourEvent() {} // EventRoutingUpdated is emitted by the coordinator when a new node has been verified and added to the routing table. type EventRoutingUpdated struct { - NodeInfo kad.NodeInfo[KadKey, ma.Multiaddr] + NodeInfo peer.AddrInfo } func (*EventRoutingUpdated) behaviourEvent() {} diff --git a/v2/coord/internal/nettest/layouts.go b/v2/coord/internal/nettest/layouts.go index 7aa548bf..f5236dc1 100644 --- a/v2/coord/internal/nettest/layouts.go +++ b/v2/coord/internal/nettest/layouts.go @@ -19,10 +19,10 @@ import ( // The topology is not a ring: nodes[0] only has nodes[1] in its table and nodes[n-1] only has nodes[n-2] in its table. // nodes[1] has nodes[0] and nodes[2] in its routing table. // If n > 2 then the first and last nodes will not have one another in their routing tables. 
-func LinearTopology(n int, clk *clock.Mock) (*Topology, []*Node, error) { +func LinearTopology(n int, clk clock.Clock) (*Topology, []*Node, error) { nodes := make([]*Node, n) - top := INewTopology(clk) + top := NewTopology(clk) for i := range nodes { a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i)) @@ -38,7 +38,7 @@ func LinearTopology(n int, clk *clock.Mock) (*Topology, []*Node, error) { nodes[i] = &Node{ NodeInfo: ai, Router: NewRouter(ai.ID, top), - RoutingTable: simplert.New[key.Key256, kad.NodeID[key.Key256]](kadt.PeerID(ai.ID), 2), + RoutingTable: simplert.New[key.Key256, kad.NodeID[key.Key256]](kadt.PeerID(ai.ID), 20), } } diff --git a/v2/coord/internal/nettest/topology.go b/v2/coord/internal/nettest/topology.go index c275e1ad..c7aae8d5 100644 --- a/v2/coord/internal/nettest/topology.go +++ b/v2/coord/internal/nettest/topology.go @@ -21,14 +21,14 @@ type Node struct { } type Topology struct { - clk *clock.Mock + clk clock.Clock links map[string]Link nodes []*Node nodeIndex map[peer.ID]*Node routers map[peer.ID]*Router } -func INewTopology(clk *clock.Mock) *Topology { +func NewTopology(clk clock.Clock) *Topology { return &Topology{ clk: clk, links: make(map[string]Link), diff --git a/v2/coord/network.go b/v2/coord/network.go index bf241b5d..eeb05402 100644 --- a/v2/coord/network.go +++ b/v2/coord/network.go @@ -10,6 +10,7 @@ import ( "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/query" + "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" @@ -27,20 +28,25 @@ type NetworkBehaviour struct { ready chan struct{} logger *slog.Logger + tracer trace.Tracer } -func NewNetworkBehaviour(rtr Router, logger *slog.Logger) *NetworkBehaviour { +func NewNetworkBehaviour(rtr Router, logger *slog.Logger, tracer trace.Tracer) *NetworkBehaviour { b := &NetworkBehaviour{ rtr: rtr, nodeHandlers: make(map[peer.ID]*NodeHandler), ready: make(chan struct{}, 1), logger: logger, + tracer: tracer, } return b } func (b *NetworkBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { + ctx, span := b.tracer.Start(ctx, "NetworkBehaviour.Notify") + defer span.End() + b.pendingMu.Lock() defer b.pendingMu.Unlock() @@ -49,7 +55,7 @@ func (b *NetworkBehaviour) Notify(ctx context.Context, ev BehaviourEvent) { b.nodeHandlersMu.Lock() nh, ok := b.nodeHandlers[ev.To.ID] if !ok { - nh = NewNodeHandler(ev.To, b.rtr, b.logger) + nh = NewNodeHandler(ev.To, b.rtr, b.logger, b.tracer) b.nodeHandlers[ev.To.ID] = nh } b.nodeHandlersMu.Unlock() @@ -71,6 +77,8 @@ func (b *NetworkBehaviour) Ready() <-chan struct{} { } func (b *NetworkBehaviour) Perform(ctx context.Context) (BehaviourEvent, bool) { + _, span := b.tracer.Start(ctx, "NetworkBehaviour.Perform") + defer span.End() // No inbound work can be done until Perform is complete b.pendingMu.Lock() defer b.pendingMu.Unlock() @@ -100,7 +108,7 @@ func (b *NetworkBehaviour) getNodeHandler(ctx context.Context, id peer.ID) (*Nod if err != nil { return nil, err } - nh = NewNodeHandler(info, b.rtr, b.logger) + nh = NewNodeHandler(info, b.rtr, b.logger, b.tracer) b.nodeHandlers[id] = nh } b.nodeHandlersMu.Unlock() @@ -112,13 +120,15 @@ type NodeHandler struct { rtr Router queue *WorkQueue[NodeHandlerRequest] logger *slog.Logger + tracer trace.Tracer } -func NewNodeHandler(self peer.AddrInfo, rtr Router, logger *slog.Logger) *NodeHandler { +func NewNodeHandler(self peer.AddrInfo, rtr Router, logger *slog.Logger, tracer trace.Tracer) *NodeHandler { 
h := &NodeHandler{ self: self, rtr: rtr, logger: logger, + tracer: tracer, } h.queue = NewWorkQueue(h.send) @@ -127,6 +137,8 @@ func NewNodeHandler(self peer.AddrInfo, rtr Router, logger *slog.Logger) *NodeHa } func (h *NodeHandler) Notify(ctx context.Context, ev NodeHandlerRequest) { + ctx, span := h.tracer.Start(ctx, "NodeHandler.Notify") + defer span.End() h.queue.Enqueue(ctx, ev) } @@ -142,7 +154,7 @@ func (h *NodeHandler) send(ctx context.Context, ev NodeHandlerRequest) bool { QueryID: cmd.QueryID, To: h.self, Target: cmd.Target, - Err: fmt.Errorf("send: %w", err), + Err: fmt.Errorf("NodeHandler: %w", err), }) return false } @@ -171,6 +183,8 @@ func (h *NodeHandler) Addresses() []ma.Multiaddr { // GetClosestNodes requests the n closest nodes to the key from the node's local routing table. // The node may return fewer nodes than requested. func (h *NodeHandler) GetClosestNodes(ctx context.Context, k KadKey, n int) ([]Node, error) { + ctx, span := h.tracer.Start(ctx, "NodeHandler.GetClosestNodes") + defer span.End() w := NewWaiter[BehaviourEvent]() ev := &EventOutboundGetCloserNodes{ @@ -192,7 +206,7 @@ func (h *NodeHandler) GetClosestNodes(ctx context.Context, k KadKey, n int) ([]N nodes := make([]Node, 0, len(res.CloserNodes)) for _, info := range res.CloserNodes { // TODO use a global registry of node handlers - nodes = append(nodes, NewNodeHandler(info, h.rtr, h.logger)) + nodes = append(nodes, NewNodeHandler(info, h.rtr, h.logger, h.tracer)) n-- if n == 0 { break diff --git a/v2/coord/network_test.go b/v2/coord/network_test.go index a596edfb..ad0f3146 100644 --- a/v2/coord/network_test.go +++ b/v2/coord/network_test.go @@ -5,10 +5,11 @@ import ( "github.com/benbjohnson/clock" "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/trace" "golang.org/x/exp/slog" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" ) @@ -21,7 +22,7 @@ func TestGetClosestNodes(t *testing.T) { _, nodes, err := nettest.LinearTopology(4, clk) require.NoError(t, err) - h := NewNodeHandler(nodes[1].NodeInfo, nodes[1].Router, slog.Default()) + h := NewNodeHandler(nodes[1].NodeInfo, nodes[1].Router, slog.Default(), trace.NewNoopTracerProvider().Tracer("")) // node 1 has node 2 in its routing table so it will return it along with node 0 found, err := h.GetClosestNodes(ctx, kadt.PeerID(nodes[2].NodeInfo.ID).Key(), 2) diff --git a/v2/coord/routing.go b/v2/coord/routing.go index a78a1cb0..488ac689 100644 --- a/v2/coord/routing.go +++ b/v2/coord/routing.go @@ -82,6 +82,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { if ev.NodeInfo.ID == r.self { break } + // TODO: apply ttl cmd := &routing.EventIncludeAddCandidate[KadKey, ma.Multiaddr]{ NodeInfo: kadt.AddrInfo{Info: ev.NodeInfo}, } @@ -94,7 +95,7 @@ func (r *RoutingBehaviour) notify(ctx context.Context, ev BehaviourEvent) { case *EventRoutingUpdated: span.SetAttributes(attribute.String("event", "EventRoutingUpdated")) cmd := &routing.EventProbeAdd[KadKey]{ - NodeID: ev.NodeInfo.ID(), + NodeID: AddrInfoToNodeID(ev.NodeInfo), } // attempt to advance the probe state machine next, ok := r.advanceProbe(ctx, cmd) @@ -280,9 +281,11 @@ func (r *RoutingBehaviour) advanceBootstrap(ctx context.Context, ev routing.Boot func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.IncludeEvent) (BehaviourEvent, bool) { ctx, span := 
r.tracer.Start(ctx, "RoutingBehaviour.advanceInclude") defer span.End() + istate := r.include.Advance(ctx, ev) switch st := istate.(type) { case *routing.StateIncludeFindNodeMessage[KadKey, ma.Multiaddr]: + span.SetAttributes(attribute.String("out_event", "EventOutboundGetCloserNodes")) // include wants to send a find node message to a node return &EventOutboundGetCloserNodes{ QueryID: "include", @@ -296,12 +299,13 @@ func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.Includ // notify other routing state machines that there is a new node in the routing table r.notify(ctx, &EventRoutingUpdated{ - NodeInfo: st.NodeInfo, + NodeInfo: NodeInfoToAddrInfo(st.NodeInfo), }) // return the event to notify outwards too + span.SetAttributes(attribute.String("out_event", "EventRoutingUpdated")) return &EventRoutingUpdated{ - NodeInfo: st.NodeInfo, + NodeInfo: NodeInfoToAddrInfo(st.NodeInfo), }, true case *routing.StateIncludeWaitingAtCapacity: // nothing to do except wait for message response or timeout @@ -310,7 +314,7 @@ func (r *RoutingBehaviour) advanceInclude(ctx context.Context, ev routing.Includ case *routing.StateIncludeWaitingFull: // nothing to do except wait for message response or timeout case *routing.StateIncludeIdle: - // nothing to do except wait for message response or timeout + // nothing to do except wait for new nodes to be added to queue default: panic(fmt.Sprintf("unexpected include state: %T", st)) } diff --git a/v2/coord/routing_test.go b/v2/coord/routing_test.go index ecaeef2b..e1342c7c 100644 --- a/v2/coord/routing_test.go +++ b/v2/coord/routing_test.go @@ -16,8 +16,8 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/exp/slog" - "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/coord/internal/nettest" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" "github.com/libp2p/go-libp2p-kad-dht/v2/kadt" "github.com/libp2p/go-libp2p-kad-dht/v2/pb" ) diff --git a/v2/dht.go b/v2/dht.go index 4a2b3cf2..31787ae5 100644 --- a/v2/dht.go +++ b/v2/dht.go @@ -1,15 +1,18 @@ package dht import ( + "context" "crypto/sha256" "fmt" "io" "sync" + "time" "github.com/ipfs/go-datastore/trace" "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" "github.com/plprobelab/go-kademlia/kad" "github.com/plprobelab/go-kademlia/key" "github.com/plprobelab/go-kademlia/routing" @@ -63,8 +66,9 @@ type DHT struct { func New(h host.Host, cfg *Config) (*DHT, error) { var err error - // check if the configuration is valid - if err = cfg.Validate(); err != nil { + if cfg == nil { + cfg = DefaultConfig() + } else if err = cfg.Validate(); err != nil { return nil, fmt.Errorf("validate DHT config: %w", err) } @@ -148,7 +152,13 @@ func New(h host.Host, cfg *Config) (*DHT, error) { } // instantiate a new Kademlia DHT coordinator. 
- d.kad, err = coord.NewCoordinator(d.host.ID(), &Router{host: h}, d.rt, nil) + coordCfg, err := coord.DefaultCoordinatorConfig() + if err != nil { + return nil, fmt.Errorf("new coordinator config: %w", err) + } + coordCfg.Tele = d.tele + + d.kad, err = coord.NewCoordinator(d.host.ID(), &Router{host: h}, d.rt, coordCfg) if err != nil { return nil, fmt.Errorf("new coordinator: %w", err) } @@ -182,6 +192,10 @@ func (d *DHT) Close() error { d.log.With("err", err).Debug("failed closing event bus subscription") } + if err := d.kad.Close(); err != nil { + d.log.With("err", err).Debug("failed closing coordinator") + } + for ns, b := range d.backends { closer, ok := b.(io.Closer) if !ok { @@ -292,6 +306,15 @@ func (d *DHT) logErr(err error, msg string) { d.log.Warn(msg, "err", err.Error()) } +// AddAddresses suggests peers and their associated addresses to be added to the routing table. +// Addresses will be added to the peerstore with the supplied time to live. +func (d *DHT) AddAddresses(ctx context.Context, ais []peer.AddrInfo, ttl time.Duration) error { + ctx, span := d.tele.Tracer.Start(ctx, "DHT.AddAddresses") + defer span.End() + + return d.kad.AddNodes(ctx, ais, ttl) +} + // newSHA256Key returns a [key.Key256] that conforms to the [kad.Key] interface by // SHA256 hashing the given bytes and wrapping them in a [key.Key256]. func newSHA256Key(data []byte) key.Key256 { diff --git a/v2/dht_test.go b/v2/dht_test.go index 832c8ed1..b42b77b5 100644 --- a/v2/dht_test.go +++ b/v2/dht_test.go @@ -1,10 +1,19 @@ package dht import ( + "context" + "fmt" + "reflect" "testing" + "time" "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/libp2p/go-libp2p-kad-dht/v2/coord" + "github.com/libp2p/go-libp2p-kad-dht/v2/internal/kadtest" ) func TestNew(t *testing.T) { @@ -65,3 +74,56 @@ func TestNew(t *testing.T) { }) } } + +// expectEventType selects on the event channel until an event of the expected type is sent. 
+func expectEventType(t *testing.T, ctx context.Context, events <-chan coord.RoutingNotification, expected coord.RoutingNotification) (coord.RoutingNotification, error) { + t.Helper() + for { + select { + case ev := <-events: + t.Logf("saw event: %T\n", ev) + if reflect.TypeOf(ev) == reflect.TypeOf(expected) { + return ev, nil + } + case <-ctx.Done(): + return nil, fmt.Errorf("test deadline exceeded while waiting for event %T", expected) + } + } +} + +func TestAddAddresses(t *testing.T) { + ctx, cancel := kadtest.CtxShort(t) + defer cancel() + + localCfg := DefaultConfig() + + local := newClientDht(t, localCfg) + + remote := newServerDht(t, nil) + + // Populate entries in remote's routing table so it passes a connectivity check + fillRoutingTable(t, remote, 1) + + // local routing table should not contain the node + _, err := local.kad.GetNode(ctx, remote.host.ID()) + require.ErrorIs(t, err, coord.ErrNodeNotFound) + + remoteAddrInfo := peer.AddrInfo{ + ID: remote.host.ID(), + Addrs: remote.host.Addrs(), + } + require.NotEmpty(t, remoteAddrInfo.ID) + require.NotEmpty(t, remoteAddrInfo.Addrs) + + // Add remote's addresss to the local dht + err = local.AddAddresses(ctx, []peer.AddrInfo{remoteAddrInfo}, time.Minute) + require.NoError(t, err) + + // the include state machine runs in the background and eventually should add the node to routing table + _, err = expectEventType(t, ctx, local.kad.RoutingNotifications(), &coord.EventRoutingUpdated{}) + require.NoError(t, err) + + // the routing table should now contain the node + _, err = local.kad.GetNode(ctx, remote.host.ID()) + require.NoError(t, err) +} diff --git a/v2/go.mod b/v2/go.mod index 85b481ae..1473c15b 100644 --- a/v2/go.mod +++ b/v2/go.mod @@ -18,6 +18,7 @@ require ( github.com/plprobelab/go-kademlia v0.0.0-20230901130940-286ab4ceca60 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.17.0 + go.opentelemetry.io/otel/exporters/jaeger v1.16.0 go.opentelemetry.io/otel/metric v1.17.0 go.opentelemetry.io/otel/sdk v1.17.0 go.opentelemetry.io/otel/sdk/metric v0.40.0 diff --git a/v2/go.sum b/v2/go.sum index d392ea5d..bc586fb5 100644 --- a/v2/go.sum +++ b/v2/go.sum @@ -325,6 +325,7 @@ github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -347,6 +348,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opentelemetry.io/otel v1.17.0 h1:MW+phZ6WZ5/uk2nd93ANk/6yJ+dVrvNWUjGhnnFU5jM= go.opentelemetry.io/otel v1.17.0/go.mod h1:I2vmBGtFaODIVMBSTPVDlJSzBDNf93k60E6Ft0nyjo0= +go.opentelemetry.io/otel/exporters/jaeger v1.16.0 h1:YhxxmXZ011C0aDZKoNw+juVWAmEfv/0W2XBOv9aHTaA= +go.opentelemetry.io/otel/exporters/jaeger v1.16.0/go.mod h1:grYbBo/5afWlPpdPZYhyn78Bk04hnvxn2+hvxQhKIQM= go.opentelemetry.io/otel/metric v1.17.0 h1:iG6LGVz5Gh+IuO0jmgvpTB6YVrCGngi8QGm+pMd8Pdc= go.opentelemetry.io/otel/metric 
 go.opentelemetry.io/otel/sdk v1.17.0 h1:FLN2X66Ke/k5Sg3V623Q7h7nt3cHXaW1FOvKKrW0IpE=
diff --git a/v2/handlers_test.go b/v2/handlers_test.go
index 58d613de..115b8bc1 100644
--- a/v2/handlers_test.go
+++ b/v2/handlers_test.go
@@ -75,11 +75,11 @@ func newIdentity(t testing.TB) (peer.ID, crypto.PrivKey) {
     return id, priv
 }
 
-func fillRoutingTable(t testing.TB, d *DHT) {
+// fillRoutingTable populates d's routing table and peerstore with n random peers and addresses
+func fillRoutingTable(t testing.TB, d *DHT, n int) {
     t.Helper()
 
-    // 250 is a common number of peers to have in the routing table
-    for i := 0; i < 250; i++ {
+    for i := 0; i < n; i++ {
         // generate peer ID
         pid := newPeerID(t)
 
@@ -865,7 +865,7 @@ func TestDHT_handlePutValue_moved_from_v1_atomic_operation(t *testing.T) {
 func BenchmarkDHT_handleGetValue(b *testing.B) {
     d := newTestDHT(b)
 
-    fillRoutingTable(b, d)
+    fillRoutingTable(b, d, 250)
 
     rbe, ok := d.backends[namespaceIPNS].(*RecordBackend)
     require.True(b, ok)
@@ -909,7 +909,7 @@ func BenchmarkDHT_handleGetValue(b *testing.B) {
 func TestDHT_handleGetValue_happy_path_ipns_record(t *testing.T) {
     d := newTestDHT(t)
 
-    fillRoutingTable(t, d)
+    fillRoutingTable(t, d, 250)
 
     remote, priv := newIdentity(t)
 
@@ -944,7 +944,7 @@ func TestDHT_handleGetValue_happy_path_ipns_record(t *testing.T) {
 func TestDHT_handleGetValue_record_not_found(t *testing.T) {
     d := newTestDHT(t)
 
-    fillRoutingTable(t, d)
+    fillRoutingTable(t, d, 250)
 
     for _, ns := range []string{namespaceIPNS, namespacePublicKey} {
         t.Run(ns, func(t *testing.T) {
@@ -968,7 +968,7 @@ func TestDHT_handleGetValue_record_not_found(t *testing.T) {
 func TestDHT_handleGetValue_corrupt_record_in_datastore(t *testing.T) {
     d := newTestDHT(t)
 
-    fillRoutingTable(t, d)
+    fillRoutingTable(t, d, 250)
 
     for _, ns := range []string{namespaceIPNS, namespacePublicKey} {
         t.Run(ns, func(t *testing.T) {
@@ -1006,7 +1006,7 @@ func TestDHT_handleGetValue_corrupt_record_in_datastore(t *testing.T) {
 func TestDHT_handleGetValue_ipns_max_age_exceeded_in_datastore(t *testing.T) {
     d := newTestDHT(t)
 
-    fillRoutingTable(t, d)
+    fillRoutingTable(t, d, 250)
 
     remote, priv := newIdentity(t)
 
@@ -1048,7 +1048,7 @@ func TestDHT_handleGetValue_ipns_max_age_exceeded_in_datastore(t *testing.T) {
 func TestDHT_handleGetValue_does_not_validate_stored_record(t *testing.T) {
     d := newTestDHT(t)
 
-    fillRoutingTable(t, d)
+    fillRoutingTable(t, d, 250)
 
     rbe, err := typedBackend[*RecordBackend](d, namespaceIPNS)
     require.NoError(t, err)
@@ -1131,7 +1131,7 @@ func TestDHT_handleGetValue_supports_providers(t *testing.T) {
     p := newAddrInfo(t)
     key := []byte("random-key")
 
-    fillRoutingTable(t, d)
+    fillRoutingTable(t, d, 250)
 
     // add to addresses peerstore
     d.host.Peerstore().AddAddrs(p.ID, p.Addrs, time.Hour)
@@ -1358,7 +1358,7 @@ func BenchmarkDHT_handleGetProviders(b *testing.B) {
     ctx := context.Background()
     d := newTestDHT(b)
 
-    fillRoutingTable(b, d)
+    fillRoutingTable(b, d, 250)
 
     be, ok := d.backends[namespaceIPNS].(*RecordBackend)
     require.True(b, ok)
@@ -1404,7 +1404,7 @@ func TestDHT_handleGetProviders_happy_path(t *testing.T) {
     ctx := context.Background()
     d := newTestDHT(t)
 
-    fillRoutingTable(t, d)
+    fillRoutingTable(t, d, 250)
 
     key := []byte("random-key")
 
@@ -1455,7 +1455,7 @@ func TestDHT_handleGetProviders_do_not_return_expired_records(t *testing.T) {
     ctx := context.Background()
     d := newTestDHT(t)
 
-    fillRoutingTable(t, d)
+    fillRoutingTable(t, d, 250)
 
     key := []byte("random-key")
 
@@ -1514,7 +1514,7 @@ func TestDHT_handleGetProviders_only_serve_filtered_addresses(t *testing.T) {
 
     d := newTestDHTWithConfig(t, cfg)
 
-    fillRoutingTable(t, d)
+    fillRoutingTable(t, d, 250)
 
     key := []byte("random-key")
 
diff --git a/v2/coord/internal/kadtest/context.go b/v2/internal/kadtest/context.go
similarity index 100%
rename from v2/coord/internal/kadtest/context.go
rename to v2/internal/kadtest/context.go
diff --git a/v2/internal/kadtest/tracing.go b/v2/internal/kadtest/tracing.go
new file mode 100644
index 00000000..dc7c82c8
--- /dev/null
+++ b/v2/internal/kadtest/tracing.go
@@ -0,0 +1,33 @@
+package kadtest
+
+import (
+    "context"
+    "fmt"
+    "testing"
+
+    "go.opentelemetry.io/otel/exporters/jaeger"
+    "go.opentelemetry.io/otel/sdk/trace"
+)
+
+// JaegerTracerProvider creates a tracer provider that exports traces to a Jaeger instance running
+// on localhost on port 14268
+func JaegerTracerProvider(t *testing.T) *trace.TracerProvider {
+    t.Helper()
+
+    traceHost := "127.0.0.1"
+    tracePort := 14268
+
+    endpoint := fmt.Sprintf("http://%s:%d/api/traces", traceHost, tracePort)
+    exp, err := jaeger.New(jaeger.WithCollectorEndpoint(jaeger.WithEndpoint(endpoint)))
+    if err != nil {
+        t.Fatalf("failed to create jaeger exporter: %v", err)
+    }
+
+    tp := trace.NewTracerProvider(trace.WithBatcher(exp))
+
+    t.Cleanup(func() {
+        tp.Shutdown(context.Background())
+    })
+
+    return tp
+}
diff --git a/v2/query_test.go b/v2/query_test.go
new file mode 100644
index 00000000..29fa004a
--- /dev/null
+++ b/v2/query_test.go
@@ -0,0 +1,74 @@
+package dht
+
+import (
+    "testing"
+
+    "github.com/libp2p/go-libp2p"
+    "github.com/libp2p/go-libp2p/core/host"
+    "github.com/stretchr/testify/require"
+)
+
+func newServerHost(t testing.TB) host.Host {
+    listenAddr := libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0")
+
+    h, err := libp2p.New(listenAddr)
+    require.NoError(t, err)
+
+    t.Cleanup(func() {
+        if err = h.Close(); err != nil {
+            t.Logf("unexpected error when closing host: %s", err)
+        }
+    })
+
+    return h
+}
+
+func newClientHost(t testing.TB) host.Host {
+    h, err := libp2p.New(libp2p.NoListenAddrs)
+    require.NoError(t, err)
+
+    t.Cleanup(func() {
+        if err = h.Close(); err != nil {
+            t.Logf("unexpected error when closing host: %s", err)
+        }
+    })
+
+    return h
+}
+
+func newServerDht(t testing.TB, cfg *Config) *DHT {
+    h := newServerHost(t)
+
+    if cfg == nil {
+        cfg = DefaultConfig()
+    }
+    cfg.Mode = ModeOptServer
+
+    d, err := New(h, cfg)
+    require.NoError(t, err)
+
+    t.Cleanup(func() {
+        if err = d.Close(); err != nil {
+            t.Logf("unexpected error when closing dht: %s", err)
+        }
+    })
+    return d
+}
+
+func newClientDht(t testing.TB, cfg *Config) *DHT {
+    h := newClientHost(t)
+
+    if cfg == nil {
+        cfg = DefaultConfig()
+    }
+    cfg.Mode = ModeOptClient
+    d, err := New(h, cfg)
+    require.NoError(t, err)
+
+    t.Cleanup(func() {
+        if err = d.Close(); err != nil {
+            t.Logf("unexpected error when closing dht: %s", err)
+        }
+    })
+    return d
+}
diff --git a/v2/routing.go b/v2/routing.go
index eacc9d8d..cc72f849 100644
--- a/v2/routing.go
+++ b/v2/routing.go
@@ -47,7 +47,7 @@ func (d *DHT) FindPeer(ctx context.Context, id peer.ID) (peer.AddrInfo, error) {
         slog.Info("visiting node", "id", node.ID())
         if node.ID() == id {
             foundNode = node
-            return coord.SkipRemaining
+            return coord.ErrSkipRemaining
         }
         return nil
     }
diff --git a/v2/stream_test.go b/v2/stream_test.go
index 83e203d3..12f61031 100644
--- a/v2/stream_test.go
+++ b/v2/stream_test.go
@@ -63,7 +63,7 @@ func newPeerPair(t testing.TB) (host.Host, *DHT) {
     cfg.Mode = ModeOptServer
     serverDHT, err := New(server, cfg)
 
-    fillRoutingTable(t, serverDHT)
+    fillRoutingTable(t, serverDHT, 250)
 
     t.Cleanup(func() {
         if err = serverDHT.Close(); err != nil {