diff --git a/Gopkg.lock b/Gopkg.lock index 3efdd0c0570..d9c0d25545e 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -37,14 +37,6 @@ pruneopts = "UT" revision = "2a560b2036bee5e3679ec2133eb6520b2f195213" -[[projects]] - branch = "master" - digest = "1:95f4fdfbc8d7723bd21e467aabe98d86f06d3e2557b1ba92bf9df8aea6706784" - name = "github.com/btcsuite/btcutil" - packages = ["base58"] - pruneopts = "UT" - revision = "ab6388e0c60ae4834a1f57511e20c17b5f78be4b" - [[projects]] digest = "1:0ef770954bca104ee99b3b6b7f9b240605ac03517d9f98cbc1893daa03f3c038" name = "github.com/coreos/go-semver" @@ -146,12 +138,11 @@ revision = "80eebfe947f79f7b746dc7a2ef39fd98db275853" [[projects]] - digest = "1:d68801525369786c99880084140da0ec56cd786f12cede164b21ba8de22e2fdd" + digest = "1:2537bea8e6a4a90946bcd145cbdbd6f54d2dc263b3dee0ad96894e555a474f8b" name = "github.com/gogo/protobuf" packages = [ "io", "proto", - "sortkeys", ] pruneopts = "UT" revision = "636bf0302bc95575d69441b25a2603156ffdddf1" @@ -254,12 +245,25 @@ revision = "1395d1447324cbea88d249fbfcfd70ea878fdfca" [[projects]] - digest = "1:5e59d6e02abee7ab8ef7f396f5d681054fbfae02ff159fe3d1e1a01e6ed2b034" - name = "github.com/ipfs/go-ipfs-addr" + digest = "1:9999472bf9c934426d967c507d61f2de84affb8318b8d8c8bc38b3486edbd826" + name = "github.com/ipfs/go-cid" packages = ["."] pruneopts = "UT" - revision = "563b741b4726c35ffb6cd18eb053dab346df2684" - version = "v0.1.25" + revision = "6e296c5c49ad84dc6a44af69fa1fe4e1245cd0cf" + version = "v0.9.0" + +[[projects]] + digest = "1:80ed007eea5ec0f02359685217075730bb18ddcd702c5a3e619b0c35edabfb49" + name = "github.com/ipfs/go-datastore" + packages = [ + ".", + "autobatch", + "query", + "sync", + ] + pruneopts = "UT" + revision = "5accf38b689df4e06a973bb70cc678f798d691c0" + version = "v3.2.0" [[projects]] digest = "1:aae82c244bd91107575a0244a29e82e34ba23e06b6892557e33728de48deba96" @@ -282,6 +286,14 @@ revision = "14e95105cbafcda64fdc36197fe6a30b23c693dc" version = "v1.5.7" +[[projects]] + digest = 
"1:02f46892f567f7d4750ca585af9f1befecec9133fbd6e2eb329fb305d7f6489d" + name = "github.com/ipfs/go-todocounter" + packages = ["."] + pruneopts = "UT" + revision = "289b6e202fbecea527f25a32d76593ee2f12de4c" + version = "v1.0.1" + [[projects]] digest = "1:6b8866cc9496eada254debc24199981e21fd70fd0031ff7a581ac91be47187bb" name = "github.com/jackpal/gateway" @@ -298,6 +310,14 @@ revision = "c9cfead9f2a36ddf3daa40ba269aa7f4bbba6b62" version = "v1.0.1" +[[projects]] + branch = "master" + digest = "1:62fe3a7ea2050ecbd753a71889026f83d73329337ada66325cbafd5dea5f713d" + name = "github.com/jbenet/go-context" + packages = ["io"] + pruneopts = "UT" + revision = "d14ea06fba99483203c19d92cfcd13ebe73135f4" + [[projects]] branch = "master" digest = "1:8f4aedc183dc8dfef9a7a1f1ba205dc87ecd2675eea350a736bda889e3bcf8ea" @@ -379,13 +399,14 @@ version = "v0.2.0" [[projects]] - digest = "1:f2c159c4c56e156671686aeb5157b3c041516460df3aa563e72f88a3eddaf334" + digest = "1:29c26e8ea16b6fb1c7c5e20f2dcd7fb37a5926d2251c2ea7e98a3c1e2b8c7649" name = "github.com/libp2p/go-libp2p" packages = [ ".", "config", "p2p/discovery", "p2p/host/basic", + "p2p/net/mock", "p2p/protocol/identify", "p2p/protocol/identify/pb", "p2p/protocol/ping", @@ -416,6 +437,14 @@ revision = "274de1bb6c27780863df6b230c91324ab481dab2" version = "v2.0.1" +[[projects]] + branch = "master" + digest = "1:a607dfc95b409c84567c32cbd742e466e5864e1f3839cd2fa366990984ad0412" + name = "github.com/libp2p/go-libp2p-discovery" + packages = ["."] + pruneopts = "UT" + revision = "cc4105e21706452e5b0f7e05390f987017188d31" + [[projects]] digest = "1:de92515b23a5c4cd04893e67cd9af1dfff7a2a5337f95c36de273ecf757628e7" name = "github.com/libp2p/go-libp2p-host" @@ -440,6 +469,30 @@ revision = "3eda0a328422c06235501df5be481e4b51edc7cf" version = "v3.0.0" +[[projects]] + digest = "1:70fa56bd58ce2acfb6e6410144fc3af44b54f78e22a7ad6609dfe023133bd9c1" + name = "github.com/libp2p/go-libp2p-kad-dht" + packages = [ + ".", + "opts", + "pb", + "providers", + ] + 
pruneopts = "UT" + revision = "ee77252da00fc7ea9bc3ba309d63cff1c13555be" + version = "v4.4.12" + +[[projects]] + digest = "1:039ea002cd495f7b0681eea691d7dd00c7ce4b7e866d311bf560d56e6389dc08" + name = "github.com/libp2p/go-libp2p-kbucket" + packages = [ + ".", + "keyspace", + ] + pruneopts = "UT" + revision = "6ac78c66a92e82b828ecaa49e86445ba081446d1" + version = "v2.2.12" + [[projects]] digest = "1:383bcb8a366fab9219a01aada0916fcc014286b8d8ec75475f4d817cc6fd3ce5" name = "github.com/libp2p/go-libp2p-loggables" @@ -473,10 +526,19 @@ version = "v3.0.15" [[projects]] - digest = "1:cecdce38bd4fa8534625a84b5b9298554f5105aead852fd5324144224eebf03b" + digest = "1:a9252798bd542d1179f03bd26c58ddfd7769af89f3d764bcfa9dd8aa9b2bce37" + name = "github.com/libp2p/go-libp2p-netutil" + packages = ["."] + pruneopts = "UT" + revision = "2311f6b4677fd8027ba6466804d556e8dc84ea32" + version = "v0.4.12" + +[[projects]] + digest = "1:f9825c271a20eb262debd47f5f008af0db470f5205ed7104f0da87ab06a0569c" name = "github.com/libp2p/go-libp2p-peer" packages = [ ".", + "peerset", "test", ] pruneopts = "UT" @@ -484,12 +546,13 @@ version = "v2.4.0" [[projects]] - digest = "1:2063708efd5552590aeec525a47d0e9dcf981336c20af73f952fb8599ddc394d" + digest = "1:89c91d08dff33688a40ffa2850907848597a5e8f975edaf96aa07bf240132a85" name = "github.com/libp2p/go-libp2p-peerstore" packages = [ ".", "addr", "pstoremem", + "queue", ] pruneopts = "UT" revision = "6295e61c9fd2f13ad159c6241be3b371918045e2" @@ -514,6 +577,29 @@ revision = "f736644fe805a9f5677c82aca25c82da7cde2c76" version = "v0.11.10" +[[projects]] + digest = "1:e9460a69b97787611cb5fd53e3532a61b29f04db69ba9a3701e3ca9be85ac8c5" + name = "github.com/libp2p/go-libp2p-record" + packages = [ + ".", + "pb", + ] + pruneopts = "UT" + revision = "7a182bb5ae667ca5c930d807be14d0655afd3d57" + version = "v4.1.7" + +[[projects]] + digest = "1:0e9df8520b9cc863eeeff7a203746d82b910c4a8123b1aa4ce2253b8c4efa391" + name = "github.com/libp2p/go-libp2p-routing" + packages = [ + 
".", + "notifications", + "options", + ] + pruneopts = "UT" + revision = "c568217bd16dbdb16aaa3064f5d1f2dfa224b589" + version = "v2.7.1" + [[projects]] digest = "1:1f4be308ef940c89130bee25cd1fcb5a9e39262dc17aa82902370cb18b0cdfa6" name = "github.com/libp2p/go-libp2p-secio" @@ -650,12 +736,12 @@ version = "v0.0.4" [[projects]] - digest = "1:57689550840d285f2da9e85356a66e626592a8b6f1170d7f2482438e64fe82e3" + digest = "1:1b46adc9e3d878cdf38a164cfdac2e19340f4d2662aa5bee88062f6ee08ac9df" name = "github.com/miekg/dns" packages = ["."] pruneopts = "UT" - revision = "d74956db7b5b20451796774572d0f5a0222e377a" - version = "v1.0.13" + revision = "8fc2e5773bbd308ca2fcc962fd8d25c1bd0f6743" + version = "v1.1.4" [[projects]] branch = "master" @@ -729,6 +815,14 @@ revision = "cba4f9fea8613343eb7ecc4ddadd8e7298a00c39" version = "v1.6.3" +[[projects]] + digest = "1:28e83f2abf3e3c83b9ad533a4295bdff102f94868b06d5f7bfe71c5477d81c8f" + name = "github.com/multiformats/go-multibase" + packages = ["."] + pruneopts = "UT" + revision = "bb91b53e5695e699a86654d77d03db7bc7506d12" + version = "v0.3.0" + [[projects]] digest = "1:c0ea71365e7d0e63a2e8f48e6fc2ba92f2f2b739bbeb3cdabdcd719037e175c2" name = "github.com/multiformats/go-multihash" @@ -842,6 +936,22 @@ revision = "cfb38830724cc34fedffe9a2a29fb54fa9169cd1" version = "v1.20.0" +[[projects]] + branch = "master" + digest = "1:98fa13beefbf581ec173561adad6374c460631593b4bdcf03adc29cd18e5d2f5" + name = "github.com/whyrusleeping/base32" + packages = ["."] + pruneopts = "UT" + revision = "c30ac30633ccdabefe87eb12465113f06f1bab75" + +[[projects]] + branch = "master" + digest = "1:b33eed6794f2b2d1a7d0b45cb705402f26af0f0ad6521667e144ffa71f52d9d9" + name = "github.com/whyrusleeping/go-keyspace" + packages = ["."] + pruneopts = "UT" + revision = "5b898ac5add1da7178a4a98e69cb7b9205c085ee" + [[projects]] branch = "master" digest = "1:0102dfa2f98777db8ff68c920bef1be3ff8504e90485b6cdd24d61b981b487b6" @@ -996,7 +1106,7 @@ [[projects]] branch = "v2" - digest 
= "1:712660a913371014b418aa83a0cd2dee3a5eb428f09247e392121cc2ea8b11e4" + digest = "1:35af419dbe2008260a34284958e2e8d93b15b2aff46f2aaf074402ab8173fa6a" name = "gopkg.in/dedis/kyber.v2" packages = [ ".", @@ -1004,7 +1114,6 @@ "group/internal/marshalling", "group/mod", "sign/schnorr", - "util/key", "util/random", "xof/blake2xb", ] @@ -1048,7 +1157,6 @@ "github.com/360EntSecGroup-Skylar/excelize", "github.com/beevik/ntp", "github.com/btcsuite/btcd/btcec", - "github.com/btcsuite/btcutil/base58", "github.com/davecgh/go-spew/spew", "github.com/dedis/kyber", "github.com/dedis/kyber/group/edwards25519", @@ -1066,33 +1174,42 @@ "github.com/gin-gonic/gin", "github.com/gin-gonic/gin/binding", "github.com/glycerine/go-capnproto", - "github.com/gogo/protobuf/sortkeys", + "github.com/gogo/protobuf/io", + "github.com/gogo/protobuf/proto", "github.com/hashicorp/golang-lru", - "github.com/ipfs/go-ipfs-addr", + "github.com/ipfs/go-log", + "github.com/jbenet/goprocess", "github.com/libp2p/go-libp2p", "github.com/libp2p/go-libp2p-crypto", + "github.com/libp2p/go-libp2p-discovery", "github.com/libp2p/go-libp2p-host", + "github.com/libp2p/go-libp2p-interface-connmgr", + "github.com/libp2p/go-libp2p-kad-dht", "github.com/libp2p/go-libp2p-net", "github.com/libp2p/go-libp2p-peer", "github.com/libp2p/go-libp2p-peerstore", + "github.com/libp2p/go-libp2p-peerstore/pstoremem", "github.com/libp2p/go-libp2p-protocol", "github.com/libp2p/go-libp2p-pubsub", "github.com/libp2p/go-libp2p-pubsub/pb", "github.com/libp2p/go-libp2p/p2p/discovery", - "github.com/libp2p/go-testutil", + "github.com/libp2p/go-libp2p/p2p/net/mock", + "github.com/mr-tron/base58/base58", "github.com/multiformats/go-multiaddr", + "github.com/multiformats/go-multistream", "github.com/pkg/errors", "github.com/satori/go.uuid", "github.com/sirupsen/logrus", "github.com/stretchr/testify/assert", "github.com/syndtr/goleveldb/leveldb", "github.com/urfave/cli", + "github.com/whyrusleeping/go-logging", + 
"github.com/whyrusleeping/timecache", "golang.org/x/crypto/blake2b", "golang.org/x/crypto/sha3", "gopkg.in/dedis/kyber.v2", "gopkg.in/dedis/kyber.v2/group/edwards25519", "gopkg.in/dedis/kyber.v2/sign/schnorr", - "gopkg.in/dedis/kyber.v2/util/key", "gopkg.in/go-playground/validator.v8", "gotest.tools/assert", ] diff --git a/cmd/bootnode/main.go b/cmd/bootnode/main.go index 231c143fd22..b023bfe4fd5 100644 --- a/cmd/bootnode/main.go +++ b/cmd/bootnode/main.go @@ -2,6 +2,8 @@ package main import ( "context" + "crypto/ecdsa" + "crypto/rand" "encoding/base64" "encoding/hex" "encoding/json" @@ -39,15 +41,18 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/node" "github.com/ElrondNetwork/elrond-go-sandbox/ntp" "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p/libp2p" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p/loadBalancer" "github.com/ElrondNetwork/elrond-go-sandbox/process/block" "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" - "github.com/ElrondNetwork/elrond-go-sandbox/process/interceptor" - "github.com/ElrondNetwork/elrond-go-sandbox/process/resolver" + "github.com/ElrondNetwork/elrond-go-sandbox/process/factory/containers" sync2 "github.com/ElrondNetwork/elrond-go-sandbox/process/sync" "github.com/ElrondNetwork/elrond-go-sandbox/process/transaction" "github.com/ElrondNetwork/elrond-go-sandbox/sharding" "github.com/ElrondNetwork/elrond-go-sandbox/storage" beevikntp "github.com/beevik/ntp" + "github.com/btcsuite/btcd/btcec" + crypto2 "github.com/libp2p/go-libp2p-crypto" "github.com/pkg/errors" "github.com/urfave/cli" ) @@ -82,12 +87,12 @@ type genesis struct { } type netMessengerConfig struct { - ctx context.Context - port int - maxAllowedPeers int - marshalizer marshal.Marshalizer - hasher hashing.Hasher - pubSubStrategy p2p.PubSubStrategy + ctx context.Context + port int + maxAllowedPeers int + marshalizer marshal.Marshalizer + hasher hashing.Hasher + peerDiscoveryType p2p.PeerDiscoveryType } 
func main() { @@ -98,7 +103,7 @@ func main() { cli.AppHelpTemplate = bootNodeHelpTemplate app.Name = "BootNode CLI App" app.Usage = "This is the entry point for starting a new bootstrap node - the app will start after the genesis timestamp" - app.Flags = []cli.Flag{flags.GenesisFile, flags.Port, flags.MaxAllowedPeers, flags.PrivateKey} + app.Flags = []cli.Flag{flags.GenesisFile, flags.Port, flags.MaxAllowedPeers, flags.PrivateKey, flags.PeerDiscoveryType} app.Action = func(c *cli.Context) error { return startNode(c, log) } @@ -165,6 +170,7 @@ func startNode(ctx *cli.Context, log *logger.Logger) error { err = ef.StartNode() if err != nil { log.Error("starting node failed", err.Error()) + return err } } @@ -326,53 +332,63 @@ func createNode(ctx *cli.Context, cfg *config.Config, genesisConfig *genesis, sy return nil, err } - netMessenger, err := createNetMessenger(netMessengerConfig{ + netMessengerCfg := netMessengerConfig{ ctx: appContext, port: ctx.GlobalInt(flags.Port.Name), maxAllowedPeers: ctx.GlobalInt(flags.MaxAllowedPeers.Name), marshalizer: marshalizer, hasher: hasher, - pubSubStrategy: p2p.GossipSub, - }) + } + + netMessengerCfg.peerDiscoveryType, err = p2p.LoadPeerDiscoveryTypeFromString( + ctx.GlobalString(flags.PeerDiscoveryType.Name), + ) + if err != nil { + return nil, err + } + + netMessenger, err := createNetMessenger(netMessengerCfg, log) if err != nil { return nil, err } - interceptorsContainer := interceptor.NewContainer() - resolversContainer := resolver.NewContainer() - - processorFactory, err := factory.NewProcessorsCreator(factory.ProcessorsCreatorConfig{ - InterceptorContainer: interceptorsContainer, - ResolverContainer: resolversContainer, - Messenger: netMessenger, - Blockchain: blkc, - DataPool: datapool, - ShardCoordinator: shardCoordinator, - AddrConverter: addressConverter, - Hasher: hasher, - Marshalizer: marshalizer, - MultiSigner: multisigner, - SingleSigner: singlesigner, - KeyGen: keyGen, - Uint64ByteSliceConverter: 
uint64ByteSliceConverter, - }) + interceptorsContainer := containers.NewObjectsContainer() + resolversContainer := containers.NewResolversContainer() + + interceptorsResolversFactory, err := factory.NewInterceptorsResolversCreator( + factory.InterceptorsResolversConfig{ + + InterceptorContainer: interceptorsContainer, + ResolverContainer: resolversContainer, + Messenger: netMessenger, + Blockchain: blkc, + DataPool: datapool, + ShardCoordinator: shardCoordinator, + AddrConverter: addressConverter, + Hasher: hasher, + Marshalizer: marshalizer, + MultiSigner: multisigner, + SingleSigner: singlesigner, + KeyGen: keyGen, + Uint64ByteSliceConverter: uint64ByteSliceConverter, + }) if err != nil { return nil, err } - err = processorFactory.CreateInterceptors() + err = interceptorsResolversFactory.CreateInterceptors() if err != nil { return nil, err } - err = processorFactory.CreateResolvers() + err = interceptorsResolversFactory.CreateResolvers() if err != nil { return nil, err } forkDetector := sync2.NewBasicForkDetector() - res, err := processorFactory.ResolverContainer().Get(string(factory.TransactionTopic)) + res, err := interceptorsResolversFactory.ResolverContainer().Get(string(factory.TransactionTopic)) if err != nil { return nil, err } @@ -421,7 +437,7 @@ func createNode(ctx *cli.Context, cfg *config.Config, genesisConfig *genesis, sy node.WithPublicKey(pubKey), node.WithPrivateKey(privKey), node.WithForkDetector(forkDetector), - node.WithProcessorCreator(processorFactory), + node.WithInterceptorsResolversFactory(interceptorsResolversFactory), ) if err != nil { @@ -438,12 +454,12 @@ func createNode(ctx *cli.Context, cfg *config.Config, genesisConfig *genesis, sy func createRequestTransactionHandler(txResolver *transaction.TxResolver, log *logger.Logger) func(destShardID uint32, txHash []byte) { return func(destShardID uint32, txHash []byte) { - _ = txResolver.RequestTransactionFromHash(txHash) + _ = txResolver.RequestDataFromHash(txHash) 
log.Debug(fmt.Sprintf("Requested tx for shard %d with hash %s from network\n", destShardID, toB64(txHash))) } } -func createNetMessenger(config netMessengerConfig) (p2p.Messenger, error) { +func createNetMessenger(config netMessengerConfig, log *logger.Logger) (p2p.Messenger, error) { if config.port == 0 { return nil, errors.New("cannot start node on port 0") } @@ -452,13 +468,20 @@ func createNetMessenger(config netMessengerConfig) (p2p.Messenger, error) { return nil, errors.New("cannot start node without providing maxAllowedPeers") } - //TODO check if libp2p provides a better random source - cp := &p2p.ConnectParams{} - cp.Port = config.port - cp.GeneratePrivPubKeys(time.Now().UnixNano()) - cp.GenerateIDFromPubKey() + log.Info(fmt.Sprintf("Starting with peer discovery: %s", config.peerDiscoveryType)) + + prvKey, _ := ecdsa.GenerateKey(btcec.S256(), rand.Reader) + sk := (*crypto2.Secp256k1PrivateKey)(prvKey) + + nm, err := libp2p.NewNetworkMessenger( + config.ctx, + config.port, + sk, + nil, + loadBalancer.NewOutgoingPipeLoadBalancer(), + config.peerDiscoveryType, + ) - nm, err := p2p.NewNetMessenger(config.ctx, config.marshalizer, config.hasher, cp, config.maxAllowedPeers, config.pubSubStrategy) if err != nil { return nil, err } diff --git a/cmd/facade/mock/nodeMock.go b/cmd/facade/mock/nodeMock.go index 36a9a344583..b113768120d 100644 --- a/cmd/facade/mock/nodeMock.go +++ b/cmd/facade/mock/nodeMock.go @@ -5,7 +5,6 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/state" "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" - "github.com/ElrondNetwork/elrond-go-sandbox/process" ) type NodeMock struct { @@ -69,14 +68,6 @@ func (nm *NodeMock) SendTransaction(nonce uint64, sender string, receiver string return nm.SendTransactionHandler(nonce, sender, receiver, value, transactionData, signature) } -func (nm *NodeMock) GetInterceptors() []process.Interceptor { - return nil -} - -func (nm *NodeMock) GetResolvers() []process.Resolver { - return nil -} 
- func (nm *NodeMock) GetCurrentPublicKey() string { return nm.GetCurrentPublicKeyHandler() } diff --git a/cmd/flags/flags.go b/cmd/flags/flags.go index 48dc3ce26e5..87141a9baea 100644 --- a/cmd/flags/flags.go +++ b/cmd/flags/flags.go @@ -32,4 +32,11 @@ var ( Usage: "Maximum connections the user is willing to accept", Value: 10, } + // PeerDiscoveryType defines a flag for setting the peer discovery type + PeerDiscoveryType = cli.StringFlag{ + Name: "peer-discovery-type", + Usage: "Peer discovery type. 'off' - discovery turned off, 'mdns'" + + " - discovery using mdns mechanism, 'kad-dht' - discovery using seed nodes", + Value: "mdns", + } ) diff --git a/consensus/spos/bn/bnFactory.go b/consensus/spos/bn/bnFactory.go index d392a30fb92..38868e982ea 100644 --- a/consensus/spos/bn/bnFactory.go +++ b/consensus/spos/bn/bnFactory.go @@ -86,7 +86,7 @@ const ( type factory struct { blockChain *blockchain.BlockChain blockProcessor process.BlockProcessor - bootstraper process.Bootstraper + bootstraper process.Bootstrapper chronologyHandler consensus.ChronologyHandler consensusState *spos.ConsensusState hasher hashing.Hasher @@ -103,7 +103,7 @@ type factory struct { func NewFactory( blockChain *blockchain.BlockChain, blockProcessor process.BlockProcessor, - bootstraper process.Bootstraper, + bootstraper process.Bootstrapper, chronologyHandler consensus.ChronologyHandler, consensusState *spos.ConsensusState, hasher hashing.Hasher, @@ -158,7 +158,7 @@ func NewFactory( func checkNewFactoryParams( blockChain *blockchain.BlockChain, blockProcessor process.BlockProcessor, - bootstraper process.Bootstraper, + bootstraper process.Bootstrapper, chronologyHandler consensus.ChronologyHandler, consensusState *spos.ConsensusState, hasher hashing.Hasher, diff --git a/consensus/spos/bn/bnWorker.go b/consensus/spos/bn/bnWorker.go index 49a310555aa..08ebe2e124d 100644 --- a/consensus/spos/bn/bnWorker.go +++ b/consensus/spos/bn/bnWorker.go @@ -20,7 +20,7 @@ var log = logger.NewDefaultLogger() 
// worker defines the data needed by spos to communicate between nodes which are in the validators group type worker struct { - bootstraper process.Bootstraper + bootstraper process.Bootstrapper consensusState *spos.ConsensusState keyGenerator crypto.KeyGenerator marshalizer marshal.Marshalizer @@ -45,7 +45,7 @@ type worker struct { // NewWorker creates a new worker object func NewWorker( - bootstraper process.Bootstraper, + bootstraper process.Bootstrapper, consensusState *spos.ConsensusState, keyGenerator crypto.KeyGenerator, marshalizer marshal.Marshalizer, @@ -95,7 +95,7 @@ func NewWorker( } func checkNewWorkerParams( - bootstraper process.Bootstraper, + bootstraper process.Bootstrapper, consensusState *spos.ConsensusState, keyGenerator crypto.KeyGenerator, marshalizer marshal.Marshalizer, @@ -194,16 +194,24 @@ func (wrk *worker) getCleanedList(cnsDataList []*spos.ConsensusMessage) []*spos. return cleanedCnsDataList } -// ReceivedMessage method redirects the received message to the channel which should handle it -func (wrk *worker) ReceivedMessage(name string, data interface{}, msgInfo *p2p.MessageInfo) error { +// ProcessReceivedMessage method redirects the received message to the channel which should handle it +func (wrk *worker) ProcessReceivedMessage(message p2p.MessageP2P) error { if wrk.consensusState.RoundCanceled { return ErrRoundCanceled } - cnsDta, ok := data.(*spos.ConsensusMessage) + if message == nil { + return ErrNilMessage + } + + if message.Data() == nil { + return ErrNilDataToProcess + } - if !ok { - return ErrInvalidConsensusData + cnsDta := &spos.ConsensusMessage{} + err := wrk.marshalizer.Unmarshal(cnsDta, message.Data()) + if err != nil { + return err } log.Debug(fmt.Sprintf("received %s from %s\n", MessageType(cnsDta.MsgType).String(), hex.EncodeToString(cnsDta.PubKey))) @@ -219,7 +227,9 @@ func (wrk *worker) ReceivedMessage(name string, data interface{}, msgInfo *p2p.M } if wrk.consensusState.SelfPubKey() == string(cnsDta.PubKey) { - 
return ErrMessageFromItself + //in this case should return nil but do not process the message + //nil error will mean that the interceptor will validate this message and broadcast it to the connected peers + return nil } sigVerifErr := wrk.checkSignature(cnsDta) diff --git a/consensus/spos/bn/bnWorker_test.go b/consensus/spos/bn/bnWorker_test.go index 083d8b696fc..23fa7557575 100644 --- a/consensus/spos/bn/bnWorker_test.go +++ b/consensus/spos/bn/bnWorker_test.go @@ -573,7 +573,7 @@ func TestWorker_RemoveAllReceivedMessageCallsShouldWork(t *testing.T) { assert.Nil(t, receivedMessageCalls[bn.MtBlockBody]) } -func TestWorker_ReceivedMessageTxBlockBody(t *testing.T) { +func TestWorker_ProcessReceivedMessageTxBlockBodyShouldRetNil(t *testing.T) { t.Parallel() wrk := *initWorker() @@ -591,11 +591,15 @@ func TestWorker_ReceivedMessageTxBlockBody(t *testing.T) { 0, ) - err := wrk.ReceivedMessage(string(consensusTopic), cnsMsg, nil) + buff, _ := wrk.Marshalizer().Marshal(cnsMsg) + + time.Sleep(time.Second) + + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}) assert.Nil(t, err) } -func TestWorker_ReceivedMessageUnknown(t *testing.T) { +func TestWorker_ProcessReceivedMessageHeaderShouldRetNil(t *testing.T) { t.Parallel() wrk := *initWorker() @@ -618,11 +622,15 @@ func TestWorker_ReceivedMessageUnknown(t *testing.T) { 0, ) - err := wrk.ReceivedMessage(string(consensusTopic), cnsMsg, nil) + buff, _ := wrk.Marshalizer().Marshal(cnsMsg) + + time.Sleep(time.Second) + + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}) assert.Nil(t, err) } -func TestWorker_ReceivedMessageShouldReturnWhenIsCanceled(t *testing.T) { +func TestWorker_ProcessReceivedMessageRoundCanceledShouldErr(t *testing.T) { t.Parallel() wrk := *initWorker() @@ -641,23 +649,43 @@ func TestWorker_ReceivedMessageShouldReturnWhenIsCanceled(t *testing.T) { ) wrk.ConsensusState().RoundCanceled = true - err := wrk.ReceivedMessage(string(consensusTopic), cnsMsg, nil) + buff, _ 
:= wrk.Marshalizer().Marshal(cnsMsg) + + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}) + + time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) assert.Equal(t, bn.ErrRoundCanceled, err) } -func TestWorker_ReceivedMessageShouldReturnWhenDataReceivedIsInvalid(t *testing.T) { +func TestWorker_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { t.Parallel() wrk := *initWorker() - err := wrk.ReceivedMessage(string(consensusTopic), nil, nil) + err := wrk.ProcessReceivedMessage(nil) + + time.Sleep(time.Second) + assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) - assert.Equal(t, bn.ErrInvalidConsensusData, err) + assert.Equal(t, bn.ErrNilMessage, err) } -func TestWorker_ReceivedMessageShouldReturnWhenNodeIsNotInTheEligibleList(t *testing.T) { +func TestWorker_ProcessReceivedMessageNilMessageDataFieldShouldErr(t *testing.T) { + t.Parallel() + + wrk := *initWorker() + + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{}) + + time.Sleep(time.Second) + + assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) + assert.Equal(t, bn.ErrNilDataToProcess, err) +} + +func TestWorker_ProcessReceivedMessageNodeNotInEligibleListShouldErr(t *testing.T) { t.Parallel() wrk := *initWorker() @@ -675,13 +703,17 @@ func TestWorker_ReceivedMessageShouldReturnWhenNodeIsNotInTheEligibleList(t *tes 0, ) - err := wrk.ReceivedMessage(string(consensusTopic), cnsMsg, nil) + buff, _ := wrk.Marshalizer().Marshal(cnsMsg) + + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}) + + time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) assert.Equal(t, bn.ErrSenderNotOk, err) } -func TestWorker_ReceivedMessageShouldReturnWhenMessageIsForPastRound(t *testing.T) { +func TestWorker_ProcessReceivedMessageMessageIsForPastRoundShouldErr(t *testing.T) { t.Parallel() wrk := *initWorker() @@ -699,13 +731,17 @@ func 
TestWorker_ReceivedMessageShouldReturnWhenMessageIsForPastRound(t *testing. -1, ) - err := wrk.ReceivedMessage(string(consensusTopic), cnsMsg, nil) + buff, _ := wrk.Marshalizer().Marshal(cnsMsg) + + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}) + + time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) assert.Equal(t, bn.ErrMessageForPastRound, err) } -func TestWorker_ReceivedMessageShouldReturnWhenReceivedMessageIsFromSelf(t *testing.T) { +func TestWorker_ProcessReceivedMessageReceivedMessageIsFromSelfShouldRetNilAndNotProcess(t *testing.T) { t.Parallel() wrk := *initWorker() @@ -723,13 +759,17 @@ func TestWorker_ReceivedMessageShouldReturnWhenReceivedMessageIsFromSelf(t *test 0, ) - err := wrk.ReceivedMessage(string(consensusTopic), cnsMsg, nil) + buff, _ := wrk.Marshalizer().Marshal(cnsMsg) + + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}) + + time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) - assert.Equal(t, bn.ErrMessageFromItself, err) + assert.Nil(t, err) } -func TestWorker_ReceivedMessageShouldReturnWhenSignatureIsInvalid(t *testing.T) { +func TestWorker_ProcessReceivedMessageInvalidSignatureShouldErr(t *testing.T) { t.Parallel() wrk := *initWorker() @@ -747,13 +787,17 @@ func TestWorker_ReceivedMessageShouldReturnWhenSignatureIsInvalid(t *testing.T) 0, ) - err := wrk.ReceivedMessage(string(consensusTopic), cnsMsg, nil) + buff, _ := wrk.Marshalizer().Marshal(cnsMsg) + + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}) + + time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) assert.Equal(t, bn.ErrInvalidSignature, err) } -func TestWorker_ReceivedMessageShouldSendReceivedMesageOnChannel(t *testing.T) { +func TestWorker_ProcessReceivedMessageOkValsShouldWork(t *testing.T) { t.Parallel() wrk := *initWorker() @@ -771,11 +815,14 @@ func 
TestWorker_ReceivedMessageShouldSendReceivedMesageOnChannel(t *testing.T) { 0, ) - wrk.ReceivedMessage(string(consensusTopic), cnsMsg, nil) + buff, _ := wrk.Marshalizer().Marshal(cnsMsg) - time.Sleep(1000 * time.Millisecond) + err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}) + + time.Sleep(time.Second) assert.Equal(t, 1, len(wrk.ReceivedMessages()[bn.MtBlockBody])) + assert.Nil(t, err) } func TestWorker_CheckSignatureShouldReturnErrNilConsensusData(t *testing.T) { diff --git a/consensus/spos/bn/errors.go b/consensus/spos/bn/errors.go index 20ef56f027d..ec67bcedf8e 100644 --- a/consensus/spos/bn/errors.go +++ b/consensus/spos/bn/errors.go @@ -21,3 +21,9 @@ var ErrMessageFromItself = errors.New("message is from itself") // ErrInvalidSignature is raised when signature is invalid var ErrInvalidSignature = errors.New("signature is invalid") + +// ErrNilMessage signals that a nil message has been received +var ErrNilMessage = errors.New("nil message") + +// ErrNilDataToProcess signals that nil data was provided +var ErrNilDataToProcess = errors.New("nil data to process") diff --git a/consensus/spos/bn/export_test.go b/consensus/spos/bn/export_test.go index 0624d47df18..c680adac523 100644 --- a/consensus/spos/bn/export_test.go +++ b/consensus/spos/bn/export_test.go @@ -33,11 +33,11 @@ func (fct *factory) SetBlockProcessor(blockProcessor process.BlockProcessor) { fct.blockProcessor = blockProcessor } -func (fct *factory) Bootstraper() process.Bootstraper { +func (fct *factory) Bootstraper() process.Bootstrapper { return fct.bootstraper } -func (fct *factory) SetBootsraper(bootstraper process.Bootstraper) { +func (fct *factory) SetBootsraper(bootstraper process.Bootstrapper) { fct.bootstraper = bootstraper } @@ -163,11 +163,11 @@ func (sr *subround) SetCheckFunction(check func() bool) { type Worker *worker -func (wrk *worker) Bootstraper() process.Bootstraper { +func (wrk *worker) Bootstraper() process.Bootstrapper { return wrk.bootstraper } -func 
(wrk *worker) SetBootstraper(bootstraper process.Bootstraper) { +func (wrk *worker) SetBootstraper(bootstraper process.Bootstrapper) { wrk.bootstraper = bootstraper } @@ -287,11 +287,11 @@ func (wrk *worker) SetConsensusStateChangedChannels(consensusStateChangedChannel type SubroundStartRound *subroundStartRound -func (sr *subroundStartRound) Bootstraper() process.Bootstraper { +func (sr *subroundStartRound) Bootstraper() process.Bootstrapper { return sr.bootstraper } -func (sr *subroundStartRound) SetBootsraper(bootstraper process.Bootstraper) { +func (sr *subroundStartRound) SetBootsraper(bootstraper process.Bootstrapper) { sr.bootstraper = bootstraper } diff --git a/consensus/spos/bn/subroundStartRound.go b/consensus/spos/bn/subroundStartRound.go index 8f7d47f069b..0242229f8db 100644 --- a/consensus/spos/bn/subroundStartRound.go +++ b/consensus/spos/bn/subroundStartRound.go @@ -16,7 +16,7 @@ type subroundStartRound struct { *subround blockChain *blockchain.BlockChain - bootstraper process.Bootstraper + bootstraper process.Bootstrapper consensusState *spos.ConsensusState multiSigner crypto.MultiSigner rounder consensus.Rounder @@ -28,7 +28,7 @@ type subroundStartRound struct { func NewSubroundStartRound( subround *subround, blockChain *blockchain.BlockChain, - bootstraper process.Bootstraper, + bootstraper process.Bootstrapper, consensusState *spos.ConsensusState, multiSigner crypto.MultiSigner, rounder consensus.Rounder, @@ -73,7 +73,7 @@ func NewSubroundStartRound( func checkNewSubroundStartRoundParams( subround *subround, blockChain *blockchain.BlockChain, - bootstraper process.Bootstraper, + bootstraper process.Bootstrapper, consensusState *spos.ConsensusState, multiSigner crypto.MultiSigner, rounder consensus.Rounder, diff --git a/consensus/spos/consensusMessage.go b/consensus/spos/consensusMessage.go index 6fdd1743a74..c9537867304 100644 --- a/consensus/spos/consensusMessage.go +++ b/consensus/spos/consensusMessage.go @@ -1,11 +1,5 @@ package spos -import ( 
- "fmt" - - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" -) - // ConsensusMessage defines the data needed by spos to communicate between nodes over network in all subrounds type ConsensusMessage struct { BlockHeaderHash []byte @@ -38,14 +32,3 @@ func NewConsensusMessage( RoundIndex: roundIndex, } } - -// Create method creates a new ConsensusMessage object -func (cnsdta *ConsensusMessage) Create() p2p.Creator { - return &ConsensusMessage{} -} - -// ID gets an unique id of the ConsensusMessage object -func (cnsdta *ConsensusMessage) ID() string { - id := fmt.Sprintf("%d-%s-%d", cnsdta.RoundIndex, cnsdta.Signature, cnsdta.MsgType) - return id -} diff --git a/consensus/spos/consensusMessage_test.go b/consensus/spos/consensusMessage_test.go index c07917f4841..a6dafe67e7a 100644 --- a/consensus/spos/consensusMessage_test.go +++ b/consensus/spos/consensusMessage_test.go @@ -1,7 +1,6 @@ package spos_test import ( - "fmt" "testing" "github.com/ElrondNetwork/elrond-go-sandbox/consensus/spos" @@ -22,35 +21,3 @@ func TestConsensusMessage_NewConsensusMessageShouldWork(t *testing.T) { assert.NotNil(t, cnsMsg) } - -func TestConsensusMessage_ConsensusMessageCreateShouldReturnTheSameObject(t *testing.T) { - t.Parallel() - - cnsMsg := spos.NewConsensusMessage( - nil, - nil, - nil, - nil, - 0, - 0, - 0) - - assert.Equal(t, cnsMsg, cnsMsg.Create()) -} - -func TestConsensusMessage_ConsensusMessageIDShouldReturnID(t *testing.T) { - t.Parallel() - - cnsMsg := spos.NewConsensusMessage( - nil, - nil, - nil, - []byte("sig"), - 6, - 0, - 1) - - id := fmt.Sprintf("1-sig-6") - - assert.Equal(t, id, cnsMsg.ID()) -} diff --git a/consensus/spos/errors.go b/consensus/spos/errors.go index af29accceca..edc9168c862 100644 --- a/consensus/spos/errors.go +++ b/consensus/spos/errors.go @@ -76,6 +76,24 @@ var ErrNilRoundState = errors.New("round state is nil") // ErrCommitmentHashDoesNotMatch is raised when the commitment hash does not match expected value var ErrCommitmentHashDoesNotMatch = 
errors.New("commitment hash does not match") +// ErrNilMessage signals that a nil message has been received +var ErrNilMessage = errors.New("nil message") + +// ErrNilDataToProcess signals that nil data was provided +var ErrNilDataToProcess = errors.New("nil data to process") + +// ErrRoundIsCancelled signals that the message should be dropped as the round is cancelled +var ErrRoundIsCancelled = errors.New("dropping consensus message as the round is cancelled") + +// ErrConsensusMessageSenderNotValid signals that the message should be dropped as the sender is not valid +var ErrConsensusMessageSenderNotValid = errors.New("consensus message sender is not valid") + +// ErrShouldDropConsensusMessage signals that the consensus message should be dropped +var ErrShouldDropConsensusMessage = errors.New("consensus message should be dropped") + +// ErrMessageSentFromSelf signals that the consensus message is sent from self +var ErrMessageSentFromSelf = errors.New("consensus message was sent from self") + // ErrNilWorker is raised when a valid worker is expected but nil used var ErrNilWorker = errors.New("worker is nil") diff --git a/consensus/spos/mock/p2pMessageMock.go b/consensus/spos/mock/p2pMessageMock.go new file mode 100644 index 00000000000..1d15966c7bf --- /dev/null +++ b/consensus/spos/mock/p2pMessageMock.go @@ -0,0 +1,43 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" +) + +type P2PMessageMock struct { + FromField []byte + DataField []byte + SeqNoField []byte + TopicIDsField []string + SignatureField []byte + KeyField []byte + PeerField p2p.PeerID +} + +func (msg *P2PMessageMock) From() []byte { + return msg.FromField +} + +func (msg *P2PMessageMock) Data() []byte { + return msg.DataField +} + +func (msg *P2PMessageMock) SeqNo() []byte { + return msg.SeqNoField +} + +func (msg *P2PMessageMock) TopicIDs() []string { + return msg.TopicIDsField +} + +func (msg *P2PMessageMock) Signature() []byte { + return msg.SignatureField +} + +func
(msg *P2PMessageMock) Key() []byte { + return msg.KeyField +} + +func (msg *P2PMessageMock) Peer() p2p.PeerID { + return msg.PeerField +} diff --git a/data/state/account_test.go b/data/state/account_test.go index 9630ed9ec5e..01884a9fffd 100644 --- a/data/state/account_test.go +++ b/data/state/account_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/ElrondNetwork/elrond-go-sandbox/data/state" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p/mock" + "github.com/ElrondNetwork/elrond-go-sandbox/data/state/mock" "github.com/stretchr/testify/assert" ) diff --git a/integrationTests/block/interceptedRequestHdrMem_test.go b/integrationTests/block/interceptedRequestHdr_test.go similarity index 67% rename from integrationTests/block/interceptedRequestHdrMem_test.go rename to integrationTests/block/interceptedRequestHdr_test.go index 41b6aa50e28..cba83bca3a1 100644 --- a/integrationTests/block/interceptedRequestHdrMem_test.go +++ b/integrationTests/block/interceptedRequestHdr_test.go @@ -1,6 +1,7 @@ package block import ( + "fmt" "reflect" "testing" "time" @@ -8,21 +9,29 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/hashing/sha256" "github.com/ElrondNetwork/elrond-go-sandbox/marshal" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - block2 "github.com/ElrondNetwork/elrond-go-sandbox/process/block" + "github.com/ElrondNetwork/elrond-go-sandbox/process/block/resolvers" "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" "github.com/stretchr/testify/assert" ) func TestNode_GenerateSendInterceptHeaderByNonceWithMemMessenger(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + ti := &testInitializer{} + hasher := sha256.Sha256{} marshalizer := &marshal.JsonMarshalizer{} - dPoolRequestor := createTestDataPool() - dPoolResolver := createTestDataPool() + dPoolRequestor := ti.createTestDataPool() + dPoolResolver := ti.createTestDataPool() - nRequestor, _, pFactory1 := 
createMemNode(1, dPoolRequestor) - nResolver, _, _ := createMemNode(2, dPoolResolver) + fmt.Println("Requestor:") + nRequestor, mesRequestor, _, pFactoryReq := ti.createNetNode(32000, dPoolRequestor, ti.createAccountsDB()) + + fmt.Println("Resolver:") + nResolver, mesResolver, _, _ := ti.createNetNode(32001, dPoolResolver, ti.createAccountsDB()) nRequestor.Start() nResolver.Start() @@ -31,7 +40,10 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithMemMessenger(t *testing.T) { _ = nResolver.Stop() }() - defer p2p.ReInitializeGloballyRegisteredPeers() + //connect messengers together + time.Sleep(time.Second) + err := mesRequestor.ConnectToPeer(ti.getConnectableAddress(mesResolver)) + assert.Nil(t, err) time.Sleep(time.Second) @@ -72,10 +84,10 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithMemMessenger(t *testing.T) { }) //Step 4. request header - res, err := pFactory1.ResolverContainer().Get(string(factory.HeadersTopic)) + res, err := pFactoryReq.ResolverContainer().Get(string(factory.HeadersTopic)) assert.Nil(t, err) - hdrResolver := res.(*block2.HeaderResolver) - hdrResolver.RequestHeaderFromNonce(0) + hdrResolver := res.(*resolvers.HeaderResolver) + hdrResolver.RequestDataFromNonce(0) select { case <-chanDone: diff --git a/integrationTests/block/interceptedRequestTxBlockBodyNet_test.go b/integrationTests/block/interceptedRequestTxBlockBodyNet_test.go deleted file mode 100644 index d6c456d7d2b..00000000000 --- a/integrationTests/block/interceptedRequestTxBlockBodyNet_test.go +++ /dev/null @@ -1,176 +0,0 @@ -package block - -import ( - "context" - "reflect" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go-sandbox/crypto/signing" - "github.com/ElrondNetwork/elrond-go-sandbox/crypto/signing/kv2" - "github.com/ElrondNetwork/elrond-go-sandbox/data/block" - "github.com/ElrondNetwork/elrond-go-sandbox/data/state" - "github.com/ElrondNetwork/elrond-go-sandbox/data/typeConverters/uint64ByteSlice" - "github.com/ElrondNetwork/elrond-go-sandbox/hashing" 
- "github.com/ElrondNetwork/elrond-go-sandbox/hashing/sha256" - "github.com/ElrondNetwork/elrond-go-sandbox/marshal" - "github.com/ElrondNetwork/elrond-go-sandbox/node" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - block2 "github.com/ElrondNetwork/elrond-go-sandbox/process/block" - "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" - "github.com/ElrondNetwork/elrond-go-sandbox/process/interceptor" - "github.com/ElrondNetwork/elrond-go-sandbox/process/resolver" - "github.com/ElrondNetwork/elrond-go-sandbox/sharding" - "github.com/stretchr/testify/assert" -) - -func TestNode_GenerateSendInterceptTxBlockBodyWithNetMessenger(t *testing.T) { - t.Skip("TODO: fix tests that run on the same local network") - - hasher := sha256.Sha256{} - marshalizer := &marshal.JsonMarshalizer{} - - suite := kv2.NewBlakeSHA256Ed25519() - keyGen := signing.NewKeyGenerator(suite) - - dPoolRequestor := createTestDataPool() - dPoolResolver := createTestDataPool() - - addrConverter, _ := state.NewPlainAddressConverter(32, "0x") - - blkcRequestor := createTestBlockChain() - blkcResolver := createTestBlockChain() - reqMessenger := createMessenger(context.Background(), marshalizer, hasher, 4, 32000) - resMessenger := createMessenger(context.Background(), marshalizer, hasher, 4, 32001) - shardCoordinatorReq := &sharding.OneShardCoordinator{} - shardCoordinatorRes := &sharding.OneShardCoordinator{} - uint64BsReq := uint64ByteSlice.NewBigEndianConverter() - uint64BsRes := uint64ByteSlice.NewBigEndianConverter() - - pFactoryReq, _ := factory.NewProcessorsCreator(factory.ProcessorsCreatorConfig{ - InterceptorContainer: interceptor.NewContainer(), - ResolverContainer: resolver.NewContainer(), - Messenger: reqMessenger, - Blockchain: blkcRequestor, - DataPool: dPoolRequestor, - ShardCoordinator: shardCoordinatorReq, - AddrConverter: addrConverter, - Hasher: hasher, - Marshalizer: marshalizer, - KeyGen: keyGen, - Uint64ByteSliceConverter: uint64BsReq, - }) - - pFactoryRes, _ := 
factory.NewProcessorsCreator(factory.ProcessorsCreatorConfig{ - InterceptorContainer: interceptor.NewContainer(), - ResolverContainer: resolver.NewContainer(), - Messenger: resMessenger, - Blockchain: blkcResolver, - DataPool: dPoolResolver, - ShardCoordinator: shardCoordinatorRes, - AddrConverter: addrConverter, - Hasher: hasher, - Marshalizer: marshalizer, - KeyGen: keyGen, - Uint64ByteSliceConverter: uint64BsRes, - }) - - nRequestor, _ := node.NewNode( - node.WithMarshalizer(marshalizer), - node.WithHasher(hasher), - node.WithContext(context.Background()), - node.WithDataPool(dPoolRequestor), - node.WithAddressConverter(addrConverter), - node.WithKeyGenerator(keyGen), - node.WithShardCoordinator(shardCoordinatorReq), - node.WithBlockChain(blkcRequestor), - node.WithUint64ByteSliceConverter(uint64BsReq), - node.WithMessenger(reqMessenger), - node.WithProcessorCreator(pFactoryReq), - ) - - nResolver, _ := node.NewNode( - node.WithMarshalizer(marshalizer), - node.WithHasher(hasher), - node.WithContext(context.Background()), - node.WithDataPool(dPoolResolver), - node.WithAddressConverter(addrConverter), - node.WithKeyGenerator(keyGen), - node.WithShardCoordinator(shardCoordinatorRes), - node.WithBlockChain(blkcResolver), - node.WithUint64ByteSliceConverter(uint64BsRes), - node.WithMessenger(resMessenger), - node.WithProcessorCreator(pFactoryRes), - ) - - nRequestor.Start() - nResolver.Start() - - defer nRequestor.Stop() - defer nResolver.Stop() - - nRequestor.P2PBootstrap() - nResolver.P2PBootstrap() - - time.Sleep(time.Second) - - //TODO remove this - time.Sleep(time.Second) - - //Step 1. 
Generate a block body - txBlock := block.TxBlockBody{ - MiniBlocks: []block.MiniBlock{ - { - ShardID: 0, - TxHashes: [][]byte{ - hasher.Compute("tx1"), - }, - }, - }, - StateBlockBody: block.StateBlockBody{ - RootHash: hasher.Compute("root hash"), - ShardID: 0, - }, - } - - txBlockBodyBuff, _ := marshalizer.Marshal(&txBlock) - txBlockBodyHash := hasher.Compute(string(txBlockBodyBuff)) - - //Step 2. resolver has the tx block body - dPoolResolver.TxBlocks().HasOrAdd(txBlockBodyHash, &txBlock) - - //Step 3. wire up a received handler - chanDone := make(chan bool) - - dPoolRequestor.TxBlocks().RegisterHandler(func(key []byte) { - txBlockBodyStored, _ := dPoolRequestor.TxBlocks().Get(key) - - if reflect.DeepEqual(txBlockBodyStored, &txBlock) { - chanDone <- true - } - - assert.Equal(t, txBlockBodyStored, &txBlock) - - }) - - //Step 4. request tx block body - res, _ := pFactoryRes.ResolverContainer().Get(string(factory.TxBlockBodyTopic)) - txBlockBodyResolver := res.(*block2.GenericBlockBodyResolver) - txBlockBodyResolver.RequestBlockBodyFromHash(txBlockBodyHash) - - select { - case <-chanDone: - case <-time.After(time.Second * 10): - assert.Fail(t, "timeout") - } -} - -func createMessenger(ctx context.Context, marshalizer marshal.Marshalizer, hasher hashing.Hasher, maxAllowedPeers int, port int) p2p.Messenger { - cp := &p2p.ConnectParams{} - cp.Port = port - cp.GeneratePrivPubKeys(time.Now().UnixNano()) - cp.GenerateIDFromPubKey() - - nm, _ := p2p.NewNetMessenger(ctx, marshalizer, hasher, cp, maxAllowedPeers, p2p.GossipSub) - return nm -} diff --git a/integrationTests/block/interceptedRequestTxBlockBodyMem_test.go b/integrationTests/block/interceptedRequestTxBlockBody_test.go similarity index 54% rename from integrationTests/block/interceptedRequestTxBlockBodyMem_test.go rename to integrationTests/block/interceptedRequestTxBlockBody_test.go index 736a50afebe..ea6df74eef6 100644 --- a/integrationTests/block/interceptedRequestTxBlockBodyMem_test.go +++ 
b/integrationTests/block/interceptedRequestTxBlockBody_test.go @@ -1,6 +1,8 @@ package block import ( + "encoding/base64" + "fmt" "reflect" "testing" "time" @@ -8,30 +10,45 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/hashing/sha256" "github.com/ElrondNetwork/elrond-go-sandbox/marshal" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - block2 "github.com/ElrondNetwork/elrond-go-sandbox/process/block" "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" "github.com/stretchr/testify/assert" ) -func TestNode_GenerateSendInterceptTxBlockBodyWithMemMessenger(t *testing.T) { +func TestNode_GenerateSendInterceptTxBlockBodyWithNetMessenger(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + ti := &testInitializer{} + hasher := sha256.Sha256{} marshalizer := &marshal.JsonMarshalizer{} - dPoolRequestor := createTestDataPool() - dPoolResolver := createTestDataPool() + dPoolRequestor := ti.createTestDataPool() + dPoolResolver := ti.createTestDataPool() + + fmt.Println("Requestor:") + nRequestor, mesRequestor, _, pFactoryReq := ti.createNetNode(32000, dPoolRequestor, ti.createAccountsDB()) - nRequestor, _, pFactory1 := createMemNode(1, dPoolRequestor) - nResolver, _, _ := createMemNode(2, dPoolResolver) + fmt.Println("Resolver:") + nResolver, mesResolver, _, pFactoryRes := ti.createNetNode(32001, dPoolResolver, ti.createAccountsDB()) + + _ = pFactoryReq.CreateInterceptors() + _ = pFactoryReq.CreateResolvers() + + _ = pFactoryRes.CreateInterceptors() + _ = pFactoryRes.CreateResolvers() nRequestor.Start() nResolver.Start() - defer func() { - _ = nRequestor.Stop() - _ = nResolver.Stop() - }() - defer p2p.ReInitializeGloballyRegisteredPeers() + defer nRequestor.Stop() + defer nResolver.Stop() + + //connect messengers together + time.Sleep(time.Second) + err := mesRequestor.ConnectToPeer(ti.getConnectableAddress(mesResolver)) + assert.Nil(t, err) time.Sleep(time.Second) @@ 
-56,6 +73,7 @@ func TestNode_GenerateSendInterceptTxBlockBodyWithMemMessenger(t *testing.T) { //Step 2. resolver has the tx block body dPoolResolver.TxBlocks().HasOrAdd(txBlockBodyHash, &txBlock) + fmt.Printf("Added %s to dPoolResolver\n", base64.StdEncoding.EncodeToString(txBlockBodyHash)) //Step 3. wire up a received handler chanDone := make(chan bool) @@ -72,13 +90,12 @@ func TestNode_GenerateSendInterceptTxBlockBodyWithMemMessenger(t *testing.T) { }) //Step 4. request tx block body - res, _ := pFactory1.ResolverContainer().Get(string(factory.TxBlockBodyTopic)) - hdrResolver := res.(*block2.GenericBlockBodyResolver) - hdrResolver.RequestBlockBodyFromHash(txBlockBodyHash) + txBlockBodyRequestor, _ := pFactoryReq.ResolverContainer().Get(string(factory.TxBlockBodyTopic)) + txBlockBodyRequestor.RequestDataFromHash(txBlockBodyHash) select { case <-chanDone: - case <-time.After(time.Second * 10): + case <-time.After(time.Second * 10): assert.Fail(t, "timeout") } } diff --git a/integrationTests/block/common.go b/integrationTests/block/testInitializer.go similarity index 58% rename from integrationTests/block/common.go rename to integrationTests/block/testInitializer.go index 067dcca0b1d..76368733fee 100644 --- a/integrationTests/block/common.go +++ b/integrationTests/block/testInitializer.go @@ -2,6 +2,10 @@ package block import ( "context" + "crypto/ecdsa" + "fmt" + "math/rand" + "strings" "github.com/ElrondNetwork/elrond-go-sandbox/crypto" "github.com/ElrondNetwork/elrond-go-sandbox/crypto/signing" @@ -13,22 +17,29 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/dataPool" "github.com/ElrondNetwork/elrond-go-sandbox/data/shardedData" "github.com/ElrondNetwork/elrond-go-sandbox/data/state" + "github.com/ElrondNetwork/elrond-go-sandbox/data/trie" "github.com/ElrondNetwork/elrond-go-sandbox/data/typeConverters/uint64ByteSlice" "github.com/ElrondNetwork/elrond-go-sandbox/hashing" "github.com/ElrondNetwork/elrond-go-sandbox/hashing/sha256"
"github.com/ElrondNetwork/elrond-go-sandbox/marshal" "github.com/ElrondNetwork/elrond-go-sandbox/node" "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p/libp2p" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p/loadBalancer" "github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" - "github.com/ElrondNetwork/elrond-go-sandbox/process/interceptor" - "github.com/ElrondNetwork/elrond-go-sandbox/process/resolver" + "github.com/ElrondNetwork/elrond-go-sandbox/process/factory/containers" "github.com/ElrondNetwork/elrond-go-sandbox/sharding" "github.com/ElrondNetwork/elrond-go-sandbox/storage" "github.com/ElrondNetwork/elrond-go-sandbox/storage/memorydb" + "github.com/btcsuite/btcd/btcec" + crypto2 "github.com/libp2p/go-libp2p-crypto" ) -func createTestBlockChain() *blockchain.BlockChain { +type testInitializer struct { +} + +func (ti *testInitializer) createTestBlockChain() *blockchain.BlockChain { cfgCache := storage.CacheConfig{Size: 100, Type: storage.LRUCache} @@ -36,16 +47,16 @@ func createTestBlockChain() *blockchain.BlockChain { blockChain, _ := blockchain.NewBlockChain( badBlockCache, - createMemUnit(), - createMemUnit(), - createMemUnit(), - createMemUnit(), - createMemUnit()) + ti.createMemUnit(), + ti.createMemUnit(), + ti.createMemUnit(), + ti.createMemUnit(), + ti.createMemUnit()) return blockChain } -func createMemUnit() storage.Storer { +func (ti *testInitializer) createMemUnit() storage.Storer { cache, _ := storage.NewCache(storage.LRUCache, 10) persist, _ := memorydb.New() @@ -53,7 +64,7 @@ func createMemUnit() storage.Storer { return unit } -func createTestDataPool() data.TransientDataHolder { +func (ti *testInitializer) createTestDataPool() data.TransientDataHolder { txPool, _ := shardedData.NewShardedData(storage.CacheConfig{Size: 100, Type: storage.LRUCache}) hdrPool, _ := shardedData.NewShardedData(storage.CacheConfig{Size: 100, Type: 
storage.LRUCache}) @@ -82,7 +93,7 @@ func createTestDataPool() data.TransientDataHolder { return dPool } -func createMultiSigner( +func (ti *testInitializer) createMultiSigner( privateKey crypto.PrivateKey, publicKey crypto.PublicKey, keyGen crypto.KeyGenerator, @@ -97,12 +108,26 @@ func createMultiSigner( return multiSigner, err } -func createMemNode(port int, dPool data.TransientDataHolder) (*node.Node, p2p.Messenger, process.ProcessorFactory) { +func (ti *testInitializer) createAccountsDB() *state.AccountsDB { + marsh := &marshal.JsonMarshalizer{} + + dbw, _ := trie.NewDBWriteCache(ti.createMemUnit()) + tr, _ := trie.NewTrie(make([]byte, 32), dbw, sha256.Sha256{}) + adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, marsh) + + return adb +} + +func (ti *testInitializer) createNetNode(port int, dPool data.TransientDataHolder, accntAdapter state.AccountsAdapter) ( + *node.Node, + p2p.Messenger, + crypto.PrivateKey, + process.InterceptorsResolversFactory) { + hasher := sha256.Sha256{} marshalizer := &marshal.JsonMarshalizer{} - cp, _ := p2p.NewConnectParamsFromPort(port) - mes, _ := p2p.NewMemMessenger(marshalizer, hasher, cp) + messenger := ti.createMessenger(context.Background(), port) addrConverter, _ := state.NewPlainAddressConverter(32, "0x") @@ -110,48 +135,80 @@ func createMemNode(port int, dPool data.TransientDataHolder) (*node.Node, p2p.Me signer := &singlesig.SchnorrSigner{} keyGen := signing.NewKeyGenerator(suite) sk, pk := keyGen.GeneratePair() - multiSigner, _ := createMultiSigner(sk, pk, keyGen, hasher) - blockChain := createTestBlockChain() + multiSigner, _ := ti.createMultiSigner(sk, pk, keyGen, hasher) + blkc := ti.createTestBlockChain() shardCoordinator := &sharding.OneShardCoordinator{} uint64Converter := uint64ByteSlice.NewBigEndianConverter() - pFactory, _ := factory.NewProcessorsCreator(factory.ProcessorsCreatorConfig{ - InterceptorContainer: interceptor.NewContainer(), - ResolverContainer: resolver.NewContainer(), - Messenger: mes, - 
Blockchain: blockChain, - DataPool: dPool, - ShardCoordinator: shardCoordinator, - AddrConverter: addrConverter, - Hasher: hasher, - Marshalizer: marshalizer, - MultiSigner: multiSigner, - SingleSigner: signer, - KeyGen: keyGen, + pFactory, _ := factory.NewInterceptorsResolversCreator(factory.InterceptorsResolversConfig{ + InterceptorContainer: containers.NewObjectsContainer(), + ResolverContainer: containers.NewResolversContainer(), + Messenger: messenger, + Blockchain: blkc, + DataPool: dPool, + ShardCoordinator: shardCoordinator, + AddrConverter: addrConverter, + Hasher: hasher, + Marshalizer: marshalizer, + MultiSigner: multiSigner, + SingleSigner: signer, + KeyGen: keyGen, Uint64ByteSliceConverter: uint64Converter, }) n, _ := node.NewNode( - node.WithMessenger(mes), + node.WithMessenger(messenger), node.WithMarshalizer(marshalizer), node.WithHasher(hasher), node.WithContext(context.Background()), node.WithDataPool(dPool), node.WithAddressConverter(addrConverter), + node.WithAccountsAdapter(accntAdapter), node.WithSinglesig(signer), node.WithMultisig(multiSigner), node.WithKeyGenerator(keyGen), node.WithPrivateKey(sk), node.WithPublicKey(pk), node.WithShardCoordinator(shardCoordinator), - node.WithBlockChain(blockChain), + node.WithBlockChain(blkc), node.WithUint64ByteSliceConverter(uint64Converter), - node.WithMessenger(mes), - node.WithProcessorCreator(pFactory), + node.WithInterceptorsResolversFactory(pFactory), ) _ = pFactory.CreateInterceptors() _ = pFactory.CreateResolvers() - return n, mes, pFactory + return n, messenger, sk, pFactory +} + +func (ti *testInitializer) createMessenger(ctx context.Context, port int) p2p.Messenger { + r := rand.New(rand.NewSource(int64(port))) + prvKey, _ := ecdsa.GenerateKey(btcec.S256(), r) + sk := (*crypto2.Secp256k1PrivateKey)(prvKey) + + libP2PMes, err := libp2p.NewNetworkMessenger( + ctx, + port, + sk, + nil, + loadBalancer.NewOutgoingPipeLoadBalancer(), + p2p.PeerDiscoveryOff) + + if err != nil { + 
fmt.Println(err.Error()) + } + + return libP2PMes +} + +func (ti *testInitializer) getConnectableAddress(mes p2p.Messenger) string { + for _, addr := range mes.Addresses() { + if strings.Contains(addr, "circuit") { + continue + } + + return addr + } + + return "" } diff --git a/integrationTests/peerDiscovery/mdns/peerDiscovery_test.go b/integrationTests/peerDiscovery/mdns/peerDiscovery_test.go new file mode 100644 index 00000000000..9bb6a13d4d5 --- /dev/null +++ b/integrationTests/peerDiscovery/mdns/peerDiscovery_test.go @@ -0,0 +1,80 @@ +package mdns + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/integrationTests/peerDiscovery" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/stretchr/testify/assert" +) + +var durationBootstrapingTime = time.Duration(time.Second * 2) +var durationTopicAnnounceTime = time.Duration(time.Second * 2) + +func TestPeerDiscoveryAndMessageSending(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + tr := peerDiscovery.TestRunner{} + + basePort := 23000 + noOfPeers := 20 + + //Step 1. Create noOfPeers instances of messenger type + peers := make([]p2p.Messenger, noOfPeers) + + for i := 0; i < noOfPeers; i++ { + peers[i] = tr.CreateMessenger(context.Background(), basePort+i, p2p.PeerDiscoveryMdns) + } + + //Step 2. Call bootstrap to start the discovery process + for _, peer := range peers { + peer.Bootstrap() + } + + //cleanup function that closes all messengers + defer func() { + for i := 0; i < noOfPeers; i++ { + if peers[i] != nil { + peers[i].Close() + } + } + }() + + fmt.Printf("Waiting %v for peer discovery...\n", durationBootstrapingTime) + time.Sleep(durationBootstrapingTime) + + fmt.Println("Connected peers:") + for _, peer := range peers { + fmt.Printf("Peer %s is connected to %d peers\n", peer.ID().Pretty(), len(peer.ConnectedPeers())) + } + + //Step 3. 
Create a test topic, add receiving handlers + for _, peer := range peers { + err := peer.CreateTopic("test topic", true) + if err != nil { + assert.Fail(t, "test fail while creating topic") + } + } + + fmt.Printf("Waiting %v for topic announcement...\n", durationTopicAnnounceTime) + time.Sleep(durationTopicAnnounceTime) + + //Step 4. run the test for a couple of times as peer discovering and topic announcing + // are not deterministic nor instant processes + + noOfTests := 5 + for i := 0; i < noOfTests; i++ { + testResult := tr.RunTest(peers, i, "test topic") + + if testResult { + return + } + } + + assert.Fail(t, "test failed. Discovery/message passing are not validated") +} diff --git a/integrationTests/peerDiscovery/messageProcessor.go b/integrationTests/peerDiscovery/messageProcessor.go new file mode 100644 index 00000000000..ebcd374bd94 --- /dev/null +++ b/integrationTests/peerDiscovery/messageProcessor.go @@ -0,0 +1,41 @@ +package peerDiscovery + +import ( + "bytes" + "sync" + + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" +) + +type MessageProcesssor struct { + RequiredValue []byte + chanDone chan struct{} + mutDataReceived sync.Mutex + wasDataReceived bool +} + +func NewMessageProcessor(chanDone chan struct{}, requiredVal []byte) *MessageProcesssor { + return &MessageProcesssor{ + RequiredValue: requiredVal, + chanDone: chanDone, + } +} + +func (mp *MessageProcesssor) ProcessReceivedMessage(message p2p.MessageP2P) error { + if bytes.Equal(mp.RequiredValue, message.Data()) { + mp.mutDataReceived.Lock() + mp.wasDataReceived = true + mp.mutDataReceived.Unlock() + + mp.chanDone <- struct{}{} + } + + return nil +} + +func (mp *MessageProcesssor) WasDataReceived() bool { + mp.mutDataReceived.Lock() + defer mp.mutDataReceived.Unlock() + + return mp.wasDataReceived +} diff --git a/integrationTests/peerDiscovery/testRunnner.go b/integrationTests/peerDiscovery/testRunnner.go new file mode 100644 index 00000000000..219a5bb212b --- /dev/null +++ 
b/integrationTests/peerDiscovery/testRunnner.go @@ -0,0 +1,103 @@ +package peerDiscovery + +import ( + "context" + "crypto/ecdsa" + "fmt" + "math/rand" + "strconv" + "sync/atomic" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p/libp2p" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p/loadBalancer" + "github.com/btcsuite/btcd/btcec" + crypto2 "github.com/libp2p/go-libp2p-crypto" +) + +var durationMsgRecieved = time.Duration(time.Second * 2) + +type TestRunner struct { +} + +func (tr *TestRunner) CreateMessenger(ctx context.Context, + port int, + peerDiscoveryType p2p.PeerDiscoveryType) p2p.Messenger { + + r := rand.New(rand.NewSource(int64(port))) + prvKey, _ := ecdsa.GenerateKey(btcec.S256(), r) + sk := (*crypto2.Secp256k1PrivateKey)(prvKey) + + libP2PMes, err := libp2p.NewNetworkMessenger( + ctx, + port, + sk, + nil, + loadBalancer.NewOutgoingPipeLoadBalancer(), + peerDiscoveryType) + + if err != nil { + fmt.Println(err.Error()) + } + + return libP2PMes +} + +func (tr *TestRunner) RunTest(peers []p2p.Messenger, testIndex int, topic string) bool { + fmt.Printf("Running test %v\n", testIndex) + + testMessage := "test " + strconv.Itoa(testIndex) + messageProcessors := make([]*MessageProcesssor, len(peers)) + + chanDone := make(chan struct{}) + chanMessageProcessor := make(chan struct{}, len(peers)) + + //add a new message processor for each messenger + for i, peer := range peers { + if peer.HasTopicValidator(topic) { + _ = peer.UnregisterMessageProcessor(topic) + } + + mp := NewMessageProcessor(chanMessageProcessor, []byte(testMessage)) + + messageProcessors[i] = mp + err := peer.RegisterMessageProcessor(topic, mp) + if err != nil { + fmt.Println(err.Error()) + return false + } + } + + var msgReceived int32 = 0 + + go func() { + + for { + <-chanMessageProcessor + + //to be 100% all peers received the messages, iterate all message processors and check received flag + for _, mp := range messageProcessors 
{ + if mp.WasDataReceived() { + atomic.AddInt32(&msgReceived, 1) + } + } + + if atomic.LoadInt32(&msgReceived) == int32(len(peers)) { + //all messengers got the message + chanDone <- struct{}{} + return + } + + //not all peers have the payload yet; reset the per-round counter and wait again + atomic.StoreInt32(&msgReceived, 0) + } + }() + + //write the message on topic + peers[0].Broadcast(topic, []byte(testMessage)) + + select { + case <-chanDone: + return true + case <-time.After(durationMsgRecieved): + fmt.Printf("timeout fetching all messages. Got %d from %d\n", + atomic.LoadInt32(&msgReceived), len(peers)) + return false + } +} diff --git a/integrationTests/state/stateExecTransaction_test.go b/integrationTests/state/stateExecTransaction_test.go index c8bd9d199ad..7994e997375 100644 --- a/integrationTests/state/stateExecTransaction_test.go +++ b/integrationTests/state/stateExecTransaction_test.go @@ -15,9 +15,15 @@ import ( ) func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { - accnts := adbCreateAccountsDB() + if testing.Short() { + t.Skip("this is not a short test") + } + + ti := &testInitializer{} + + accnts := adbCreateAccountsDB(ti) - pubKeyBuff := createDummyHexAddress(64) + pubKeyBuff := ti.createDummyHexAddress(64) hasher := sha256.Sha256{} marshalizer := &marshal.JsonMarshalizer{} @@ -56,9 +62,11 @@ func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { } func TestExecTransaction_SelfTransactionWithRevertShouldWork(t *testing.T) { - accnts := adbCreateAccountsDB() + ti := &testInitializer{} - pubKeyBuff := createDummyHexAddress(64) + accnts := adbCreateAccountsDB(ti) + + pubKeyBuff := ti.createDummyHexAddress(64) hasher := sha256.Sha256{} marshalizer := &marshal.JsonMarshalizer{} @@ -96,17 +104,19 @@ func TestExecTransaction_SelfTransactionWithRevertShouldWork(t *testing.T) { } func TestExecTransaction_MoreTransactionsWithRevertShouldWork(t *testing.T) { - accnts := adbCreateAccountsDB() + ti := &testInitializer{} + + accnts := adbCreateAccountsDB(ti) nonce := uint64(6) initialBalance := int64(100000) balance := big.NewInt(initialBalance) addrConv, _ := state.NewPlainAddressConverter(32, "0x") - pubKeyBuff :=
createDummyHexAddress(64) + pubKeyBuff := ti.createDummyHexAddress(64) sender, _ := addrConv.CreateAddressFromHex(string(pubKeyBuff)) - pubKeyBuff = createDummyHexAddress(64) + pubKeyBuff = ti.createDummyHexAddress(64) receiver, _ := addrConv.CreateAddressFromHex(string(pubKeyBuff)) account, _ := accnts.GetJournalizedAccount(sender) @@ -186,19 +196,19 @@ func testExecTransactionsMoreTxWithRevert( } func TestExecTransaction_MoreTransactionsMoreIterationsWithRevertShouldWork(t *testing.T) { - t.Skip("This is a very long test") + ti := &testInitializer{} - accnts := adbCreateAccountsDB() + accnts := adbCreateAccountsDB(ti) nonce := uint64(6) initialBalance := int64(100000) balance := big.NewInt(initialBalance) addrConv, _ := state.NewPlainAddressConverter(32, "0x") - pubKeyBuff := createDummyHexAddress(64) + pubKeyBuff := ti.createDummyHexAddress(64) sender, _ := addrConv.CreateAddressFromHex(string(pubKeyBuff)) - pubKeyBuff = createDummyHexAddress(64) + pubKeyBuff = ti.createDummyHexAddress(64) receiver, _ := addrConv.CreateAddressFromHex(string(pubKeyBuff)) account, _ := accnts.GetJournalizedAccount(sender) @@ -208,7 +218,7 @@ func TestExecTransaction_MoreTransactionsMoreIterationsWithRevertShouldWork(t *t initialHash, _ := accnts.Commit() fmt.Printf("Initial hash: %s\n", base64.StdEncoding.EncodeToString(initialHash)) - for i := 0; i < 10000; i++ { + for i := 0; i < 10; i++ { fmt.Printf("Iteration: %d\n", i) testExecTransactionsMoreTxWithRevert(t, accnts, sender, receiver, initialHash, nonce, initialBalance) diff --git a/integrationTests/state/stateTrie_test.go b/integrationTests/state/stateTrie_test.go index f8f9af87221..8ee3dc4ff02 100644 --- a/integrationTests/state/stateTrie_test.go +++ b/integrationTests/state/stateTrie_test.go @@ -18,19 +18,19 @@ import ( //------- Helper funcs -func adbCreateAccountsDB() *state.AccountsDB { +func adbCreateAccountsDB(ti *testInitializer) *state.AccountsDB { marsh := &marshal.JsonMarshalizer{} - dbw, _ := 
trie.NewDBWriteCache(createMemUnit()) + dbw, _ := trie.NewDBWriteCache(ti.createMemUnit()) tr, _ := trie.NewTrie(make([]byte, 32), dbw, sha256.Sha256{}) adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, marsh) return adb } -func generateAddressJurnalAccountAccountsDB() (state.AddressContainer, state.JournalizedAccountWrapper, *state.AccountsDB) { - adr := createDummyAddress() - adb := adbCreateAccountsDB() +func generateAddressJurnalAccountAccountsDB(ti *testInitializer) (state.AddressContainer, state.JournalizedAccountWrapper, *state.AccountsDB) { + adr := ti.createDummyAddress() + adb := adbCreateAccountsDB(ti) jaw, err := state.NewJournalizedAccountWrapFromAccountContainer(adr, state.NewAccount(), adb) if err != nil { @@ -104,7 +104,9 @@ func TestAccountsDB_RetrieveDataWithSomeValuesShouldWork(t *testing.T) { //and then reloading the data trie based on the root hash generated before t.Parallel() - _, jaw, adb := generateAddressJurnalAccountAccountsDB() + ti := &testInitializer{} + + _, jaw, adb := generateAddressJurnalAccountAccountsDB(ti) jaw.SaveKeyValue([]byte{65, 66, 67}, []byte{32, 33, 34}) jaw.SaveKeyValue([]byte{68, 69, 70}, []byte{35, 36, 37}) @@ -131,7 +133,9 @@ func TestAccountsDB_RetrieveDataWithSomeValuesShouldWork(t *testing.T) { func TestAccountsDB_PutCodeWithSomeValuesShouldWork(t *testing.T) { t.Parallel() - _, jaw, adb := generateAddressJurnalAccountAccountsDB() + ti := &testInitializer{} + + _, jaw, adb := generateAddressJurnalAccountAccountsDB(ti) err := adb.PutCode(jaw, []byte("Smart contract code")) assert.Nil(t, err) @@ -150,7 +154,9 @@ func TestAccountsDB_PutCodeWithSomeValuesShouldWork(t *testing.T) { func TestAccountsDB_SaveDataNoDirtyShouldWork(t *testing.T) { t.Parallel() - _, jaw, adb := generateAddressJurnalAccountAccountsDB() + ti := &testInitializer{} + + _, jaw, adb := generateAddressJurnalAccountAccountsDB(ti) err := adb.SaveData(jaw) assert.Nil(t, err) @@ -160,7 +166,9 @@ func TestAccountsDB_SaveDataNoDirtyShouldWork(t 
*testing.T) { func TestAccountsDB_HasAccountNotFoundShouldRetFalse(t *testing.T) { t.Parallel() - adr, _, adb := generateAddressJurnalAccountAccountsDB() + ti := &testInitializer{} + + adr, _, adb := generateAddressJurnalAccountAccountsDB(ti) //should return false val, err := adb.HasAccount(adr) @@ -171,7 +179,9 @@ func TestAccountsDB_HasAccountNotFoundShouldRetFalse(t *testing.T) { func TestAccountsDB_HasAccountFoundShouldRetTrue(t *testing.T) { t.Parallel() - adr, _, adb := generateAddressJurnalAccountAccountsDB() + ti := &testInitializer{} + + adr, _, adb := generateAddressJurnalAccountAccountsDB(ti) _, err := adb.GetJournalizedAccount(adr) assert.Nil(t, err) @@ -184,7 +194,9 @@ func TestAccountsDB_HasAccountFoundShouldRetTrue(t *testing.T) { func TestAccountsDB_SaveAccountStateWithSomeValues_ShouldWork(t *testing.T) { t.Parallel() - _, jaw, adb := generateAddressJurnalAccountAccountsDB() + ti := &testInitializer{} + + _, jaw, adb := generateAddressJurnalAccountAccountsDB(ti) err := adb.SaveJournalizedAccount(jaw) assert.Nil(t, err) @@ -193,7 +205,9 @@ func TestAccountsDB_SaveAccountStateWithSomeValues_ShouldWork(t *testing.T) { func TestAccountsDB_GetJournalizedAccountReturnExistingAccntShouldWork(t *testing.T) { t.Parallel() - adr, jaw, adb := generateAddressJurnalAccountAccountsDB() + ti := &testInitializer{} + + adr, jaw, adb := generateAddressJurnalAccountAccountsDB(ti) err := jaw.SetBalanceWithJournal(big.NewInt(40)) assert.Nil(t, err) @@ -210,7 +224,9 @@ func TestAccountsDB_GetJournalizedAccountReturnNotFoundAccntShouldWork(t *testin //test when the account does not exists t.Parallel() - adr, _, adb := generateAddressJurnalAccountAccountsDB() + ti := &testInitializer{} + + adr, _, adb := generateAddressJurnalAccountAccountsDB(ti) //same address of the unsaved account acnt, err := adb.GetJournalizedAccount(adr) @@ -224,10 +240,12 @@ func TestAccountsDB_Commit2OkAccountsShouldWork(t *testing.T) { //verifies that commit saves the new tries and that can be 
loaded back t.Parallel() - adr1, _, adb := generateAddressJurnalAccountAccountsDB() + ti := &testInitializer{} + + adr1, _, adb := generateAddressJurnalAccountAccountsDB(ti) buff := make([]byte, sha256.Sha256{}.Size()) rand.Read(buff) - adr2 := createDummyAddress() + adr2 := ti.createDummyAddress() //first account has the balance of 40 state1, err := adb.GetJournalizedAccount(adr1) @@ -277,7 +295,9 @@ func TestAccountsDB_Commit2OkAccountsShouldWork(t *testing.T) { func TestAccountsDB_CommitAccountDataShouldWork(t *testing.T) { t.Parallel() - adr1, _, adb := generateAddressJurnalAccountAccountsDB() + ti := &testInitializer{} + + adr1, _, adb := generateAddressJurnalAccountAccountsDB(ti) hrEmpty := base64.StdEncoding.EncodeToString(adb.RootHash()) fmt.Printf("State root - empty: %v\n", hrEmpty) @@ -320,11 +340,13 @@ func TestAccountsDB_CommitAccountDataShouldWork(t *testing.T) { func TestAccountsDB_RevertNonceStepByStepAccountDataShouldWork(t *testing.T) { t.Parallel() - adr1 := createDummyAddress() - adr2 := createDummyAddress() + ti := &testInitializer{} + + adr1 := ti.createDummyAddress() + adr2 := ti.createDummyAddress() //Step 1. create accounts objects - adb := adbCreateAccountsDB() + adb := adbCreateAccountsDB(ti) hrEmpty := base64.StdEncoding.EncodeToString(adb.RootHash()) fmt.Printf("State root - empty: %v\n", hrEmpty) @@ -377,11 +399,13 @@ func TestAccountsDB_RevertNonceStepByStepAccountDataShouldWork(t *testing.T) { func TestAccountsDB_RevertBalanceStepByStepAccountDataShouldWork(t *testing.T) { t.Parallel() - adr1 := createDummyAddress() - adr2 := createDummyAddress() + ti := &testInitializer{} + + adr1 := ti.createDummyAddress() + adr2 := ti.createDummyAddress() //Step 1. 
create accounts objects - adb := adbCreateAccountsDB() + adb := adbCreateAccountsDB(ti) hrEmpty := base64.StdEncoding.EncodeToString(adb.RootHash()) fmt.Printf("State root - empty: %v\n", hrEmpty) @@ -437,11 +461,13 @@ func TestAccountsDB_RevertCodeStepByStepAccountDataShouldWork(t *testing.T) { //adr1 puts code hash + code inside trie. adr2 has the same code hash //revert should work - adr1 := createDummyAddress() - adr2 := createDummyAddress() + ti := &testInitializer{} + + adr1 := ti.createDummyAddress() + adr2 := ti.createDummyAddress() //Step 1. create accounts objects - adb := adbCreateAccountsDB() + adb := adbCreateAccountsDB(ti) hrEmpty := base64.StdEncoding.EncodeToString(adb.RootHash()) fmt.Printf("State root - empty: %v\n", hrEmpty) @@ -493,11 +519,13 @@ func TestAccountsDB_RevertDataStepByStepAccountDataShouldWork(t *testing.T) { //adr1 puts data inside trie. adr2 puts the same data //revert should work - adr1 := createDummyAddress() - adr2 := createDummyAddress() + ti := &testInitializer{} + + adr1 := ti.createDummyAddress() + adr2 := ti.createDummyAddress() //Step 1. create accounts objects - adb := adbCreateAccountsDB() + adb := adbCreateAccountsDB(ti) hrEmpty := base64.StdEncoding.EncodeToString(adb.RootHash()) fmt.Printf("State root - empty: %v\n", hrEmpty) @@ -554,11 +582,13 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test //adr1 puts data inside trie. adr2 puts the same data //revert should work - adr1 := createDummyAddress() - adr2 := createDummyAddress() + ti := &testInitializer{} + + adr1 := ti.createDummyAddress() + adr2 := ti.createDummyAddress() //Step 1. 
create accounts objects - adb := adbCreateAccountsDB() + adb := adbCreateAccountsDB(ti) hrEmpty := base64.StdEncoding.EncodeToString(adb.RootHash()) fmt.Printf("State root - empty: %v\n", hrEmpty) @@ -631,11 +661,13 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test func TestAccountsDB_ExecBalanceTxExecution(t *testing.T) { t.Parallel() - adrSrc := createDummyAddress() - adrDest := createDummyAddress() + ti := &testInitializer{} + + adrSrc := ti.createDummyAddress() + adrDest := ti.createDummyAddress() //Step 1. create accounts objects - adb := adbCreateAccountsDB() + adb := adbCreateAccountsDB(ti) acntSrc, err := adb.GetJournalizedAccount(adrSrc) assert.Nil(t, err) @@ -678,11 +710,13 @@ func TestAccountsDB_ExecBalanceTxExecution(t *testing.T) { func TestAccountsDB_ExecALotOfBalanceTxOK(t *testing.T) { t.Parallel() - adrSrc := createDummyAddress() - adrDest := createDummyAddress() + ti := &testInitializer{} + + adrSrc := ti.createDummyAddress() + adrDest := ti.createDummyAddress() //Step 1. create accounts objects - adb := adbCreateAccountsDB() + adb := adbCreateAccountsDB(ti) acntSrc, err := adb.GetJournalizedAccount(adrSrc) assert.Nil(t, err) @@ -709,11 +743,13 @@ func TestAccountsDB_ExecALotOfBalanceTxOK(t *testing.T) { func TestAccountsDB_ExecALotOfBalanceTxOKorNOK(t *testing.T) { t.Parallel() - adrSrc := createDummyAddress() - adrDest := createDummyAddress() + ti := &testInitializer{} + + adrSrc := ti.createDummyAddress() + adrDest := ti.createDummyAddress() //Step 1. create accounts objects - adb := adbCreateAccountsDB() + adb := adbCreateAccountsDB(ti) acntSrc, err := adb.GetJournalizedAccount(adrSrc) assert.Nil(t, err) @@ -745,11 +781,13 @@ func TestAccountsDB_ExecALotOfBalanceTxOKorNOK(t *testing.T) { } func BenchmarkTxExecution(b *testing.B) { - adrSrc := createDummyAddress() - adrDest := createDummyAddress() + ti := &testInitializer{} + + adrSrc := ti.createDummyAddress() + adrDest := ti.createDummyAddress() //Step 1. 
create accounts objects - adb := adbCreateAccountsDB() + adb := adbCreateAccountsDB(ti) acntSrc, err := adb.GetJournalizedAccount(adrSrc) assert.Nil(b, err) diff --git a/integrationTests/state/common.go b/integrationTests/state/testInitializer.go similarity index 79% rename from integrationTests/state/common.go rename to integrationTests/state/testInitializer.go index 666b1ce6ba1..4c7986bc3ea 100644 --- a/integrationTests/state/common.go +++ b/integrationTests/state/testInitializer.go @@ -10,7 +10,10 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/storage/memorydb" ) -func createDummyAddress() state.AddressContainer { +type testInitializer struct { +} + +func (ti *testInitializer) createDummyAddress() state.AddressContainer { buff := make([]byte, sha256.Sha256{}.Size()) r := rand.New(rand.NewSource(time.Now().UnixNano())) @@ -19,7 +22,7 @@ func createDummyAddress() state.AddressContainer { return state.NewAddress(buff) } -func createMemUnit() storage.Storer { +func (ti *testInitializer) createMemUnit() storage.Storer { cache, _ := storage.NewCache(storage.LRUCache, 10) persist, _ := memorydb.New() @@ -27,7 +30,7 @@ func createMemUnit() storage.Storer { return unit } -func createDummyHexAddress(chars int) string { +func (ti *testInitializer) createDummyHexAddress(chars int) string { if chars < 1 { return "" } diff --git a/integrationTests/transaction/interceptedBulkTxMem_test.go b/integrationTests/transaction/interceptedBulkTxMem_test.go deleted file mode 100644 index bfd23dd225e..00000000000 --- a/integrationTests/transaction/interceptedBulkTxMem_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package transaction - -import ( - "math/big" - "sync" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go-sandbox/data/state" - "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/stretchr/testify/assert" -) - -func TestNode_GenerateSendInterceptBulkTransactionsWithMemMessenger(t *testing.T) { 
- dPool := createTestDataPool() - - startingNonce := uint64(6) - - addrConverter, _ := state.NewPlainAddressConverter(32, "0x") - accntAdapter := adbCreateAccountsDB() - - //TODO change when injecting a messenger is possible - n, _, sk, _ := createMemNode(1, dPool, accntAdapter) - - n.Start() - defer func() { _ = n.Stop() }() - - defer p2p.ReInitializeGloballyRegisteredPeers() - - //set the account's nonce to startingNonce - nodePubKeyBytes, _ := sk.GeneratePublic().ToByteArray() - nodeAddress, _ := addrConverter.CreateAddressFromPublicKeyBytes(nodePubKeyBytes) - nodeAccount, _ := accntAdapter.GetJournalizedAccount(nodeAddress) - nodeAccount.SetNonceWithJournal(startingNonce) - accntAdapter.Commit() - - noOfTx := 50 - - wg := sync.WaitGroup{} - wg.Add(noOfTx) - - chanDone := make(chan bool) - - go func() { - wg.Wait() - - chanDone <- true - }() - - mut := sync.Mutex{} - txHashes := make([][]byte, 0) - - //wire up handler - dPool.Transactions().RegisterHandler(func(key []byte) { - mut.Lock() - defer mut.Unlock() - - txHashes = append(txHashes, key) - wg.Done() - }) - - err := n.GenerateAndSendBulkTransactions(createDummyHexAddress(64), big.NewInt(1), uint64(noOfTx)) - assert.Nil(t, err) - - select { - case <-chanDone: - case <-time.After(time.Second * 3): - assert.Fail(t, "timeout") - return - } - - assert.Equal(t, noOfTx, len(txHashes)) - - bitmap := make([]bool, noOfTx+int(startingNonce)) - //set for each nonce from found tx a true flag in bitmap - for i := 0; i < noOfTx; i++ { - tx, _ := dPool.Transactions().ShardDataStore(0).Get(txHashes[i]) - - assert.NotNil(t, tx) - bitmap[tx.(*transaction.Transaction).Nonce] = true - } - - //for the first startingNonce values, the bitmap should be false - //for the rest, true - for i := 0; i < noOfTx+int(startingNonce); i++ { - if i < int(startingNonce) { - assert.False(t, bitmap[i]) - continue - } - - assert.True(t, bitmap[i]) - } -} diff --git a/integrationTests/transaction/interceptedBulkTxNet_test.go 
b/integrationTests/transaction/interceptedBulkTx_test.go similarity index 85% rename from integrationTests/transaction/interceptedBulkTxNet_test.go rename to integrationTests/transaction/interceptedBulkTx_test.go index 9c0182a928f..5bdaf0a70a6 100644 --- a/integrationTests/transaction/interceptedBulkTxNet_test.go +++ b/integrationTests/transaction/interceptedBulkTx_test.go @@ -13,18 +13,21 @@ import ( "github.com/stretchr/testify/assert" ) -func TestNode_GenerateSendInterceptBulkTransactionsWithNetMessenger(t *testing.T) { - t.Skip("TODO: fix tests that run on the same local network") +func TestNode_GenerateSendInterceptBulkTransactionsWithMessenger(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + ti := &testInitializer{} - dPool := createTestDataPool() + dPool := ti.createTestDataPool() startingNonce := uint64(6) addrConverter, _ := state.NewPlainAddressConverter(32, "0x") - accntAdapter := adbCreateAccountsDB() + accntAdapter := ti.createAccountsDB() - //TODO change when injecting a messenger is possible - n, _, sk := createNetNode(4000, dPool, accntAdapter) + n, _, sk, _ := ti.createNetNode(4000, dPool, accntAdapter) n.Start() defer n.Stop() @@ -40,7 +43,7 @@ func TestNode_GenerateSendInterceptBulkTransactionsWithNetMessenger(t *testing.T nodeAccount.SetNonceWithJournal(startingNonce) accntAdapter.Commit() - noOfTx := 100000 + noOfTx := 10000 time.Sleep(time.Second) @@ -78,7 +81,7 @@ func TestNode_GenerateSendInterceptBulkTransactionsWithNetMessenger(t *testing.T wg.Done() }) - err := n.GenerateAndSendBulkTransactions(createDummyHexAddress(64), big.NewInt(1), uint64(noOfTx)) + err := n.GenerateAndSendBulkTransactions(ti.createDummyHexAddress(64), big.NewInt(1), uint64(noOfTx)) assert.Nil(t, err) diff --git a/integrationTests/transaction/interceptedResolvedTxMem_test.go b/integrationTests/transaction/interceptedResolvedTx_test.go similarity index 70% rename from integrationTests/transaction/interceptedResolvedTxMem_test.go 
rename to integrationTests/transaction/interceptedResolvedTx_test.go index 5fd8636b40e..dd61ca674bf 100644 --- a/integrationTests/transaction/interceptedResolvedTxMem_test.go +++ b/integrationTests/transaction/interceptedResolvedTx_test.go @@ -11,21 +11,27 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" "github.com/ElrondNetwork/elrond-go-sandbox/hashing/sha256" "github.com/ElrondNetwork/elrond-go-sandbox/marshal" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" - transaction2 "github.com/ElrondNetwork/elrond-go-sandbox/process/transaction" "github.com/stretchr/testify/assert" ) -func TestNode_RequestInterceptTransactionWithMemMessenger(t *testing.T) { +func TestNode_RequestInterceptTransactionWithMessenger(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + ti := &testInitializer{} + hasher := sha256.Sha256{} marshalizer := &marshal.JsonMarshalizer{} - dPoolRequestor := createTestDataPool() - dPoolResolver := createTestDataPool() + dPoolRequestor := ti.createTestDataPool() + dPoolResolver := ti.createTestDataPool() - nRequestor, _, sk1, pf := createMemNode(1, dPoolRequestor, adbCreateAccountsDB()) - nResolver, _, _, _ := createMemNode(2, dPoolResolver, adbCreateAccountsDB()) + fmt.Println("Requestor:") + nRequestor, mesRequestor, sk1, pf := ti.createNetNode(4000, dPoolRequestor, ti.createAccountsDB()) + fmt.Println("Resolver:") + nResolver, mesResolver, _, _ := ti.createNetNode(4001, dPoolResolver, ti.createAccountsDB()) nRequestor.Start() nResolver.Start() @@ -34,7 +40,10 @@ func TestNode_RequestInterceptTransactionWithMemMessenger(t *testing.T) { _ = nResolver.Stop() }() - defer p2p.ReInitializeGloballyRegisteredPeers() + //connect messengers together + time.Sleep(time.Second) + err := mesRequestor.ConnectToPeer(ti.getConnectableAddress(mesResolver)) + assert.Nil(t, err) time.Sleep(time.Second) @@ -78,9 +87,8 @@ func 
TestNode_RequestInterceptTransactionWithMemMessenger(t *testing.T) { dPoolResolver.Transactions().AddData(txHash, &tx, 0) //Step 4. request tx - res, _ := pf.ResolverContainer().Get(string(factory.TransactionTopic)) - txResolver := res.(*transaction2.TxResolver) - err := txResolver.RequestTransactionFromHash(txHash) + txResolver, _ := pf.ResolverContainer().Get(string(factory.TransactionTopic)) + err = txResolver.RequestDataFromHash(txHash) assert.Nil(t, err) select { diff --git a/integrationTests/transaction/interceptedTxMem_test.go b/integrationTests/transaction/interceptedTxMem_test.go deleted file mode 100644 index 0ad98f81569..00000000000 --- a/integrationTests/transaction/interceptedTxMem_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package transaction - -import ( - "encoding/hex" - "fmt" - "math/big" - "reflect" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go-sandbox/crypto/signing/kv2/singlesig" - "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" - "github.com/ElrondNetwork/elrond-go-sandbox/hashing/sha256" - "github.com/ElrondNetwork/elrond-go-sandbox/marshal" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/stretchr/testify/assert" -) - -func TestNode_GenerateSendInterceptTransactionWithMemMessenger(t *testing.T) { - hasher := sha256.Sha256{} - marshalizer := &marshal.JsonMarshalizer{} - - dPool := createTestDataPool() - - n, _, sk, _ := createMemNode(1, dPool, adbCreateAccountsDB()) - - n.Start() - defer func() { _ = n.Stop() }() - - defer p2p.ReInitializeGloballyRegisteredPeers() - - pkBuff, _ := sk.GeneratePublic().ToByteArray() - - //Step 1. Generate a transaction - tx := transaction.Transaction{ - Nonce: 0, - Value: big.NewInt(0), - RcvAddr: hasher.Compute("receiver"), - SndAddr: pkBuff, - Data: []byte("tx notarized data"), - } - - //Step 2. 
Sign transaction - txBuff, _ := marshalizer.Marshal(&tx) - signer := &singlesig.SchnorrSigner{} - tx.Signature, _ = signer.Sign(sk, txBuff) - - signedTxBuff, _ := marshalizer.Marshal(&tx) - - fmt.Printf("Transaction: %v\n%v\n", tx, string(signedTxBuff)) - - chanDone := make(chan bool) - - //step 3. wire up a received handler - dPool.Transactions().RegisterHandler(func(key []byte) { - txStored, _ := dPool.Transactions().ShardDataStore(0).Get(key) - - if reflect.DeepEqual(txStored, &tx) && tx.Signature != nil { - chanDone <- true - } - - assert.Equal(t, txStored, &tx) - - }) - - //Step 4. Job Tx - _, err := n.SendTransaction(tx.Nonce, hex.EncodeToString(tx.SndAddr), hex.EncodeToString(tx.RcvAddr), - tx.Value, string(tx.Data), tx.Signature) - assert.Nil(t, err) - - select { - case <-chanDone: - case <-time.After(time.Second * 3): - assert.Fail(t, "timeout") - } -} diff --git a/integrationTests/transaction/common.go b/integrationTests/transaction/testInitializer.go similarity index 56% rename from integrationTests/transaction/common.go rename to integrationTests/transaction/testInitializer.go index 785dd1d344f..654b4da16e8 100644 --- a/integrationTests/transaction/common.go +++ b/integrationTests/transaction/testInitializer.go @@ -2,7 +2,10 @@ package transaction import ( "context" + "crypto/ecdsa" + "fmt" "math/rand" + "strings" "time" "github.com/ElrondNetwork/elrond-go-sandbox/crypto" @@ -22,16 +25,22 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/marshal" "github.com/ElrondNetwork/elrond-go-sandbox/node" "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p/libp2p" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p/loadBalancer" "github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" - "github.com/ElrondNetwork/elrond-go-sandbox/process/interceptor" - "github.com/ElrondNetwork/elrond-go-sandbox/process/resolver" + 
"github.com/ElrondNetwork/elrond-go-sandbox/process/factory/containers" "github.com/ElrondNetwork/elrond-go-sandbox/sharding" "github.com/ElrondNetwork/elrond-go-sandbox/storage" "github.com/ElrondNetwork/elrond-go-sandbox/storage/memorydb" + "github.com/btcsuite/btcd/btcec" + crypto2 "github.com/libp2p/go-libp2p-crypto" ) -func createTestBlockChain() *blockchain.BlockChain { +type testInitializer struct { +} + +func (ti *testInitializer) createTestBlockChain() *blockchain.BlockChain { cfgCache := storage.CacheConfig{Size: 100, Type: storage.LRUCache} @@ -39,16 +48,16 @@ func createTestBlockChain() *blockchain.BlockChain { blockChain, _ := blockchain.NewBlockChain( badBlockCache, - createMemUnit(), - createMemUnit(), - createMemUnit(), - createMemUnit(), - createMemUnit()) + ti.createMemUnit(), + ti.createMemUnit(), + ti.createMemUnit(), + ti.createMemUnit(), + ti.createMemUnit()) return blockChain } -func createMemUnit() storage.Storer { +func (ti *testInitializer) createMemUnit() storage.Storer { cache, _ := storage.NewCache(storage.LRUCache, 10) persist, _ := memorydb.New() @@ -56,7 +65,7 @@ func createMemUnit() storage.Storer { return unit } -func createTestDataPool() data.TransientDataHolder { +func (ti *testInitializer) createTestDataPool() data.TransientDataHolder { txPool, _ := shardedData.NewShardedData(storage.CacheConfig{Size: 100000, Type: storage.LRUCache}) hdrPool, _ := shardedData.NewShardedData(storage.CacheConfig{Size: 100000, Type: storage.LRUCache}) @@ -85,7 +94,7 @@ func createTestDataPool() data.TransientDataHolder { return dPool } -func createDummyHexAddress(chars int) string { +func (ti *testInitializer) createDummyHexAddress(chars int) string { if chars < 1 { return "" } @@ -102,17 +111,17 @@ func createDummyHexAddress(chars int) string { return string(buff) } -func adbCreateAccountsDB() *state.AccountsDB { +func (ti *testInitializer) createAccountsDB() *state.AccountsDB { marsh := &marshal.JsonMarshalizer{} - dbw, _ := 
trie.NewDBWriteCache(createMemUnit()) + dbw, _ := trie.NewDBWriteCache(ti.createMemUnit()) tr, _ := trie.NewTrie(make([]byte, 32), dbw, sha256.Sha256{}) adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, marsh) return adb } -func createMultiSigner( +func (ti *testInitializer) createMultiSigner( privateKey crypto.PrivateKey, publicKey crypto.PublicKey, keyGen crypto.KeyGenerator, @@ -127,17 +136,16 @@ func createMultiSigner( return multiSigner, err } -func createMemNode(port int, dPool data.TransientDataHolder, accntAdapter state.AccountsAdapter) ( +func (ti *testInitializer) createNetNode(port int, dPool data.TransientDataHolder, accntAdapter state.AccountsAdapter) ( *node.Node, p2p.Messenger, crypto.PrivateKey, - process.ProcessorFactory) { + process.InterceptorsResolversFactory) { hasher := sha256.Sha256{} marshalizer := &marshal.JsonMarshalizer{} - cp, _ := p2p.NewConnectParamsFromPort(port) - mes, _ := p2p.NewMemMessenger(marshalizer, hasher, cp) + messenger := ti.createMessenger(context.Background(), port) addrConverter, _ := state.NewPlainAddressConverter(32, "0x") @@ -145,29 +153,29 @@ func createMemNode(port int, dPool data.TransientDataHolder, accntAdapter state. 
singleSigner := &singlesig.SchnorrSigner{} keyGen := signing.NewKeyGenerator(suite) sk, pk := keyGen.GeneratePair() - multiSigner, _ := createMultiSigner(sk, pk, keyGen, hasher) - blockChain := createTestBlockChain() + multiSigner, _ := ti.createMultiSigner(sk, pk, keyGen, hasher) + blkc := ti.createTestBlockChain() shardCoordinator := &sharding.OneShardCoordinator{} uint64Converter := uint64ByteSlice.NewBigEndianConverter() - pFactory, _ := factory.NewProcessorsCreator(factory.ProcessorsCreatorConfig{ - InterceptorContainer: interceptor.NewContainer(), - ResolverContainer: resolver.NewContainer(), - Messenger: mes, - Blockchain: blockChain, - DataPool: dPool, - ShardCoordinator: shardCoordinator, - AddrConverter: addrConverter, - Hasher: hasher, - Marshalizer: marshalizer, - MultiSigner: multiSigner, - SingleSigner: singleSigner, - KeyGen: keyGen, + pFactory, _ := factory.NewInterceptorsResolversCreator(factory.InterceptorsResolversConfig{ + InterceptorContainer: containers.NewObjectsContainer(), + ResolverContainer: containers.NewResolversContainer(), + Messenger: messenger, + Blockchain: blkc, + DataPool: dPool, + ShardCoordinator: shardCoordinator, + AddrConverter: addrConverter, + Hasher: hasher, + Marshalizer: marshalizer, + MultiSigner: multiSigner, + SingleSigner: singleSigner, + KeyGen: keyGen, Uint64ByteSliceConverter: uint64Converter, }) n, _ := node.NewNode( - node.WithMessenger(mes), + node.WithMessenger(messenger), node.WithMarshalizer(marshalizer), node.WithHasher(hasher), node.WithContext(context.Background()), @@ -176,86 +184,49 @@ func createMemNode(port int, dPool data.TransientDataHolder, accntAdapter state. 
node.WithAccountsAdapter(accntAdapter), node.WithKeyGenerator(keyGen), node.WithShardCoordinator(shardCoordinator), - node.WithBlockChain(blockChain), + node.WithBlockChain(blkc), node.WithUint64ByteSliceConverter(uint64Converter), node.WithMultisig(multiSigner), node.WithSinglesig(singleSigner), node.WithPrivateKey(sk), node.WithPublicKey(pk), - node.WithProcessorCreator(pFactory), + node.WithInterceptorsResolversFactory(pFactory), ) _ = pFactory.CreateInterceptors() _ = pFactory.CreateResolvers() - return n, mes, sk, pFactory + return n, messenger, sk, pFactory } -func createNetNode(port int, dPool data.TransientDataHolder, accntAdapter state.AccountsAdapter) ( - *node.Node, - p2p.Messenger, - crypto.PrivateKey) { - - hasher := sha256.Sha256{} - marshalizer := &marshal.JsonMarshalizer{} - - messenger := createMessenger(context.Background(), marshalizer, hasher, 4, port) - - addrConverter, _ := state.NewPlainAddressConverter(32, "0x") - - suite := kv2.NewBlakeSHA256Ed25519() - singlesigner := &singlesig.SchnorrSigner{} - keyGen := signing.NewKeyGenerator(suite) - sk, pk := keyGen.GeneratePair() - multiSigner, _ := createMultiSigner(sk, pk, keyGen, hasher) - blkc := createTestBlockChain() - shardCoordinator := &sharding.OneShardCoordinator{} - uint64Converter := uint64ByteSlice.NewBigEndianConverter() - - pFactory, _ := factory.NewProcessorsCreator(factory.ProcessorsCreatorConfig{ - InterceptorContainer: interceptor.NewContainer(), - ResolverContainer: resolver.NewContainer(), - Messenger: messenger, - Blockchain: blkc, - DataPool: dPool, - ShardCoordinator: shardCoordinator, - AddrConverter: addrConverter, - Hasher: hasher, - Marshalizer: marshalizer, - MultiSigner: multiSigner, - SingleSigner: singlesigner, - KeyGen: keyGen, - Uint64ByteSliceConverter: uint64Converter, - }) - - n, _ := node.NewNode( - node.WithMessenger(messenger), - node.WithMarshalizer(marshalizer), - node.WithHasher(hasher), - node.WithContext(context.Background()), - node.WithDataPool(dPool), 
- node.WithAddressConverter(addrConverter), - node.WithAccountsAdapter(accntAdapter), - node.WithKeyGenerator(keyGen), - node.WithShardCoordinator(shardCoordinator), - node.WithBlockChain(blkc), - node.WithUint64ByteSliceConverter(uint64Converter), - node.WithMultisig(multiSigner), - node.WithSinglesig(singlesigner), - node.WithPrivateKey(sk), - node.WithPublicKey(pk), - node.WithProcessorCreator(pFactory), - ) +func (ti *testInitializer) createMessenger(ctx context.Context, port int) p2p.Messenger { + r := rand.New(rand.NewSource(int64(port))) + prvKey, _ := ecdsa.GenerateKey(btcec.S256(), r) + sk := (*crypto2.Secp256k1PrivateKey)(prvKey) + + libP2PMes, err := libp2p.NewNetworkMessenger( + ctx, + port, + sk, + nil, + loadBalancer.NewOutgoingPipeLoadBalancer(), + p2p.PeerDiscoveryOff) + + if err != nil { + fmt.Println(err.Error()) + } - return n, nil, sk + return libP2PMes } -func createMessenger(ctx context.Context, marshalizer marshal.Marshalizer, hasher hashing.Hasher, maxAllowedPeers int, port int) p2p.Messenger { - cp := &p2p.ConnectParams{} - cp.Port = port - cp.GeneratePrivPubKeys(time.Now().UnixNano()) - cp.GenerateIDFromPubKey() +func (ti *testInitializer) getConnectableAddress(mes p2p.Messenger) string { + for _, addr := range mes.Addresses() { + if strings.Contains(addr, "circuit") { + continue + } + + return addr + } - nm, _ := p2p.NewNetMessenger(ctx, marshalizer, hasher, cp, maxAllowedPeers, p2p.GossipSub) - return nm + return "" } diff --git a/node/defineOptions.go b/node/defineOptions.go index 2130cc88702..4f570d745b9 100644 --- a/node/defineOptions.go +++ b/node/defineOptions.go @@ -274,13 +274,13 @@ func WithForkDetector(forkDetector process.ForkDetector) Option { } } -// WithProcessorCreator sets up the processor factory option for the Node -func WithProcessorCreator(processorCreator process.ProcessorFactory) Option { +// WithInterceptorsResolversFactory sets up the processor factory option for the Node +func 
WithInterceptorsResolversFactory(interceptorsResolversCreator process.InterceptorsResolversFactory) Option { return func(n *Node) error { - if processorCreator == nil { - return ErrNilForkDetector + if interceptorsResolversCreator == nil { + return ErrNilInterceptorsResolversFactory } - n.processorCreator = processorCreator + n.interceptorsResolversCreator = interceptorsResolversCreator return nil } } diff --git a/node/defineOptions_test.go b/node/defineOptions_test.go index 90be1615cc7..2a21ebfeee6 100644 --- a/node/defineOptions_test.go +++ b/node/defineOptions_test.go @@ -11,6 +11,32 @@ import ( "github.com/stretchr/testify/assert" ) +func TestWithMessenger_NilMessengerShouldErr(t *testing.T) { + t.Parallel() + + node, _ := NewNode() + + opt := WithMessenger(nil) + err := opt(node) + + assert.Nil(t, node.messenger) + assert.Equal(t, ErrNilMessenger, err) +} + +func TestWithMessenger_ShouldWork(t *testing.T) { + t.Parallel() + + node, _ := NewNode() + + messenger := &mock.MessengerStub{} + + opt := WithMessenger(messenger) + err := opt(node) + + assert.True(t, node.messenger == messenger) + assert.Nil(t, err) +} + func TestWithMarshalizer_NilMarshalizerShouldErr(t *testing.T) { t.Parallel() @@ -580,7 +606,7 @@ func TestWithMultisig_ShouldWork(t *testing.T) { assert.Nil(t, err) } -func TestWithForkDetector_shouldWork(t *testing.T) { +func TestWithForkDetector_ShouldWork(t *testing.T) { t.Parallel() node, _ := NewNode() @@ -604,3 +630,29 @@ func TestWithForkDetector_NilForkDetectorShouldErr(t *testing.T) { assert.Nil(t, node.forkDetector) assert.Equal(t, ErrNilForkDetector, err) } + +func TestWithInterceptorsResolversFactory_ShouldWork(t *testing.T) { + t.Parallel() + + node, _ := NewNode() + + interceptorsResolvers := &mock.InterceptorsResolversFactoryStub{} + opt := WithInterceptorsResolversFactory(interceptorsResolvers) + + err := opt(node) + + assert.True(t, node.interceptorsResolversCreator == interceptorsResolvers) + assert.Nil(t, err) +} + +func 
TestWithInterceptorsResolversFactory_NilFactoryShouldErr(t *testing.T) { + t.Parallel() + + node, _ := NewNode() + + opt := WithInterceptorsResolversFactory(nil) + err := opt(node) + + assert.Nil(t, node.interceptorsResolversCreator) + assert.Equal(t, ErrNilInterceptorsResolversFactory, err) +} diff --git a/node/errors.go b/node/errors.go index f9e0189ba01..174448d1f2e 100644 --- a/node/errors.go +++ b/node/errors.go @@ -66,3 +66,9 @@ var ErrNilMultiSig = errors.New("trying to set nil multisig") // ErrNilForkDetector signals that a nil forkdetector object has been provided var ErrNilForkDetector = errors.New("nil fork detector") + +// ErrValidatorAlreadySet signals that a topic validator has already been set +var ErrValidatorAlreadySet = errors.New("topic validator has already been set") + +// ErrNilInterceptorsResolversFactory signals that a nil interceptors resolvers factory has been provided +var ErrNilInterceptorsResolversFactory = errors.New("nil interceptors resolvers factory") diff --git a/node/mock/interceptorsResolversFactoryStub.go b/node/mock/interceptorsResolversFactoryStub.go new file mode 100644 index 00000000000..849dccb02a4 --- /dev/null +++ b/node/mock/interceptorsResolversFactoryStub.go @@ -0,0 +1,28 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/process" +) + +type InterceptorsResolversFactoryStub struct { + CreateInterceptorsCalled func() error + CreateResolversCalled func() error + InterceptorContainerCalled func() process.Container + ResolverContainerCalled func() process.ResolversContainer +} + +func (irfs *InterceptorsResolversFactoryStub) CreateInterceptors() error { + return irfs.CreateInterceptorsCalled() +} + +func (irfs *InterceptorsResolversFactoryStub) CreateResolvers() error { + return irfs.CreateResolversCalled() +} + +func (irfs *InterceptorsResolversFactoryStub) InterceptorContainer() process.Container { + return irfs.InterceptorContainerCalled() +} + +func (irfs *InterceptorsResolversFactoryStub) 
ResolverContainer() process.ResolversContainer { + return irfs.ResolverContainerCalled() +} diff --git a/node/mock/messengerStub.go b/node/mock/messengerStub.go index 34a3b6f60e3..b454237751c 100644 --- a/node/mock/messengerStub.go +++ b/node/mock/messengerStub.go @@ -1,89 +1,103 @@ package mock import ( - "context" - "time" - - "github.com/ElrondNetwork/elrond-go-sandbox/hashing" - "github.com/ElrondNetwork/elrond-go-sandbox/marshal" "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/libp2p/go-libp2p-net" - "github.com/libp2p/go-libp2p-peer" - "github.com/multiformats/go-multiaddr" ) type MessengerStub struct { - marshalizer marshal.Marshalizer - HasherObj hashing.Hasher - CloseCalled func() error - AddTopicCalled func(t *p2p.Topic) error - GetTopicCalled func(name string) *p2p.Topic - BootstrapCalled func(ctx context.Context) + CloseCalled func() error + IDCalled func() p2p.PeerID + PeersCalled func() []p2p.PeerID + AddressesCalled func() []string + ConnectToPeerCalled func(address string) error + KadDhtDiscoverNewPeersCalled func() error + TrimConnectionsCalled func() + IsConnectedCalled func(peerID p2p.PeerID) bool + ConnectedPeersCalled func() []p2p.PeerID + CreateTopicCalled func(name string, createPipeForTopic bool) error + HasTopicCalled func(name string) bool + HasTopicValidatorCalled func(name string) bool + BroadcastOnPipeCalled func(pipe string, topic string, buff []byte) + BroadcastCalled func(topic string, buff []byte) + RegisterMessageProcessorCalled func(topic string, handler p2p.MessageProcessor) error + UnregisterMessageProcessorCalled func(topic string) error + SendToConnectedPeerCalled func(topic string, buff []byte, peerID p2p.PeerID) error + OutgoingPipeLoadBalancerCalled func() p2p.PipeLoadBalancer + BootstrapCalled func() error +} + +func (ms *MessengerStub) RegisterMessageProcessor(topic string, handler p2p.MessageProcessor) error { + return ms.RegisterMessageProcessorCalled(topic, handler) +} + +func (ms *MessengerStub) 
UnregisterMessageProcessor(topic string) error { + return ms.UnregisterMessageProcessorCalled(topic) } -func NewMessengerStub() *MessengerStub { - return &MessengerStub{ - marshalizer: &MarshalizerMock{}, - HasherObj: HasherMock{}, - } +func (ms *MessengerStub) Broadcast(topic string, buff []byte) { + ms.BroadcastCalled(topic, buff) +} + +func (ms *MessengerStub) OutgoingPipeLoadBalancer() p2p.PipeLoadBalancer { + return ms.OutgoingPipeLoadBalancerCalled() } func (ms *MessengerStub) Close() error { return ms.CloseCalled() } -func (ms *MessengerStub) ID() peer.ID { - panic("implement me") +func (ms *MessengerStub) ID() p2p.PeerID { + return ms.IDCalled() } -func (ms *MessengerStub) Peers() []peer.ID { - panic("implement me") +func (ms *MessengerStub) Peers() []p2p.PeerID { + return ms.PeersCalled() } -func (ms *MessengerStub) Conns() []net.Conn { - panic("implement me") +func (ms *MessengerStub) Addresses() []string { + return ms.AddressesCalled() } -func (ms *MessengerStub) Marshalizer() marshal.Marshalizer { - return ms.marshalizer +func (ms *MessengerStub) ConnectToPeer(address string) error { + return ms.ConnectToPeerCalled(address) } -func (ms *MessengerStub) Hasher() hashing.Hasher { - return ms.HasherObj +func (ms *MessengerStub) KadDhtDiscoverNewPeers() error { + return ms.KadDhtDiscoverNewPeersCalled() } -func (ms *MessengerStub) RouteTable() *p2p.RoutingTable { - panic("implement me") +func (ms *MessengerStub) TrimConnections() { + ms.TrimConnectionsCalled() } -func (ms *MessengerStub) Addresses() []string { - panic("implement me") +func (ms *MessengerStub) IsConnected(peerID p2p.PeerID) bool { + return ms.IsConnectedCalled(peerID) } -func (ms *MessengerStub) ConnectToAddresses(ctx context.Context, addresses []string) { - panic("implement me") +func (ms *MessengerStub) ConnectedPeers() []p2p.PeerID { + return ms.ConnectedPeersCalled() } -func (ms *MessengerStub) Bootstrap(ctx context.Context) { - ms.BootstrapCalled(ctx) +func (ms *MessengerStub) 
CreateTopic(name string, createPipeForTopic bool) error { + return ms.CreateTopicCalled(name, createPipeForTopic) } -func (ms *MessengerStub) PrintConnected() { - panic("implement me") +func (ms *MessengerStub) HasTopic(name string) bool { + return ms.HasTopicCalled(name) } -func (ms *MessengerStub) AddAddress(p peer.ID, addr multiaddr.Multiaddr, ttl time.Duration) { - panic("implement me") +func (ms *MessengerStub) HasTopicValidator(name string) bool { + return ms.HasTopicValidatorCalled(name) } -func (ms *MessengerStub) Connectedness(pid peer.ID) net.Connectedness { - panic("implement me") +func (ms *MessengerStub) BroadcastOnPipe(pipe string, topic string, buff []byte) { + ms.BroadcastOnPipeCalled(pipe, topic, buff) } -func (ms *MessengerStub) GetTopic(topicName string) *p2p.Topic { - return ms.GetTopicCalled(topicName) +func (ms *MessengerStub) SendToConnectedPeer(topic string, buff []byte, peerID p2p.PeerID) error { + return ms.SendToConnectedPeerCalled(topic, buff, peerID) } -func (ms *MessengerStub) AddTopic(t *p2p.Topic) error { - return ms.AddTopicCalled(t) +func (ms *MessengerStub) Bootstrap() error { + return ms.BootstrapCalled() } diff --git a/node/mock/processorCreatorMock.go b/node/mock/processorCreatorMock.go deleted file mode 100644 index 2e10de6f924..00000000000 --- a/node/mock/processorCreatorMock.go +++ /dev/null @@ -1,33 +0,0 @@ -package mock - -import ( - "github.com/ElrondNetwork/elrond-go-sandbox/process" -) - -// ProcessorCreatorMock is a mock implementation of ProcessorFactory -type ProcessorCreatorMock struct { - CreateInterceptorsCalled func() error - CreateResolversCalled func() error - InterceptorContainerCalled func() process.InterceptorContainer - ResolverContainerCalled func() process.ResolverContainer -} - -// CreateInterceptors is a mock function for creating interceptors -func (p *ProcessorCreatorMock) CreateInterceptors() error { - return p.CreateInterceptorsCalled() -} - -// CreateResolvers is a mock function for creating 
resolvers -func (p *ProcessorCreatorMock) CreateResolvers() error { - return p.CreateResolversCalled() -} - -// InterceptorContainer is a mock getter for the interceptor container -func (p *ProcessorCreatorMock) InterceptorContainer() process.InterceptorContainer { - return p.InterceptorContainerCalled() -} - -// ResolverContainer is a mock getter for the resolver container -func (p *ProcessorCreatorMock) ResolverContainer() process.ResolverContainer { - return p.ResolverContainerCalled() -} diff --git a/node/mock/stringCreatorMock.go b/node/mock/stringCreatorMock.go deleted file mode 100644 index c9cb525a68c..00000000000 --- a/node/mock/stringCreatorMock.go +++ /dev/null @@ -1,17 +0,0 @@ -package mock - -import ( - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" -) - -type StringCreatorMock struct { - Data string -} - -func (sn *StringCreatorMock) ID() string { - return sn.Data -} - -func (sn *StringCreatorMock) Create() p2p.Creator { - return &StringCreatorMock{} -} diff --git a/node/node.go b/node/node.go index 41dca911e2e..05dd5bfcffe 100644 --- a/node/node.go +++ b/node/node.go @@ -2,7 +2,6 @@ package node import ( "context" - "encoding/base64" "fmt" "math/big" gosync "sync" @@ -28,7 +27,6 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/ntp" "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/process" - block2 "github.com/ElrondNetwork/elrond-go-sandbox/process/block" "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" "github.com/ElrondNetwork/elrond-go-sandbox/process/sync" "github.com/ElrondNetwork/elrond-go-sandbox/sharding" @@ -41,6 +39,9 @@ const WaitTime = time.Duration(2000 * time.Millisecond) // ConsensusTopic is the topic used in consensus algorithm const ConsensusTopic topicName = "consensus" +// SendTransactionsPipe is the pipe used for sending new transactions +const SendTransactionsPipe = "send transactions pipe" + type topicName string var log = logger.NewDefaultLogger() @@ -52,22 +53,22 
@@ type Option func(*Node) error // Node is a structure that passes the configuration parameters and initializes // required services as requested type Node struct { - marshalizer marshal.Marshalizer - ctx context.Context - hasher hashing.Hasher - initialNodesPubkeys []string - initialNodesBalances map[string]*big.Int - roundDuration uint64 - consensusGroupSize int - messenger p2p.Messenger - syncer ntp.SyncTimer - blockProcessor process.BlockProcessor - genesisTime time.Time - elasticSubrounds bool - accounts state.AccountsAdapter - addrConverter state.AddressConverter - uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter - processorCreator process.ProcessorFactory + marshalizer marshal.Marshalizer + ctx context.Context + hasher hashing.Hasher + initialNodesPubkeys []string + initialNodesBalances map[string]*big.Int + roundDuration uint64 + consensusGroupSize int + messenger p2p.Messenger + syncer ntp.SyncTimer + blockProcessor process.BlockProcessor + genesisTime time.Time + elasticSubrounds bool + accounts state.AccountsAdapter + addrConverter state.AddressConverter + uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + interceptorsResolversCreator process.InterceptorsResolversFactory privateKey crypto.PrivateKey publicKey crypto.PublicKey @@ -144,8 +145,8 @@ func (n *Node) P2PBootstrap() error { if n.messenger == nil { return ErrNilMessenger } - n.messenger.Bootstrap(n.ctx) - return nil + + return n.messenger.Bootstrap() } // CreateShardedStores instantiate sharded cachers for Transactions and Headers @@ -225,7 +226,11 @@ func (n *Node) StartConsensus() error { n.shardCoordinator, n.singlesig, ) + if err != nil { + return err + } + err = n.createConsensusTopic(worker) if err != nil { return err } @@ -266,19 +271,6 @@ func (n *Node) StartConsensus() error { return err } - receivedMessage := func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - worker.ReceivedMessage(name, data, msgInfo) - } - - topic := 
p2p.NewTopic(string(ConsensusTopic), &spos.ConsensusMessage{}, n.marshalizer) - topic.AddDataReceived(receivedMessage) - - err = n.messenger.AddTopic(topic) - - if err != nil { - return err - } - go chronologyHandler.StartRounds() return nil @@ -392,18 +384,16 @@ func (n *Node) GenerateAndSendBulkTransactions(receiverHex string, value *big.In return errFound } - topic := n.messenger.GetTopic(string(factory.TransactionTopic)) - if topic == nil { - return errors.New("could not get transaction topic") - } - if len(transactions) != int(noOfTx) { return errors.New(fmt.Sprintf("generated only %d from required %d transactions", len(transactions), noOfTx)) } for i := 0; i < len(transactions); i++ { - err = topic.BroadcastBuff(transactions[i]) - time.Sleep(time.Microsecond * 100) + n.messenger.BroadcastOnPipe( + SendTransactionsPipe, + string(factory.TransactionTopic), + transactions[i], + ) if err != nil { return errors.New("could not broadcast transaction: " + err.Error()) @@ -438,57 +428,28 @@ func (n *Node) createChronologyHandler(rounder consensus.Rounder) (consensus.Chr return chr, nil } -func (n *Node) createBootstraper(rounder consensus.Rounder) (process.Bootstraper, error) { - bootstrap, err := sync.NewBootstrap(n.dataPool, n.blkc, rounder, n.blockProcessor, WaitTime, n.hasher, n.marshalizer, n.forkDetector) +func (n *Node) createBootstraper(rounder consensus.Rounder) (process.Bootstrapper, error) { + bootstrap, err := sync.NewBootstrap( + n.dataPool, + n.blkc, + rounder, + n.blockProcessor, + WaitTime, + n.hasher, + n.marshalizer, + n.forkDetector, + n.interceptorsResolversCreator.ResolverContainer(), + ) if err != nil { return nil, err } - resH, err := n.processorCreator.ResolverContainer().Get(string(factory.HeadersTopic)) - if err != nil { - return nil, errors.New("cannot find headers topic resolver") - } - hdrRes := resH.(*block2.HeaderResolver) - - resT, err := n.processorCreator.ResolverContainer().Get(string(factory.TxBlockBodyTopic)) - if err != nil { - 
return nil, errors.New("cannot find tx block body topic resolver") - - } - gbbrRes := resT.(*block2.GenericBlockBodyResolver) - - bootstrap.RequestHeaderHandler = createRequestHeaderHandler(hdrRes) - bootstrap.RequestTxBodyHandler = cerateRequestTxBodyHandler(gbbrRes) - bootstrap.StartSync() return bootstrap, nil } -func createRequestHeaderHandler(hdrRes *block2.HeaderResolver) func(nonce uint64) { - return func(nonce uint64) { - err := hdrRes.RequestHeaderFromNonce(nonce) - - log.Info(fmt.Sprintf("requested header with nonce %d from network\n", nonce)) - if err != nil { - log.Error("RequestHeaderFromNonce error: ", err.Error()) - } - } -} - -func cerateRequestTxBodyHandler(gbbrRes *block2.GenericBlockBodyResolver) func(hash []byte) { - return func(hash []byte) { - err := gbbrRes.RequestBlockBodyFromHash(hash) - - log.Info(fmt.Sprintf("requested tx body with hash %s from network\n", toB64(hash))) - if err != nil { - log.Error("RequestBlockBodyFromHash error: ", err.Error()) - return - } - } -} - // createConsensusState method creates a consensusState object func (n *Node) createConsensusState() (*spos.ConsensusState, error) { selfId, err := n.publicKey.ToByteArray() @@ -546,6 +507,22 @@ func (n *Node) createValidatorGroupSelector() (consensus.ValidatorGroupSelector, return validatorGroupSelector, nil } +// createConsensusTopic creates a consensus topic for node +func (n *Node) createConsensusTopic(messageProcessor p2p.MessageProcessor) error { + if n.messenger.HasTopicValidator(string(ConsensusTopic)) { + return ErrValidatorAlreadySet + } + + if !n.messenger.HasTopic(string(ConsensusTopic)) { + err := n.messenger.CreateTopic(string(ConsensusTopic), true) + if err != nil { + return err + } + } + + return n.messenger.RegisterMessageProcessor(string(ConsensusTopic), messageProcessor) +} + func (n *Node) generateAndSignTx( nonce uint64, value *big.Int, @@ -653,21 +630,17 @@ func (n *Node) SendTransaction( Signature: signature, } - topic := 
n.messenger.GetTopic(string(factory.TransactionTopic)) - - if topic == nil { - return nil, errors.New("could not get transaction topic") - } - marshalizedTx, err := n.marshalizer.Marshal(&tx) if err != nil { return nil, errors.New("could not marshal transaction") } - err = topic.BroadcastBuff(marshalizedTx) - if err != nil { - return nil, errors.New("could not broadcast transaction: " + err.Error()) - } + n.messenger.BroadcastOnPipe( + SendTransactionsPipe, + string(factory.TransactionTopic), + marshalizedTx, + ) + return &tx, nil } @@ -734,53 +707,26 @@ func (n *Node) createGenesisBlock() (*block.Header, []byte, error) { } func (n *Node) sendMessage(cnsDta *spos.ConsensusMessage) { - topic := n.messenger.GetTopic(string(ConsensusTopic)) - - if topic == nil { - log.Debug(fmt.Sprintf("could not get consensus topic")) + cnsDtaBuff, err := n.marshalizer.Marshal(cnsDta) + if err != nil { + log.Debug(err.Error()) return } - err := topic.Broadcast(cnsDta) - - if err != nil { - log.Debug(fmt.Sprintf("could not broadcast message: " + err.Error())) - } + n.messenger.Broadcast( + string(ConsensusTopic), + cnsDtaBuff) } func (n *Node) broadcastBlockBody(msg []byte) { - topic := n.messenger.GetTopic(string(factory.TxBlockBodyTopic)) - - if topic == nil { - log.Debug(fmt.Sprintf("could not get tx block body topic")) - return - } - - err := topic.BroadcastBuff(msg) - - if err != nil { - log.Debug(fmt.Sprintf("could not broadcast message: " + err.Error())) - } + n.messenger.Broadcast( + string(factory.TxBlockBodyTopic), + msg) } func (n *Node) broadcastHeader(msg []byte) { - topic := n.messenger.GetTopic(string(factory.HeadersTopic)) - - if topic == nil { - log.Debug(fmt.Sprintf("could not get header topic")) - return - } - - err := topic.BroadcastBuff(msg) - - if err != nil { - log.Debug(fmt.Sprintf("could not broadcast message: " + err.Error())) - } -} - -func toB64(buff []byte) string { - if buff == nil { - return "" - } - return base64.StdEncoding.EncodeToString(buff) + 
n.messenger.Broadcast( + string(factory.HeadersTopic), + msg, + ) } diff --git a/node/node_test.go b/node/node_test.go index 49efc4f08b0..6eb33148b42 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -15,9 +15,7 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" "github.com/ElrondNetwork/elrond-go-sandbox/node" "github.com/ElrondNetwork/elrond-go-sandbox/node/mock" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" - transaction2 "github.com/ElrondNetwork/elrond-go-sandbox/process/transaction" "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) @@ -554,11 +552,6 @@ func TestGenerateAndSendBulkTransactions_ZeroTxShouldErr(t *testing.T) { func TestGenerateAndSendBulkTransactions_NilAccountAdapterShouldErr(t *testing.T) { marshalizer := &mock.MarshalizerFake{} - mes := &mock.MessengerStub{} - mes.GetTopicCalled = func(name string) *p2p.Topic { - return nil - } - addrConverter := mock.NewAddressConverterFake(32, "0x") keyGen := &mock.KeyGenMock{} sk, pk := keyGen.GeneratePair() @@ -701,30 +694,22 @@ func TestGenerateAndSendBulkTransactions_ShouldWork(t *testing.T) { mutRecoveredTransactions := &sync.RWMutex{} recoveredTransactions := make(map[uint64]*transaction.Transaction) signer := &mock.SinglesignMock{} - topic := p2p.NewTopic(string(factory.TransactionTopic), transaction2.NewInterceptedTransaction(signer), marshalizer) - topic.SendData = func(data []byte) error { - //handler to capture sent data - tx := transaction.Transaction{} - - err := marshalizer.Unmarshal(&tx, data) - if err != nil { - return err - } - - mutRecoveredTransactions.Lock() - recoveredTransactions[tx.Nonce] = &tx - mutRecoveredTransactions.Unlock() - - return nil - } - - mes := &mock.MessengerStub{} - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(factory.TransactionTopic) { - return topic - } - - return nil + mes := &mock.MessengerStub{ + BroadcastOnPipeCalled: 
func(pipe string, topic string, buff []byte) { + if topic == string(factory.TransactionTopic) { + //handler to capture sent data + tx := transaction.Transaction{} + + err := marshalizer.Unmarshal(&tx, buff) + if err != nil { + assert.Fail(t, err.Error()) + } + + mutRecoveredTransactions.Lock() + recoveredTransactions[tx.Nonce] = &tx + mutRecoveredTransactions.Unlock() + } + }, } accAdapter := getAccAdapter(big.NewInt(0)) @@ -767,82 +752,7 @@ func getAccAdapter(balance *big.Int) mock.AccountsAdapterStub { } func getPrivateKey() *mock.PrivateKeyStub { - return &mock.PrivateKeyStub{ - } -} - -func TestSendTransaction_TopicDoesNotExistsShouldErr(t *testing.T) { - n, _ := node.NewNode( - node.WithAddressConverter(mock.NewAddressConverterFake(32, "0x")), - ) - - mes := mock.NewMessengerStub() - n.SetMessenger(mes) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - return nil - } - - nonce := uint64(50) - value := big.NewInt(567) - sender := createDummyHexAddress(64) - receiver := createDummyHexAddress(64) - txData := "data" - signature := []byte("signature") - - tx, err := n.SendTransaction( - nonce, - sender, - receiver, - value, - txData, - signature) - - assert.Equal(t, "could not get transaction topic", err.Error()) - assert.Nil(t, tx) -} - -func TestSendTransaction_BroadcastErrShouldErr(t *testing.T) { - n, _ := node.NewNode( - node.WithMarshalizer(&mock.MarshalizerFake{}), - node.WithAddressConverter(mock.NewAddressConverterFake(32, "0x")), - ) - - mes := mock.NewMessengerStub() - n.SetMessenger(mes) - - broadcastErr := errors.New("failure") - - topicTx := p2p.NewTopic("", &mock.StringCreatorMock{}, &mock.MarshalizerMock{}) - topicTx.SendData = func(data []byte) error { - return broadcastErr - } - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(factory.TransactionTopic) { - return topicTx - } - - return nil - } - - nonce := uint64(50) - value := big.NewInt(567) - sender := createDummyHexAddress(64) - receiver := 
createDummyHexAddress(64) - txData := "data" - signature := []byte("signature") - - tx, err := n.SendTransaction( - nonce, - sender, - receiver, - value, - txData, - signature) - - assert.Equal(t, "could not broadcast transaction: "+broadcastErr.Error(), err.Error()) - assert.Nil(t, tx) + return &mock.PrivateKeyStub{} } func TestSendTransaction_ShouldWork(t *testing.T) { @@ -851,24 +761,14 @@ func TestSendTransaction_ShouldWork(t *testing.T) { node.WithAddressConverter(mock.NewAddressConverterFake(32, "0x")), ) - mes := mock.NewMessengerStub() - n.SetMessenger(mes) - txSent := false - topicTx := p2p.NewTopic("", &mock.StringCreatorMock{}, mock.MarshalizerMock{}) - topicTx.SendData = func(data []byte) error { - txSent = true - return nil - } - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == string(factory.TransactionTopic) { - return topicTx - } - - return nil + mes := &mock.MessengerStub{ + BroadcastOnPipeCalled: func(pipe string, topic string, buff []byte) { + txSent = true + }, } + n.SetMessenger(mes) nonce := uint64(50) value := big.NewInt(567) @@ -1029,10 +929,14 @@ func TestCreateShardedStores_ReturnsSuccessfully(t *testing.T) { } func getMessenger() *mock.MessengerStub { - messenger := mock.NewMessengerStub() - messenger.BootstrapCalled = func(ctx context.Context) {} - messenger.CloseCalled = func() error { - return nil + messenger := &mock.MessengerStub{ + CloseCalled: func() error { + return nil + }, + BootstrapCalled: func() error { + return nil + }, } + return messenger } diff --git a/p2p/connNotifier.go b/p2p/connNotifier.go deleted file mode 100644 index c4088e00c94..00000000000 --- a/p2p/connNotifier.go +++ /dev/null @@ -1,251 +0,0 @@ -package p2p - -import ( - "sync" - "time" - - "github.com/libp2p/go-libp2p-net" - "github.com/libp2p/go-libp2p-peer" - "github.com/multiformats/go-multiaddr" -) - -// durRefreshConnections represents the duration used to pause between refreshing connections to known peers -const durRefreshConnections = 
1000 * time.Millisecond - -//the value of 2 was chosen as a mean to iterate the known peers at least 1 time but not 2 times -const maxFullCycles = 2 - -// ResultType will signal the result -type ResultType int - -const ( - // WontConnect will not try to connect to other peers - WontConnect ResultType = iota - // OnlyInboundConnections means that there are only inbound connections - OnlyInboundConnections - // SuccessfullyConnected signals that has successfully connected to a peer - SuccessfullyConnected - // NothingDone nothing has been done - NothingDone -) - -// ConnNotifier is used to manage the connections to other peers -type ConnNotifier struct { - stopChan chan bool - stoppedChan chan bool - mutIsRunning sync.RWMutex - isRunning bool - - maxAllowedPeers int - - // GetKnownPeers is a pointer to a function that will return all known peers found by a Messenger - GetKnownPeers func(sender *ConnNotifier) []peer.ID - // ConnectToPeer is a pointer to a function that has to make the Messenger object to connect ta peerID - ConnectToPeer func(sender *ConnNotifier, pid peer.ID) error - // GetConnections is a pointer to a function that returns a snapshot of all known connections held by a Messenger - GetConnections func(sender *ConnNotifier) []net.Conn - // IsConnected is a pointer to a function that returns tre if current messenger is connected to peer with ID = pid - IsConnected func(sender *ConnNotifier, pid peer.ID) bool - - indexKnownPeers int -} - -// NewConnNotifier will create a new object -func NewConnNotifier(maxAllowedPeers int) *ConnNotifier { - cn := ConnNotifier{ - maxAllowedPeers: maxAllowedPeers, - mutIsRunning: sync.RWMutex{}, - stopChan: make(chan bool, 0), - stoppedChan: make(chan bool, 0), - } - - return &cn -} - -// TaskResolveConnections resolves the connections to other peers. It should not be called too often as the -// connections are not done instantly. 
Even if the connection is made in a short time, there is a delay -// until the connected peer might close down the connections because it reached the maximum limit. -// This function handles the array connections that mdns service provides. -// This function always tries to find a new connection by closing the oldest one. -// It tries to create a new outbound connection by iterating over known peers for at least one cycle but not 2 or more. -func (cn *ConnNotifier) TaskResolveConnections() ResultType { - if cn.maxAllowedPeers < 1 { - //won't try to connect to other peers - return WontConnect - } - - conns := cn.getConnections() - knownPeers := cn.getKnownPeers() - inConns, _ := cn.computeInboundOutboundConns(conns) - - //test whether we only have inbound connection (security issue) - if inConns >= cn.maxAllowedPeers { - err := conns[0].Close() - if err != nil { - log.Error(err.Error()) - } - - return OnlyInboundConnections - } - - //try to connect to other peers - if len(conns) < cn.maxAllowedPeers && len(knownPeers) > 0 { - return cn.iterateThroughPeersAndTryToConnect(knownPeers) - } - - return NothingDone -} - -func (cn *ConnNotifier) getConnections() []net.Conn { - if cn.GetConnections != nil { - return cn.GetConnections(cn) - } else { - return make([]net.Conn, 0) - } -} - -func (cn *ConnNotifier) getKnownPeers() []peer.ID { - if cn.GetKnownPeers != nil { - return cn.GetKnownPeers(cn) - } else { - return make([]peer.ID, 0) - } -} - -func (cn *ConnNotifier) computeInboundOutboundConns(conns []net.Conn) (inConns, outConns int) { - //get how many inbound and outbound connection we have - for i := 0; i < len(conns); i++ { - if conns[i].Stat().Direction == net.DirInbound { - inConns++ - } - - if conns[i].Stat().Direction == net.DirOutbound { - outConns++ - } - } - - return -} - -func (cn *ConnNotifier) iterateThroughPeersAndTryToConnect(knownPeers []peer.ID) ResultType { - fullCycles := 0 - - for fullCycles < maxFullCycles { - if cn.indexKnownPeers >= len(knownPeers) 
{ - //index out of bound, do 0 (restart the list) - cn.indexKnownPeers = 0 - fullCycles++ - } - - //get the known peerID - peerID := knownPeers[cn.indexKnownPeers] - cn.indexKnownPeers++ - - //func pointers are associated - if cn.ConnectToPeer != nil && cn.IsConnected != nil { - isConnected := cn.IsConnected(cn, peerID) - - if !isConnected { - err := cn.ConnectToPeer(cn, peerID) - - if err == nil { - return SuccessfullyConnected - } - } - } - } - - return NothingDone -} - -// Listen is called when network starts listening on an addr -func (cn *ConnNotifier) Listen(netw net.Network, ma multiaddr.Multiaddr) { - //Nothing to be done -} - -// ListenClose is called when network starts listening on an addr -func (cn *ConnNotifier) ListenClose(netw net.Network, ma multiaddr.Multiaddr) { - //Nothing to be done -} - -// Connected is called when a connection opened -func (cn *ConnNotifier) Connected(netw net.Network, conn net.Conn) { - if cn.GetConnections == nil { - err := conn.Close() - if err != nil { - log.Error(err.Error()) - } - return - } - - conns := cn.GetConnections(cn) - - //refuse other connections if max connection has been reached - if cn.maxAllowedPeers < len(conns) { - err := conn.Close() - if err != nil { - log.Error(err.Error()) - } - return - } -} - -// Disconnected is called when a connection closed -func (cn *ConnNotifier) Disconnected(netw net.Network, conn net.Conn) { - //Nothing to be done -} - -// OpenedStream is called when a stream opened -func (cn *ConnNotifier) OpenedStream(netw net.Network, stream net.Stream) { - //Nothing to be done -} - -// ClosedStream is called when a stream was closed -func (cn *ConnNotifier) ClosedStream(netw net.Network, stream net.Stream) { - //Nothing to be done -} - -// Starts the ConnNotifier main process -func (cn *ConnNotifier) Start() { - cn.mutIsRunning.Lock() - defer cn.mutIsRunning.Unlock() - - if cn.isRunning { - return - } - - cn.isRunning = true - go cn.maintainPeers() -} - -// Stops the ConnNotifier main 
process -func (cn *ConnNotifier) Stop() { - cn.mutIsRunning.Lock() - defer cn.mutIsRunning.Unlock() - - if !cn.isRunning { - return - } - - cn.isRunning = false - - //send stop notification - cn.stopChan <- true - //await to finalise "github.com/ElrondNetwork/elrond-go-sandbox/p2p/mock" - <-cn.stoppedChan -} - -// maintainPeers is a routine that periodically calls TaskResolveConnections to resolve peer connections -func (cn *ConnNotifier) maintainPeers() { - for { - select { - case <-cn.stopChan: - log.Debug("ConnNotifier object has stopped!") - cn.stoppedChan <- true - return - case <-time.After(durRefreshConnections): - } - - cn.TaskResolveConnections() - } -} diff --git a/p2p/connNotifier_test.go b/p2p/connNotifier_test.go deleted file mode 100644 index 9fc43b2bac6..00000000000 --- a/p2p/connNotifier_test.go +++ /dev/null @@ -1,268 +0,0 @@ -package p2p_test - -import ( - "fmt" - "sync/atomic" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p/mock" - "github.com/libp2p/go-libp2p-net" - "github.com/libp2p/go-libp2p-peer" - "github.com/stretchr/testify/assert" -) - -var testConnNotifierMaxWaitTime = time.Second * 5 -var testConnNotifierWaitTimeForNoResponse = time.Second - -func TestConnNotifierTaskResolveConnectionsNotAllowedToConnect(t *testing.T) { - cn := p2p.NewConnNotifier(0) - - assert.Equal(t, p2p.WontConnect, cn.TaskResolveConnections()) -} - -func TestConnNotifierTaskResolveConnectionsOnlyInboundConnections(t *testing.T) { - cn := p2p.NewConnNotifier(1) - - //will return 2 inbound connections - cn.GetConnections = func(sender *p2p.ConnNotifier) []net.Conn { - return []net.Conn{ - &mock.ConnMock{Status: net.Stat{Direction: net.DirInbound}}, - &mock.ConnMock{Status: net.Stat{Direction: net.DirInbound}}} - } - - assert.Equal(t, p2p.OnlyInboundConnections, cn.TaskResolveConnections()) -} - -func TestConnNotifierTaskResolveConnectionsNothingDone(t *testing.T) { - cn := 
p2p.NewConnNotifier(1) - - assert.Equal(t, p2p.NothingDone, cn.TaskResolveConnections()) -} - -func TestConnNotifierComputeInboundOutboundConns(t *testing.T) { - cn := p2p.NewConnNotifier(1) - - //2 inbound, 3 outbound - conns := []net.Conn{ - &mock.ConnMock{Status: net.Stat{Direction: net.DirInbound}}, - &mock.ConnMock{Status: net.Stat{Direction: net.DirOutbound}}, - &mock.ConnMock{Status: net.Stat{Direction: net.DirOutbound}}, - &mock.ConnMock{Status: net.Stat{Direction: net.DirInbound}}, - &mock.ConnMock{Status: net.Stat{Direction: net.DirOutbound}}} - - inbound, outbound := cn.ComputeInboundOutboundConns(conns) - - assert.Equal(t, inbound, 2) - assert.Equal(t, outbound, 3) - -} - -func TestConnNotifierTryToConnectWithSuccessRunning(t *testing.T) { - chanDone := make(chan bool, 0) - - cn := p2p.NewConnNotifier(1) - - cn.GetConnections = func(sender *p2p.ConnNotifier) []net.Conn { - return make([]net.Conn, 0) - } - - cn.IsConnected = func(sender *p2p.ConnNotifier, pid peer.ID) bool { - return false - } - - cn.GetKnownPeers = func(sender *p2p.ConnNotifier) []peer.ID { - return []peer.ID{"aaa", "bbb"} - } - - cn.ConnectToPeer = func(cn *p2p.ConnNotifier, id peer.ID) error { - chanDone <- true - - return nil - } - - cn.Start() - defer cn.Stop() - - select { - case <-chanDone: - fmt.Println("ConnectToPeer called!") - case <-time.After(testConnNotifierMaxWaitTime): - assert.Fail(t, "Should have called to connect!") - return - } -} - -func TestConnNotifierTryToConnectWithSuccessOn2PeersRunning(t *testing.T) { - chanDone := make(chan bool, 0) - - cn := p2p.NewConnNotifier(1) - - aaaTriedToConnect := int32(0) - bbbTriedToConnect := int32(0) - - cn.GetConnections = func(sender *p2p.ConnNotifier) []net.Conn { - return make([]net.Conn, 0) - } - - cn.IsConnected = func(sender *p2p.ConnNotifier, pid peer.ID) bool { - return false - } - - cn.GetKnownPeers = func(sender *p2p.ConnNotifier) []peer.ID { - return []peer.ID{"aaa", "bbb"} - } - - cn.ConnectToPeer = func(cn 
*p2p.ConnNotifier, id peer.ID) error { - if id == "aaa" { - atomic.AddInt32(&aaaTriedToConnect, 1) - } - - if id == "bbb" { - atomic.AddInt32(&bbbTriedToConnect, 1) - } - - return nil - } - - cn.Start() - defer cn.Stop() - - go func() { - //function to check that it tried to connect 2 times to aaa and 2 times to bbb - - for { - if atomic.LoadInt32(&aaaTriedToConnect) == 2 && - atomic.LoadInt32(&bbbTriedToConnect) == 2 { - chanDone <- true - return - } - } - }() - - select { - case <-chanDone: - fmt.Println("ConnectToPeer called 2 times for aaa and 2 times for bbb!") - case <-time.After(testConnNotifierMaxWaitTime): - assert.Fail(t, "ConnectToPeer have called 2 times for aaa and 2 times for bbb!") - return - } -} - -func TestConnNotifierTryToConnectNoOtherPeersRunning(t *testing.T) { - chanDone := make(chan bool, 0) - - cn := p2p.NewConnNotifier(1) - - cn.GetConnections = func(sender *p2p.ConnNotifier) []net.Conn { - return make([]net.Conn, 0) - } - - cn.IsConnected = func(sender *p2p.ConnNotifier, pid peer.ID) bool { - return true - } - - cn.GetKnownPeers = func(sender *p2p.ConnNotifier) []peer.ID { - return []peer.ID{"aaa", "bbb"} - } - - cn.ConnectToPeer = func(cn *p2p.ConnNotifier, id peer.ID) error { - chanDone <- true - - return nil - } - - cn.Start() - defer cn.Stop() - - select { - case <-chanDone: - fmt.Println("Should have not called to connect!") - return - case <-time.After(testConnNotifierMaxWaitTime): - - } -} - -func TestConnNotifierConnectedGetConnNilShouldCloseConn(t *testing.T) { - cn := p2p.NewConnNotifier(1) - - chanDone := make(chan bool, 0) - - connMonitored := mock.ConnMock{} - connMonitored.CloseCalled = func(connMock *mock.ConnMock) error { - chanDone <- true - - return nil - } - - go cn.Connected(nil, &connMonitored) - - select { - case <-chanDone: - fmt.Println("Connection closed as expected!") - case <-time.After(testConnNotifierMaxWaitTime): - assert.Fail(t, "Should have called conn.Close()!") - return - } -} - -func 
TestConnNotifierConnectedMaxConnReachedShouldCloseConn(t *testing.T) { - cn := p2p.NewConnNotifier(2) - - cn.GetConnections = func(sender *p2p.ConnNotifier) []net.Conn { - return []net.Conn{ - &mock.ConnMock{Status: net.Stat{Direction: net.DirInbound}}, - &mock.ConnMock{Status: net.Stat{Direction: net.DirInbound}}, - &mock.ConnMock{Status: net.Stat{Direction: net.DirInbound}}} - } - - chanDone := make(chan bool, 0) - - connMonitored := mock.ConnMock{} - connMonitored.CloseCalled = func(connMock *mock.ConnMock) error { - chanDone <- true - - return nil - } - - go cn.Connected(nil, &connMonitored) - - select { - case <-chanDone: - fmt.Println("Connection closed as expected!") - case <-time.After(testConnNotifierMaxWaitTime): - assert.Fail(t, "Should have called conn.Close()!") - return - } -} - -func TestConnNotifierConnectedCanAcceptShouldNotCloseConn(t *testing.T) { - cn := p2p.NewConnNotifier(2) - - cn.GetConnections = func(sender *p2p.ConnNotifier) []net.Conn { - return []net.Conn{ - &mock.ConnMock{Status: net.Stat{Direction: net.DirInbound}}} - } - - chanDone := make(chan bool, 0) - - connMonitored := mock.ConnMock{} - connMonitored.CloseCalled = func(connMock *mock.ConnMock) error { - chanDone <- true - - return nil - } - - go cn.Connected(nil, &connMonitored) - - select { - case <-chanDone: - assert.Fail(t, "Should have not called conn.Close()!") - return - case <-time.After(testConnNotifierWaitTimeForNoResponse): - fmt.Println("Connection not closed!") - - } -} diff --git a/p2p/connectParams.go b/p2p/connectParams.go deleted file mode 100644 index 53c0d0e3142..00000000000 --- a/p2p/connectParams.go +++ /dev/null @@ -1,79 +0,0 @@ -package p2p - -import ( - "crypto/ecdsa" - "fmt" - "math/rand" - - "github.com/btcsuite/btcd/btcec" - cr "github.com/libp2p/go-libp2p-crypto" - "github.com/libp2p/go-libp2p-peer" - "github.com/pkg/errors" -) - -const maxPorts = 65535 - -// ConnectParams is used to instantiate a Messenger object -// (contains required data by the 
Messenger struct) -type ConnectParams struct { - ID peer.ID - PrivKey cr.PrivKey - PubKey cr.PubKey - Port int -} - -// GeneratePrivPubKeys will generate a new private/public key pair starting from a seed -func (params *ConnectParams) GeneratePrivPubKeys(seed int64) { - r := rand.New(rand.NewSource(seed)) - - prvKey, err := ecdsa.GenerateKey(btcec.S256(), r) - - if err != nil { - panic(err) - } - - k := (*cr.Secp256k1PrivateKey)(prvKey) - - params.PrivKey = k - params.PubKey = k.GetPublic() -} - -// GenerateIDFromPubKey will set the params.ID to a hash of the params.PubKey -func (params *ConnectParams) GenerateIDFromPubKey() { - params.ID, _ = peer.IDFromPublicKey(params.PubKey) -} - -// NewConnectParamsFromPort will generate a new ConnectParams object by using the port -// as a seed for the random generation object -// SHOULD BE USED ONLY IN TESTING!!! -func NewConnectParamsFromPort(port int) (*ConnectParams, error) { - if port < 0 || port > maxPorts { - return nil, errors.New(fmt.Sprintf("port outside [0, %d]", maxPorts)) - } - - params := new(ConnectParams) - - params.Port = port - params.GeneratePrivPubKeys(int64(port)) - params.GenerateIDFromPubKey() - - return params, nil -} - -// NewConnectParams is used to generate a new ConnectParams. This is the proper -// way to initialize the object. 
The private key provided is used for -// data and channel encryption and can be used for authentication of messages -func NewConnectParams(port int, privKey cr.PrivKey) (*ConnectParams, error) { - if port < 0 || port > maxPorts { - return nil, errors.New(fmt.Sprintf("port outside [0, %d]", maxPorts)) - } - - params := new(ConnectParams) - - params.Port = port - params.PrivKey = privKey - params.PubKey = privKey.GetPublic() - params.GenerateIDFromPubKey() - - return params, nil -} diff --git a/p2p/connectParams_test.go b/p2p/connectParams_test.go deleted file mode 100644 index 1a8310ff6ad..00000000000 --- a/p2p/connectParams_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package p2p_test - -import ( - "bytes" - "encoding/base64" - "encoding/hex" - "fmt" - "testing" - - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/libp2p/go-libp2p-crypto" - "github.com/libp2p/go-libp2p-peer" - "github.com/stretchr/testify/assert" -) - -func TestConnectParamsNewConnectParamsFromPortFromInvalidPortShouldErr(t *testing.T) { - //invalid port - _, err := p2p.NewConnectParamsFromPort(65536) - assert.NotNil(t, err) -} - -func TestConnectParamsNewConnectParamsFromPortGoodPortShouldWork(t *testing.T) { - cp, err := p2p.NewConnectParamsFromPort(65535) - assert.Nil(t, err) - - buff, err := cp.PrivKey.Bytes() - assert.Nil(t, err) - - fmt.Printf("Private key: %v\n", buff) - - buff, err = cp.PubKey.Bytes() - assert.Nil(t, err) - - fmt.Printf("Public key: %v\n", buff) - fmt.Printf("ID: %v\n", cp.ID.Pretty()) - -} - -func TestConnectParamsNewConnectParamsGoodValuesShouldWork(t *testing.T) { - buffPrivKey := []byte{8, 2, 18, 32, 240, 44, 132, 237, 70, - 30, 188, 118, 0, 25, 28, 224, 190, 134, 240, 66, 58, 63, - 181, 131, 208, 151, 28, 19, 89, 49, 67, 184, 225, 63, 248, 166} - - buffPubKey := []byte{8, 2, 18, 33, 2, 177, 16, 21, 115, 117, 145, - 182, 92, 142, 155, 26, 135, 89, 80, 140, 70, 129, 67, 40, 43, 71, - 196, 19, 170, 252, 70, 103, 157, 161, 72, 124, 36} - - pid := 
"16Uiu2HAm7LrNF9uTDVBPxQovFGcYJGqu8ZEndNADitpeQh52yCN7" - - prv, err := crypto.UnmarshalPrivateKey(buffPrivKey) - assert.Nil(t, err) - - params, err := p2p.NewConnectParams(4000, prv) - assert.Nil(t, err) - - buffPrivKeyComputed, err := prv.Bytes() - assert.Nil(t, err) - - assert.Equal(t, 0, bytes.Compare(buffPrivKeyComputed, buffPrivKey)) - - buffPubKeyComputed, err := params.PubKey.Bytes() - assert.Nil(t, err) - - assert.Equal(t, 0, bytes.Compare(buffPrivKeyComputed, buffPrivKey)) - assert.Equal(t, 0, bytes.Compare(buffPubKeyComputed, buffPubKey)) - - assert.Equal(t, pid, params.ID.Pretty()) -} - -func TestConnectParamsSignVerifyGoodValuesShouldWork(t *testing.T) { - params, err := p2p.NewConnectParamsFromPort(4000) - assert.Nil(t, err) - - bPrivKey, _ := params.PrivKey.Bytes() - fmt.Printf("Priv key: %v\n", hex.EncodeToString(bPrivKey)) - - bPubKey, _ := params.PubKey.Bytes() - fmt.Printf("Pub key: %v\n", hex.EncodeToString(bPubKey)) - - fmt.Printf("ID: %v\n", params.ID.Pretty()) - - buffSig, err := params.PrivKey.Sign([]byte{65, 66, 67}) - assert.Nil(t, err) - fmt.Printf("Sig: %v\n", base64.StdEncoding.EncodeToString(buffSig)) - - buffPubKey, err := crypto.MarshalPublicKey(params.PubKey) - fmt.Printf("Marshaled pub key: %v\n", base64.StdEncoding.EncodeToString(buffPubKey)) - - //recovery and verify - pubKeyVerif, err := crypto.UnmarshalPublicKey(buffPubKey) - assert.Nil(t, err) - - signed, err := pubKeyVerif.Verify([]byte{65, 66, 67}, buffSig) - assert.Nil(t, err) - - fmt.Printf("Signed \"ABC\"? 
%v\n", signed) - idVerif, err := peer.IDFromPublicKey(pubKeyVerif) - assert.Nil(t, err) - fmt.Printf("Signed by %v\n", idVerif.Pretty()) - - assert.Equal(t, idVerif, params.ID) - -} diff --git a/p2p/errors.go b/p2p/errors.go new file mode 100644 index 00000000000..aa84094a016 --- /dev/null +++ b/p2p/errors.go @@ -0,0 +1,68 @@ +package p2p + +import ( + "github.com/pkg/errors" +) + +// ErrNilContext signals that a nil context was provided +var ErrNilContext = errors.New("nil context") + +// ErrInvalidPort signals that an invalid port was provided +var ErrInvalidPort = errors.New("invalid port provided") + +// ErrNilP2PprivateKey signals that a nil P2P private key has been provided +var ErrNilP2PprivateKey = errors.New("nil P2P private key") + +// ErrNilMockNet signals that a nil mocknet was provided. Should occur only in testing!!! +var ErrNilMockNet = errors.New("nil mocknet provided") + +// ErrNilTopic signals that a nil topic has been provided +var ErrNilTopic = errors.New("nil topic") + +// ErrTopicAlreadyExists signals that a topic already exists +var ErrTopicAlreadyExists = errors.New("topic already exists") + +// ErrTopicValidatorOperationNotSupported signals that an unsupported validator operation occurred +var ErrTopicValidatorOperationNotSupported = errors.New("topic validator operation is not supported") + +// ErrNilDiscoverer signals that a nil discoverer object has been provided +var ErrNilDiscoverer = errors.New("nil discoverer object") + +// ErrNilPipeLoadBalancer signals that a nil data throttler object has been provided +var ErrNilPipeLoadBalancer = errors.New("nil pipe load balancer object") + +// ErrPipeAlreadyExists signals that the pipe is already defined (and used) +var ErrPipeAlreadyExists = errors.New("pipe already exists") + +// ErrPipeDoNotExists signals that a requested pipe does not exists +var ErrPipeDoNotExists = errors.New("pipe does not exists") + +// ErrPipeCanNotBeDeleted signals that a pipe can not be deleted (might be the default 
pipe) +var ErrPipeCanNotBeDeleted = errors.New("pipe can not be deleted") + +// ErrNilMessage signals that a nil message has been received +var ErrNilMessage = errors.New("nil message") + +// ErrEmptyTopicList signals that a message with empty topic ids has been received +var ErrEmptyTopicList = errors.New("empty topicIDs") + +// ErrAlreadySeenMessage signals that the message has already been seen +var ErrAlreadySeenMessage = errors.New("already seen this message") + +// ErrNilDirectSendMessageHandler signals that the message handler for new message has not been wired +var ErrNilDirectSendMessageHandler = errors.New("nil direct sender message handler") + +// ErrPeerNotDirectlyConnected signals that the peer is not directly connected to self +var ErrPeerNotDirectlyConnected = errors.New("peer is not directly connected") + +// ErrNilHost signals that a nil host has been provided +var ErrNilHost = errors.New("nil host") + +// ErrNilValidator signals that a validator hasn't been set for the required topic +var ErrNilValidator = errors.New("no validator has been set for this topic") + +// ErrPeerDiscoveryProcessAlreadyStarted signals that mdns peer discovery is already turned on +var ErrPeerDiscoveryProcessAlreadyStarted = errors.New("mdns peer discovery is already turned enabled") + +// ErrPeerDiscoveryNotImplemented signals that peer discovery is not implemented +var ErrPeerDiscoveryNotImplemented = errors.New("unimplemented peer discovery") diff --git a/p2p/example/libp2p/autodiscovery/main.go b/p2p/example/libp2p/autodiscovery/main.go new file mode 100644 index 00000000000..d0799bc3541 --- /dev/null +++ b/p2p/example/libp2p/autodiscovery/main.go @@ -0,0 +1,160 @@ +package main + +import ( + "context" + "fmt" + "math/rand" + "strconv" + "strings" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/display" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p/libp2p" + 
"github.com/ElrondNetwork/elrond-go-sandbox/p2p/loadBalancer" + cr "github.com/libp2p/go-libp2p-crypto" +) + +var r *rand.Rand + +//The purpose of this example program is to show what happens if a peer connects to a network of 100 peers +func main() { + r = rand.New(rand.NewSource(time.Now().UnixNano())) + startingPort := 32000 + + advertiser, _ := libp2p.NewNetworkMessenger( + context.Background(), + startingPort, + genPrivKey(), + nil, + loadBalancer.NewOutgoingPipeLoadBalancer(), + p2p.PeerDiscoveryKadDht, + ) + startingPort++ + fmt.Printf("advertiser is %s\n", getConnectableAddress(advertiser)) + peers := make([]p2p.Messenger, 0) + go func() { + _ = advertiser.KadDhtDiscoverNewPeers() + time.Sleep(time.Second) + }() + + for i := 0; i < 99; i++ { + netPeer, _ := libp2p.NewNetworkMessenger( + context.Background(), + startingPort, + genPrivKey(), + nil, + loadBalancer.NewOutgoingPipeLoadBalancer(), + p2p.PeerDiscoveryKadDht, + ) + startingPort++ + + fmt.Printf("%s connecting to %s...\n", + getConnectableAddress(netPeer), + getConnectableAddress(advertiser)) + + _ = netPeer.ConnectToPeer(getConnectableAddress(advertiser)) + _ = netPeer.KadDhtDiscoverNewPeers() + + peers = append(peers, netPeer) + + go func() { + _ = netPeer.KadDhtDiscoverNewPeers() + time.Sleep(time.Second) + }() + } + + //display func + go func() { + for { + time.Sleep(time.Second) + showConnections(advertiser, peers) + } + }() + + time.Sleep(time.Second * 15) + + _ = advertiser.Close() + for _, peer := range peers { + if peer == nil { + continue + } + + _ = peer.Close() + } +} + +func getConnectableAddress(peer p2p.Messenger) string { + for _, adr := range peer.Addresses() { + if strings.Contains(adr, "127.0.0.1") { + return adr + } + } + + return "" +} + +func genPrivKey() cr.PrivKey { + prv, _, _ := cr.GenerateKeyPairWithReader(cr.Ed25519, 0, r) + return prv +} + +func showConnections(advertiser p2p.Messenger, peers []p2p.Messenger) { + header := []string{"Node", "Address", "No. 
of conns"} + + lines := make([]*display.LineData, 0) + lines = append(lines, createDataLine(advertiser, advertiser, peers)) + + for i := 0; i < len(peers); i++ { + lines = append(lines, createDataLine(peers[i], advertiser, peers)) + } + + table, _ := display.CreateTableString(header, lines) + + fmt.Println(table) +} + +func createDataLine(peer p2p.Messenger, advertiser p2p.Messenger, peers []p2p.Messenger) *display.LineData { + ld := &display.LineData{} + + if peer == nil { + ld.Values = []string{"", "", "0"} + return ld + } + + nodeName := "Peer" + if advertiser == peer { + nodeName = "Advertiser" + } + + ld.Values = []string{nodeName, + getConnectableAddress(peer), + strconv.Itoa(computeConnectionsCount(peer, advertiser, peers))} + + return ld +} + +func computeConnectionsCount(peer p2p.Messenger, advertiser p2p.Messenger, peers []p2p.Messenger) int { + if peer == nil { + return 0 + } + + knownPeers := 0 + if peer.IsConnected(advertiser.ID()) { + knownPeers++ + } + + for i := 0; i < len(peers); i++ { + p := peers[i] + + if p == nil { + continue + } + + if peer.IsConnected(peers[i].ID()) { + knownPeers++ + } + } + + return knownPeers +} diff --git a/p2p/example/libp2p/chatAdvertiser/main.go b/p2p/example/libp2p/chatAdvertiser/main.go new file mode 100644 index 00000000000..eb389062a1d --- /dev/null +++ b/p2p/example/libp2p/chatAdvertiser/main.go @@ -0,0 +1,128 @@ +package main + +import ( + "bufio" + "context" + "fmt" + + "github.com/ipfs/go-log" + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p-discovery" + libp2pdht "github.com/libp2p/go-libp2p-kad-dht" + inet "github.com/libp2p/go-libp2p-net" + "github.com/libp2p/go-libp2p-protocol" + multiaddr "github.com/multiformats/go-multiaddr" + logging "github.com/whyrusleeping/go-logging" +) + +var logger = log.Logger("rendezvous") + +var protocolID = "/elrondnetwork/1.0.0" +var randevouzString = "ElrondNetwork - randevous" + +func handleStream(stream inet.Stream) { + logger.Info("Got a new stream!") + + // 
Create a buffer stream for non blocking read and write. + rw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream)) + + go readData(rw) + + // 'stream' will stay open until you close it (or the other side closes it). +} + +func readData(rw *bufio.ReadWriter) { + for { + str, err := rw.ReadString('\n') + if err != nil { + fmt.Println("Error reading from buffer") + return + } + + if str == "" { + return + } + if str != "\n" { + // Green console colour: \x1b[32m + // Reset console colour: \x1b[0m + fmt.Printf("\x1b[32m%s\x1b[0m> ", str) + } + + } +} + +func main() { + log.SetAllLoggers(logging.WARNING) + log.SetLogLevel("rendezvous", "info") + + ctx := context.Background() + + // libp2p.New constructs a new libp2p Host. Other options can be added + // here. + host, err := libp2p.New(ctx, + libp2p.ListenAddrs([]multiaddr.Multiaddr(make([]multiaddr.Multiaddr, 0))...), + ) + if err != nil { + panic(err) + } + logger.Info("Host created. We are:", host.ID().Pretty()) + logger.Info(host.Addrs()) + + // Set a function as stream handler. This function is called when a peer + // initiates a connection and starts a stream with this peer. + host.SetStreamHandler(protocol.ID(protocolID), handleStream) + + // Start a DHT, for use in peer discovery. We can't just make a new DHT + // client because we want each peer to maintain its own local copy of the + // DHT, so that the bootstrapping node of the DHT can go down without + // inhibiting future peer discovery. + kademliaDHT, err := libp2pdht.New(ctx, host) + if err != nil { + panic(err) + } + + // Bootstrap the DHT. In the default configuration, this spawns a Background + // thread that will refresh the peer table every five minutes. + logger.Debug("Bootstrapping the DHT") + if err = kademliaDHT.Bootstrap(ctx); err != nil { + panic(err) + } + + // We use a rendezvous point "meet me here" to announce our location. + // This is like telling your friends to meet you at the Eiffel Tower. 
+ logger.Info("Announcing ourselves...") + routingDiscovery := discovery.NewRoutingDiscovery(kademliaDHT) + discovery.Advertise(ctx, routingDiscovery, randevouzString) + logger.Debug("Successfully announced!") + + // Now, look for others who have announced + // This is like your friend telling you the location to meet you. + logger.Debug("Searching for other peers...") + peerChan, err := routingDiscovery.FindPeers(ctx, randevouzString) + if err != nil { + panic(err) + } + + for peer := range peerChan { + if peer.ID == host.ID() { + continue + } + logger.Debug("Found peer:", peer) + + logger.Debug("Connecting to:", peer) + stream, err := host.NewStream(ctx, peer.ID, protocol.ID(protocolID)) + + if err != nil { + logger.Warning("Connection failed:", err) + continue + } else { + rw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream)) + + go readData(rw) + } + + logger.Info("Connected to:", peer) + } + + select {} +} diff --git a/p2p/example/libp2p/chatPeer/flags.go b/p2p/example/libp2p/chatPeer/flags.go new file mode 100644 index 00000000000..e58cb55fe36 --- /dev/null +++ b/p2p/example/libp2p/chatPeer/flags.go @@ -0,0 +1,64 @@ +package main + +import ( + "flag" + "strings" + + maddr "github.com/multiformats/go-multiaddr" +) + +// A new type we need for writing a custom flag parser +type addrList []maddr.Multiaddr + +func (al *addrList) String() string { + strs := make([]string, len(*al)) + for i, addr := range *al { + strs[i] = addr.String() + } + return strings.Join(strs, ",") +} + +func (al *addrList) Set(value string) error { + addr, err := maddr.NewMultiaddr(value) + if err != nil { + return err + } + *al = append(*al, addr) + return nil +} + +// IPFS bootstrap nodes. Used to find other peers in the network. 
+var defaultBootstrapAddrStrings = make([]string, 0) + +func StringsToAddrs(addrStrings []string) (maddrs []maddr.Multiaddr, err error) { + for _, addrString := range addrStrings { + addr, err := maddr.NewMultiaddr(addrString) + if err != nil { + return maddrs, err + } + maddrs = append(maddrs, addr) + } + return +} + +type Config struct { + BootstrapPeers addrList + ListenAddresses addrList +} + +func ParseFlags() (Config, error) { + config := Config{} + flag.Var(&config.BootstrapPeers, "peer", "Adds a peer multiaddress to the bootstrap list") + flag.Var(&config.ListenAddresses, "listen", "Adds a multiaddress to the listen list") + flag.Parse() + + if len(config.BootstrapPeers) == 0 { + bootstrapPeerAddrs, err := StringsToAddrs(defaultBootstrapAddrStrings) + if err != nil { + return config, err + } + config.BootstrapPeers = bootstrapPeerAddrs + } + + return config, nil +} diff --git a/p2p/example/libp2p/chatPeer/main.go b/p2p/example/libp2p/chatPeer/main.go new file mode 100644 index 00000000000..521cb3d4862 --- /dev/null +++ b/p2p/example/libp2p/chatPeer/main.go @@ -0,0 +1,204 @@ +package main + +import ( + "bufio" + "context" + "flag" + "fmt" + "os" + "sync" + + "github.com/ipfs/go-log" + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p-discovery" + libp2pdht "github.com/libp2p/go-libp2p-kad-dht" + inet "github.com/libp2p/go-libp2p-net" + "github.com/libp2p/go-libp2p-peerstore" + "github.com/libp2p/go-libp2p-protocol" + multiaddr "github.com/multiformats/go-multiaddr" + logging "github.com/whyrusleeping/go-logging" +) + +var logger = log.Logger("rendezvous") + +var protocolID = "/elrondnetwork/1.0.0" +var randevouzString = "ElrondNetwork - randevous" + +var chans []chan string + +func handleStream(stream inet.Stream) { + logger.Info("Got a new stream!") + + // Create a buffer stream for non blocking read and write. 
+ rw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream)) + + chanStr := make(chan string, 10000) + chans = append(chans, chanStr) + + go readData(rw) + go writeData(rw, chanStr) + + // 'stream' will stay open until you close it (or the other side closes it). +} + +func readData(rw *bufio.ReadWriter) { + for { + str, err := rw.ReadString('\n') + if err != nil { + fmt.Println("Error reading from buffer") + return + } + + if str == "" { + return + } + if str != "\n" { + // Green console colour: \x1b[32m + // Reset console colour: \x1b[0m + fmt.Printf("\x1b[32m%s\x1b[0m> ", str) + } + + } +} + +func writeData(rw *bufio.ReadWriter, chanStr chan string) { + for { + data := <-chanStr + + _, err := rw.WriteString(data) + if err != nil { + fmt.Println("Error writing to buffer") + return + } + err = rw.Flush() + if err != nil { + fmt.Println("Error flushing buffer") + return + } + } +} + +func main() { + chans = make([]chan string, 0) + + log.SetAllLoggers(logging.WARNING) + log.SetLogLevel("rendezvous", "info") + help := flag.Bool("h", false, "Display Help") + config, err := ParseFlags() + if err != nil { + panic(err) + } + + if *help { + fmt.Println("This program demonstrates a simple p2p chat application using libp2p") + fmt.Println() + fmt.Println("Usage: Run './chat in two different terminals. Let them connect to the bootstrap nodes, announce themselves and connect to the peers") + flag.PrintDefaults() + return + } + + ctx := context.Background() + + // libp2p.New constructs a new libp2p Host. Other options can be added + // here. + host, err := libp2p.New(ctx, + libp2p.ListenAddrs([]multiaddr.Multiaddr(config.ListenAddresses)...), + ) + if err != nil { + panic(err) + } + logger.Info("Host created. We are:", host.ID().Pretty()) + logger.Info(host.Addrs()) + + // Set a function as stream handler. This function is called when a peer + // initiates a connection and starts a stream with this peer. 
+ host.SetStreamHandler(protocol.ID(protocolID), handleStream) + + // Start a DHT, for use in peer discovery. We can't just make a new DHT + // client because we want each peer to maintain its own local copy of the + // DHT, so that the bootstrapping node of the DHT can go down without + // inhibiting future peer discovery. + kademliaDHT, err := libp2pdht.New(ctx, host) + if err != nil { + panic(err) + } + + // Bootstrap the DHT. In the default configuration, this spawns a Background + // thread that will refresh the peer table every five minutes. + logger.Debug("Bootstrapping the DHT") + if err = kademliaDHT.Bootstrap(ctx); err != nil { + panic(err) + } + + // Let's connect to the bootstrap nodes first. They will tell us about the + // other nodes in the network. + var wg sync.WaitGroup + for _, peerAddr := range config.BootstrapPeers { + peerinfo, _ := peerstore.InfoFromP2pAddr(peerAddr) + wg.Add(1) + go func() { + defer wg.Done() + if err := host.Connect(ctx, *peerinfo); err != nil { + logger.Warning(err) + } else { + logger.Info("Connection established with bootstrap node:", *peerinfo) + } + }() + } + wg.Wait() + + // We use a rendezvous point "meet me here" to announce our location. + // This is like telling your friends to meet you at the Eiffel Tower. + logger.Info("Announcing ourselves...") + routingDiscovery := discovery.NewRoutingDiscovery(kademliaDHT) + discovery.Advertise(ctx, routingDiscovery, randevouzString) + logger.Debug("Successfully announced!") + + // Now, look for others who have announced + // This is like your friend telling you the location to meet you. 
+ logger.Debug("Searching for other peers...") + peerChan, err := routingDiscovery.FindPeers(ctx, randevouzString) + if err != nil { + panic(err) + } + + for peer := range peerChan { + if peer.ID == host.ID() { + continue + } + logger.Debug("Found peer:", peer) + + logger.Debug("Connecting to:", peer) + stream, err := host.NewStream(ctx, peer.ID, protocol.ID(protocolID)) + + if err != nil { + logger.Warning("Connection failed:", err) + continue + } else { + rw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream)) + + chanStr := make(chan string, 10000) + chans = append(chans, chanStr) + + go writeData(rw, chanStr) + go readData(rw) + } + + logger.Info("Connected to:", peer) + } + + stdReader := bufio.NewReader(os.Stdin) + + for { + fmt.Print("> ") + sendData, err := stdReader.ReadString('\n') + if err != nil { + fmt.Println("Error reading from stdin") + panic(err) + } + + for i := 0; i < len(chans); i++ { + chans[i] <- fmt.Sprintf("%s\n", sendData) + } + } +} diff --git a/p2p/example/libp2p/internalBroadcastSpeedMeasure/main.go b/p2p/example/libp2p/internalBroadcastSpeedMeasure/main.go new file mode 100644 index 00000000000..2a278ce1e21 --- /dev/null +++ b/p2p/example/libp2p/internalBroadcastSpeedMeasure/main.go @@ -0,0 +1,107 @@ +package main + +import ( + "context" + "fmt" + "sync/atomic" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p/libp2p" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p/libp2p/mock" + "github.com/libp2p/go-libp2p/p2p/net/mock" +) + +func main() { + net := mocknet.New(context.Background()) + + mes1, _ := libp2p.NewMemoryMessenger(context.Background(), net, p2p.PeerDiscoveryOff) + mes2, _ := libp2p.NewMemoryMessenger(context.Background(), net, p2p.PeerDiscoveryOff) + + adr2 := mes2.Addresses()[0] + + fmt.Printf("Connecting to %s...\n", adr2) + + _ = mes1.ConnectToPeer(adr2) + + _ = mes1.CreateTopic("test1", true) + _ = mes1.CreateTopic("test2", true) + _ = 
mes1.CreateTopic("test3", true) + + _ = mes2.CreateTopic("test1", true) + _ = mes2.CreateTopic("test2", true) + _ = mes2.CreateTopic("test3", true) + + bytesReceived1 := int64(0) + bytesReceived2 := int64(0) + bytesReceived3 := int64(0) + + _ = mes1.RegisterMessageProcessor("test1", + &mock.MessageProcessorStub{ + ProcessMessageCalled: func(message p2p.MessageP2P) error { + atomic.AddInt64(&bytesReceived1, int64(len(message.Data()))) + + return nil + }, + }) + + _ = mes1.RegisterMessageProcessor("test2", &mock.MessageProcessorStub{ + ProcessMessageCalled: func(message p2p.MessageP2P) error { + atomic.AddInt64(&bytesReceived2, int64(len(message.Data()))) + + return nil + }, + }) + + _ = mes1.RegisterMessageProcessor("test3", &mock.MessageProcessorStub{ + ProcessMessageCalled: func(message p2p.MessageP2P) error { + atomic.AddInt64(&bytesReceived3, int64(len(message.Data()))) + + return nil + }, + }) + + time.Sleep(time.Second) + + timeStart := time.Now() + bytesSent := int64(0) + + durTest := time.Second * 5 + + fmt.Printf("Testing for %s...\n", durTest.String()) + + for time.Now().UnixNano() < timeStart.Add(durTest).UnixNano() { + buffSize := 5000 + buff := make([]byte, buffSize) + bytesSent += int64(buffSize) + + mes2.Broadcast("test1", buff) + mes2.Broadcast("test2", buff) + //topic test3 receives more requests to send + mes2.Broadcast("test3", buff) + mes2.Broadcast("test3", buff) + } + + fmt.Printf("Sent: %s -> %s\nReceived pipe 1 %s -> %s\nReceived pipe 2 %s -> %s\nReceived pipe 3 %s -> %s\n", + bytesPretty(float64(bytesSent)), bytesPerSecPretty(bytesSent, durTest), + bytesPretty(float64(bytesReceived1)), bytesPerSecPretty(bytesReceived1, durTest), + bytesPretty(float64(bytesReceived2)), bytesPerSecPretty(bytesReceived2, durTest), + bytesPretty(float64(bytesReceived3)), bytesPerSecPretty(bytesReceived3, durTest)) + +} + +func bytesPretty(bytes float64) string { + if bytes < 1024 { + return fmt.Sprintf("%.0f bytes", bytes) + } + + if bytes < 1048576 { + return 
fmt.Sprintf("%.2f kB", bytes/1024.0) + } + + return fmt.Sprintf("%.2f MB", bytes/1048576.0) +} + +func bytesPerSecPretty(bytes int64, dur time.Duration) string { + return bytesPretty(float64(bytes)/dur.Seconds()) + "/s" +} diff --git a/p2p/export_test.go b/p2p/export_test.go deleted file mode 100644 index 7f876c5bb08..00000000000 --- a/p2p/export_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package p2p - -import ( - "github.com/ElrondNetwork/elrond-go-sandbox/marshal" - "github.com/libp2p/go-libp2p-net" - "github.com/libp2p/go-libp2p-peer" -) - -func (cn *ConnNotifier) ComputeInboundOutboundConns(conns []net.Conn) (inConns, outConns int) { - return cn.computeInboundOutboundConns(conns) -} - -func (t *Topic) EventBusData() []DataReceivedHandler { - return t.eventBusDataRcvHandlers -} - -func (t *Topic) Marsh() marshal.Marshalizer { - return t.marsh -} - -var DurTimeCache = durTimeCache -var MutGloballyRegPeers = &mutGloballyRegPeers -var Log = log - -func RecreateGlobbalyRegisteredMemPeersMap() { - globallyRegisteredPeers = make(map[peer.ID]*MemMessenger) -} diff --git a/p2p/interface.go b/p2p/interface.go deleted file mode 100644 index 53522e6ef00..00000000000 --- a/p2p/interface.go +++ /dev/null @@ -1,43 +0,0 @@ -package p2p - -import ( - "context" - "io" - "time" - - "github.com/ElrondNetwork/elrond-go-sandbox/hashing" - "github.com/ElrondNetwork/elrond-go-sandbox/logger" - "github.com/ElrondNetwork/elrond-go-sandbox/marshal" - "github.com/libp2p/go-libp2p-net" - "github.com/libp2p/go-libp2p-peer" - "github.com/multiformats/go-multiaddr" -) - -// Creator interface will be implemented on structs that can create new instances of their type -// We prefer this method as reflection is more costly -type Creator interface { - Create() Creator - ID() string -} - -var log = logger.NewDefaultLogger() - -// Messenger is the main struct used for communicating with other peers -type Messenger interface { - io.Closer - - ID() peer.ID - Peers() []peer.ID - Conns() []net.Conn - 
Marshalizer() marshal.Marshalizer - Hasher() hashing.Hasher - RouteTable() *RoutingTable - Addresses() []string - ConnectToAddresses(ctx context.Context, addresses []string) - Bootstrap(ctx context.Context) - PrintConnected() - AddAddress(p peer.ID, addr multiaddr.Multiaddr, ttl time.Duration) - Connectedness(pid peer.ID) net.Connectedness - GetTopic(topicName string) *Topic - AddTopic(t *Topic) error -} diff --git a/p2p/libp2p/directSender.go b/p2p/libp2p/directSender.go new file mode 100644 index 00000000000..d9442b62a81 --- /dev/null +++ b/p2p/libp2p/directSender.go @@ -0,0 +1,208 @@ +package libp2p + +import ( + "bufio" + "context" + "encoding/binary" + "fmt" + "io" + "sync/atomic" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + ggio "github.com/gogo/protobuf/io" + "github.com/gogo/protobuf/proto" + "github.com/libp2p/go-libp2p-host" + "github.com/libp2p/go-libp2p-net" + "github.com/libp2p/go-libp2p-peer" + "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p-pubsub/pb" + "github.com/whyrusleeping/timecache" +) + +const timeSeenMessages = time.Second * 120 + +type directSender struct { + counter uint64 + ctx context.Context + hostP2P host.Host + messageHandler func(msg p2p.MessageP2P) error + seenMessages *timecache.TimeCache +} + +// NewDirectSender returns a new instance of direct sender object +func NewDirectSender( + ctx context.Context, + h host.Host, + messageHandler func(msg p2p.MessageP2P) error, +) (*directSender, error) { + + if h == nil { + return nil, p2p.ErrNilHost + } + + if ctx == nil { + return nil, p2p.ErrNilContext + } + + if messageHandler == nil { + return nil, p2p.ErrNilDirectSendMessageHandler + } + + ds := &directSender{ + counter: uint64(time.Now().UnixNano()), + ctx: ctx, + hostP2P: h, + seenMessages: timecache.NewTimeCache(timeSeenMessages), + messageHandler: messageHandler, + } + + //wire-up a handler for direct messages + h.SetStreamHandler(DirectSendID, ds.directStreamHandler) + + return ds, nil +} + 
+func (ds *directSender) directStreamHandler(s net.Stream) { + reader := ggio.NewDelimitedReader(s, 1<<20) + + go func(r ggio.ReadCloser) { + for { + msg := &pubsub_pb.Message{} + + err := reader.ReadMsg(msg) + if err != nil { + //stream has encountered an error, close this go routine + + if err != io.EOF { + _ = s.Reset() + log.Debug(fmt.Sprintf("error reading rpc from %s: %s", s.Conn().RemotePeer(), err)) + } else { + // Just be nice. They probably won't read this + // but it doesn't hurt to send it. + _ = s.Close() + } + return + } + + err = ds.processReceivedDirectMessage(msg) + if err != nil { + log.Debug(err.Error()) + } + } + }(reader) +} + +func (ds *directSender) processReceivedDirectMessage(message *pubsub_pb.Message) error { + if message == nil { + return p2p.ErrNilMessage + } + + if message.TopicIDs == nil { + return p2p.ErrNilTopic + } + + if len(message.TopicIDs) == 0 { + return p2p.ErrEmptyTopicList + } + + if ds.checkAndSetSeenMessage(message) { + return p2p.ErrAlreadySeenMessage + } + + p2pMsg := NewMessage(&pubsub.Message{Message: message}) + return ds.messageHandler(p2pMsg) +} + +func (ds *directSender) checkAndSetSeenMessage(msg *pubsub_pb.Message) bool { + msgId := string(msg.GetFrom()) + string(msg.GetSeqno()) + + if ds.seenMessages.Has(msgId) { + return true + } + + ds.seenMessages.Add(msgId) + return false +} + +// NextSeqno returns the next uint64 found in *counter as byte slice +func (ds *directSender) NextSeqno(counter *uint64) []byte { + seqno := make([]byte, 8) + newVal := atomic.AddUint64(counter, 1) + binary.BigEndian.PutUint64(seqno, newVal) + return seqno +} + +// Send will send a direct message to the connected peer +func (ds *directSender) Send(topic string, buff []byte, peer p2p.PeerID) error { + conn, err := ds.getConnection(peer) + if err != nil { + return err + } + + stream, err := ds.getOrCreateStream(conn) + if err != nil { + return err + } + + msg := ds.createMessage(topic, buff, conn) + + bufw := bufio.NewWriter(stream) + 
w := ggio.NewDelimitedWriter(bufw) + + go func(msg proto.Message) { + err := w.WriteMsg(msg) + log.LogIfError(err) + + err = bufw.Flush() + log.LogIfError(err) + }(msg) + + return nil +} + +func (ds *directSender) getConnection(p p2p.PeerID) (net.Conn, error) { + conns := ds.hostP2P.Network().ConnsToPeer(peer.ID(p)) + + if len(conns) == 0 { + return nil, p2p.ErrPeerNotDirectlyConnected + } + + return conns[0], nil +} + +func (ds *directSender) getOrCreateStream(conn net.Conn) (net.Stream, error) { + streams := conn.GetStreams() + var foundStream net.Stream + for i := 0; i < len(streams); i++ { + isExpectedStream := streams[i].Protocol() == DirectSendID + isSendableStream := streams[i].Stat().Direction == net.DirOutbound + + if isExpectedStream && isSendableStream { + foundStream = streams[i] + break + } + } + + var err error + + if foundStream == nil { + foundStream, err = ds.hostP2P.NewStream(ds.ctx, conn.RemotePeer(), DirectSendID) + if err != nil { + return nil, err + } + } + + return foundStream, nil +} + +func (ds *directSender) createMessage(topic string, buff []byte, conn net.Conn) *pubsub_pb.Message { + seqno := ds.NextSeqno(&ds.counter) + mes := pubsub_pb.Message{} + mes.Data = buff + mes.TopicIDs = []string{topic} + mes.From = []byte(conn.LocalPeer()) + mes.Seqno = seqno + + return &mes +} diff --git a/p2p/libp2p/directSender_test.go b/p2p/libp2p/directSender_test.go new file mode 100644 index 00000000000..ad61e150384 --- /dev/null +++ b/p2p/libp2p/directSender_test.go @@ -0,0 +1,521 @@ +package libp2p_test + +import ( + "context" + "crypto/ecdsa" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p/libp2p" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p/libp2p/mock" + "github.com/btcsuite/btcd/btcec" + ggio "github.com/gogo/protobuf/io" + "github.com/libp2p/go-libp2p-crypto" + "github.com/libp2p/go-libp2p-net" + "github.com/libp2p/go-libp2p-peer" + 
"github.com/libp2p/go-libp2p-protocol" + "github.com/libp2p/go-libp2p-pubsub/pb" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +const timeout = time.Second * 5 + +var blankMessageHandler = func(msg p2p.MessageP2P) error { + return nil +} + +func generateHostStub() *mock.HostStub { + return &mock.HostStub{ + SetStreamHandlerCalled: func(pid protocol.ID, handler net.StreamHandler) {}, + } +} + +func createConnStub(stream net.Stream, id peer.ID, sk crypto.PrivKey, remotePeer peer.ID) *mock.ConnStub { + return &mock.ConnStub{ + GetStreamsCalled: func() []net.Stream { + if stream == nil { + return make([]net.Stream, 0) + } + + return []net.Stream{stream} + }, + LocalPeerCalled: func() peer.ID { + return id + }, + LocalPrivateKeyCalled: func() crypto.PrivKey { + return sk + }, + RemotePeerCalled: func() peer.ID { + return remotePeer + }, + } +} + +func createLibP2PCredentialsDirectSender() (peer.ID, crypto.PrivKey) { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + prvKey, _ := ecdsa.GenerateKey(btcec.S256(), r) + sk := (*crypto.Secp256k1PrivateKey)(prvKey) + id, _ := peer.IDFromPublicKey(sk.GetPublic()) + + return id, sk +} + +//------- NewDirectSender + +func TestNewDirectSender_NilContextShouldErr(t *testing.T) { + hs := &mock.HostStub{} + + ds, err := libp2p.NewDirectSender(nil, hs, func(msg p2p.MessageP2P) error { + return nil + }) + + assert.Nil(t, ds) + assert.Equal(t, p2p.ErrNilContext, err) +} + +func TestNewDirectSender_NilHostShouldErr(t *testing.T) { + ds, err := libp2p.NewDirectSender(context.Background(), nil, func(msg p2p.MessageP2P) error { + return nil + }) + + assert.Nil(t, ds) + assert.Equal(t, p2p.ErrNilHost, err) +} + +func TestNewDirectSender_NilMessageHandlerShouldErr(t *testing.T) { + ds, err := libp2p.NewDirectSender(context.Background(), generateHostStub(), nil) + + assert.Nil(t, ds) + assert.Equal(t, p2p.ErrNilDirectSendMessageHandler, err) +} + +func TestNewDirectSender_OkValsShouldWork(t *testing.T) { + ds, err 
:= libp2p.NewDirectSender(context.Background(), generateHostStub(), func(msg p2p.MessageP2P) error { + return nil + }) + + assert.NotNil(t, ds) + assert.Nil(t, err) +} + +func TestNewDirectSender_OkValsShouldCallSetStreamHandlerWithCorrectValues(t *testing.T) { + var pidCalled protocol.ID + var handlerCalled net.StreamHandler + + hs := &mock.HostStub{ + SetStreamHandlerCalled: func(pid protocol.ID, handler net.StreamHandler) { + pidCalled = pid + handlerCalled = handler + }, + } + + _, _ = libp2p.NewDirectSender(context.Background(), hs, func(msg p2p.MessageP2P) error { + return nil + }) + + assert.NotNil(t, handlerCalled) + assert.Equal(t, libp2p.DirectSendID, pidCalled) +} + +//------- ProcessReceivedDirectMessage + +func TestDirectSender_ProcessReceivedDirectMessageNilMessageShouldErr(t *testing.T) { + ds, _ := libp2p.NewDirectSender( + context.Background(), + generateHostStub(), + blankMessageHandler, + ) + + err := ds.ProcessReceivedDirectMessage(nil) + + assert.Equal(t, p2p.ErrNilMessage, err) +} + +func TestDirectSender_ProcessReceivedDirectMessageNilTopicIdsShouldErr(t *testing.T) { + ds, _ := libp2p.NewDirectSender( + context.Background(), + generateHostStub(), + blankMessageHandler, + ) + + id, _ := createLibP2PCredentialsDirectSender() + + msg := &pubsub_pb.Message{} + msg.Data = []byte("data") + msg.Seqno = []byte("111") + msg.From = []byte(id) + msg.TopicIDs = nil + + err := ds.ProcessReceivedDirectMessage(msg) + + assert.Equal(t, p2p.ErrNilTopic, err) +} + +func TestDirectSender_ProcessReceivedDirectMessageEmptyTopicIdsShouldErr(t *testing.T) { + ds, _ := libp2p.NewDirectSender( + context.Background(), + generateHostStub(), + blankMessageHandler, + ) + + id, _ := createLibP2PCredentialsDirectSender() + + msg := &pubsub_pb.Message{} + msg.Data = []byte("data") + msg.Seqno = []byte("111") + msg.From = []byte(id) + msg.TopicIDs = make([]string, 0) + + err := ds.ProcessReceivedDirectMessage(msg) + + assert.Equal(t, p2p.ErrEmptyTopicList, err) +} + +func 
TestDirectSender_ProcessReceivedDirectMessageAlreadySeenMsgShouldErr(t *testing.T) { + ds, _ := libp2p.NewDirectSender( + context.Background(), + generateHostStub(), + blankMessageHandler, + ) + + id, _ := createLibP2PCredentialsDirectSender() + + msg := &pubsub_pb.Message{} + msg.Data = []byte("data") + msg.Seqno = []byte("111") + msg.From = []byte(id) + msg.TopicIDs = []string{"topic"} + + msgId := string(msg.GetFrom()) + string(msg.GetSeqno()) + ds.SeenMessages().Add(msgId) + + err := ds.ProcessReceivedDirectMessage(msg) + + assert.Equal(t, p2p.ErrAlreadySeenMessage, err) +} + +func TestDirectSender_ProcessReceivedDirectMessageShouldWork(t *testing.T) { + ds, _ := libp2p.NewDirectSender( + context.Background(), + generateHostStub(), + blankMessageHandler, + ) + + id, _ := createLibP2PCredentialsDirectSender() + + msg := &pubsub_pb.Message{} + msg.Data = []byte("data") + msg.Seqno = []byte("111") + msg.From = []byte(id) + msg.TopicIDs = []string{"topic"} + + err := ds.ProcessReceivedDirectMessage(msg) + + assert.Nil(t, err) +} + +func TestDirectSender_ProcessReceivedDirectMessageShouldCallMessageHandler(t *testing.T) { + wasCalled := false + + ds, _ := libp2p.NewDirectSender( + context.Background(), + generateHostStub(), + func(msg p2p.MessageP2P) error { + wasCalled = true + return nil + }, + ) + + id, _ := createLibP2PCredentialsDirectSender() + + msg := &pubsub_pb.Message{} + msg.Data = []byte("data") + msg.Seqno = []byte("111") + msg.From = []byte(id) + msg.TopicIDs = []string{"topic"} + + _ = ds.ProcessReceivedDirectMessage(msg) + + assert.True(t, wasCalled) +} + +func TestDirectSender_ProcessReceivedDirectMessageShouldReturnHandlersError(t *testing.T) { + checkErr := errors.New("checking error") + + ds, _ := libp2p.NewDirectSender( + context.Background(), + generateHostStub(), + func(msg p2p.MessageP2P) error { + return checkErr + }, + ) + + id, _ := createLibP2PCredentialsDirectSender() + + msg := &pubsub_pb.Message{} + msg.Data = []byte("data") + 
msg.Seqno = []byte("111") + msg.From = []byte(id) + msg.TopicIDs = []string{"topic"} + + err := ds.ProcessReceivedDirectMessage(msg) + + assert.Equal(t, checkErr, err) +} + +//------- SendDirectToConnectedPeer + +func TestDirectSender_SendDirectToConnectedPeerNotConnectedPeerShouldErr(t *testing.T) { + netw := &mock.NetworkStub{ + ConnsToPeerCalled: func(p peer.ID) []net.Conn { + return make([]net.Conn, 0) + }, + } + + ds, _ := libp2p.NewDirectSender( + context.Background(), + &mock.HostStub{ + SetStreamHandlerCalled: func(pid protocol.ID, handler net.StreamHandler) {}, + NetworkCalled: func() net.Network { + return netw + }, + }, + blankMessageHandler, + ) + + err := ds.Send("topic", []byte("data"), "not connected peer") + + assert.Equal(t, p2p.ErrPeerNotDirectlyConnected, err) +} + +func TestDirectSender_SendDirectToConnectedPeerNewStreamErrorsShouldErr(t *testing.T) { + t.Parallel() + + netw := &mock.NetworkStub{} + + hs := &mock.HostStub{ + SetStreamHandlerCalled: func(pid protocol.ID, handler net.StreamHandler) {}, + NetworkCalled: func() net.Network { + return netw + }, + } + + ds, _ := libp2p.NewDirectSender( + context.Background(), + hs, + blankMessageHandler, + ) + + id, sk := createLibP2PCredentialsDirectSender() + remotePeer := peer.ID("remote peer") + errNewStream := errors.New("new stream error") + + cs := createConnStub(nil, id, sk, remotePeer) + + netw.ConnsToPeerCalled = func(p peer.ID) []net.Conn { + return []net.Conn{cs} + } + + hs.NewStreamCalled = func(ctx context.Context, p peer.ID, pids ...protocol.ID) (net.Stream, error) { + return nil, errNewStream + } + + data := []byte("data") + topic := "topic" + err := ds.Send(topic, data, p2p.PeerID(cs.RemotePeer())) + + assert.Equal(t, errNewStream, err) +} + +func TestDirectSender_SendDirectToConnectedPeerExistingStreamShouldSendToStream(t *testing.T) { + netw := &mock.NetworkStub{} + + ds, _ := libp2p.NewDirectSender( + context.Background(), + &mock.HostStub{ + SetStreamHandlerCalled: func(pid 
protocol.ID, handler net.StreamHandler) {}, + NetworkCalled: func() net.Network { + return netw + }, + }, + blankMessageHandler, + ) + + generatedCounter := ds.Counter() + + id, sk := createLibP2PCredentialsDirectSender() + remotePeer := peer.ID("remote peer") + + stream := mock.NewStreamMock() + stream.SetProtocol(libp2p.DirectSendID) + + cs := createConnStub(stream, id, sk, remotePeer) + + netw.ConnsToPeerCalled = func(p peer.ID) []net.Conn { + return []net.Conn{cs} + } + + receivedMsg := &pubsub_pb.Message{} + chanDone := make(chan bool) + + go func(s net.Stream) { + reader := ggio.NewDelimitedReader(s, 1<<20) + for { + err := reader.ReadMsg(receivedMsg) + if err != nil { + fmt.Println(err.Error()) + return + } + + chanDone <- true + } + }(stream) + + data := []byte("data") + topic := "topic" + err := ds.Send(topic, data, p2p.PeerID(cs.RemotePeer())) + + select { + case <-chanDone: + case <-time.After(timeout): + assert.Fail(t, "timeout getting data from stream") + return + } + + assert.Nil(t, err) + assert.Equal(t, receivedMsg.Data, data) + assert.Equal(t, receivedMsg.TopicIDs[0], topic) + assert.Equal(t, receivedMsg.Seqno, ds.NextSeqno(&generatedCounter)) +} + +func TestDirectSender_SendDirectToConnectedPeerNewStreamShouldSendToStream(t *testing.T) { + netw := &mock.NetworkStub{} + + hs := &mock.HostStub{ + SetStreamHandlerCalled: func(pid protocol.ID, handler net.StreamHandler) {}, + NetworkCalled: func() net.Network { + return netw + }, + } + + ds, _ := libp2p.NewDirectSender( + context.Background(), + hs, + blankMessageHandler, + ) + + generatedCounter := ds.Counter() + + id, sk := createLibP2PCredentialsDirectSender() + remotePeer := peer.ID("remote peer") + + stream := mock.NewStreamMock() + stream.SetProtocol(libp2p.DirectSendID) + + cs := createConnStub(stream, id, sk, remotePeer) + + netw.ConnsToPeerCalled = func(p peer.ID) []net.Conn { + return []net.Conn{cs} + } + + hs.NewStreamCalled = func(ctx context.Context, p peer.ID, pids ...protocol.ID) 
(net.Stream, error) { + if p == remotePeer && pids[0] == libp2p.DirectSendID { + return stream, nil + } + return nil, errors.New("wrong parameters") + } + + receivedMsg := &pubsub_pb.Message{} + chanDone := make(chan bool) + + go func(s net.Stream) { + reader := ggio.NewDelimitedReader(s, 1<<20) + for { + err := reader.ReadMsg(receivedMsg) + if err != nil { + fmt.Println(err.Error()) + return + } + + chanDone <- true + } + }(stream) + + data := []byte("data") + topic := "topic" + err := ds.Send(topic, data, p2p.PeerID(cs.RemotePeer())) + + select { + case <-chanDone: + case <-time.After(timeout): + assert.Fail(t, "timeout getting data from stream") + return + } + + assert.Nil(t, err) + assert.Equal(t, receivedMsg.Data, data) + assert.Equal(t, receivedMsg.TopicIDs[0], topic) + assert.Equal(t, receivedMsg.Seqno, ds.NextSeqno(&generatedCounter)) +} + +//------- received mesages tests + +func TestDirectSender_ReceivedSentMessageShouldCallMessageHandlerTestFullCycle(t *testing.T) { + var streamHandler net.StreamHandler + netw := &mock.NetworkStub{} + + hs := &mock.HostStub{ + SetStreamHandlerCalled: func(pid protocol.ID, handler net.StreamHandler) { + streamHandler = handler + }, + NetworkCalled: func() net.Network { + return netw + }, + } + + var receivedMsg p2p.MessageP2P + chanDone := make(chan bool) + + ds, _ := libp2p.NewDirectSender( + context.Background(), + hs, + func(msg p2p.MessageP2P) error { + receivedMsg = msg + chanDone <- true + return nil + }, + ) + + id, sk := createLibP2PCredentialsDirectSender() + remotePeer := peer.ID("remote peer") + + stream := mock.NewStreamMock() + stream.SetProtocol(libp2p.DirectSendID) + + streamHandler(stream) + + cs := createConnStub(stream, id, sk, remotePeer) + + netw.ConnsToPeerCalled = func(p peer.ID) []net.Conn { + return []net.Conn{cs} + } + + data := []byte("data") + topic := "topic" + _ = ds.Send(topic, data, p2p.PeerID(cs.RemotePeer())) + + select { + case <-chanDone: + case <-time.After(timeout): + assert.Fail(t, 
"timeout") + return + } + + assert.NotNil(t, receivedMsg) + assert.Equal(t, data, receivedMsg.Data()) + assert.Equal(t, []string{topic}, receivedMsg.TopicIDs()) +} diff --git a/p2p/libp2p/export_test.go b/p2p/libp2p/export_test.go new file mode 100644 index 00000000000..d745ffec7a4 --- /dev/null +++ b/p2p/libp2p/export_test.go @@ -0,0 +1,37 @@ +package libp2p + +import ( + "github.com/libp2p/go-libp2p-discovery" + "github.com/libp2p/go-libp2p-host" + "github.com/libp2p/go-libp2p-interface-connmgr" + "github.com/libp2p/go-libp2p-pubsub/pb" + "github.com/whyrusleeping/timecache" +) + +func (netMes *networkMessenger) SetDiscoverer(discoverer discovery.Discoverer) { + netMes.discoverer = discoverer +} + +func (netMes *networkMessenger) ConnManager() ifconnmgr.ConnManager { + return netMes.hostP2P.ConnManager() +} + +func (netMes *networkMessenger) SetPeerDiscoveredHandler(handler PeerInfoHandler) { + netMes.peerDiscoveredHandler = handler +} + +func (netMes *networkMessenger) SetHost(host host.Host) { + netMes.hostP2P = host +} + +func (ds *directSender) ProcessReceivedDirectMessage(message *pubsub_pb.Message) error { + return ds.processReceivedDirectMessage(message) +} + +func (ds *directSender) SeenMessages() *timecache.TimeCache { + return ds.seenMessages +} + +func (ds *directSender) Counter() uint64 { + return ds.counter +} diff --git a/p2p/libp2p/memMessenger.go b/p2p/libp2p/memMessenger.go new file mode 100644 index 00000000000..2032bf6822b --- /dev/null +++ b/p2p/libp2p/memMessenger.go @@ -0,0 +1,49 @@ +package libp2p + +import ( + "context" + + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p/loadBalancer" + "github.com/libp2p/go-libp2p-peerstore" + "github.com/libp2p/go-libp2p/p2p/net/mock" +) + +// NewMemoryMessenger creates a new sandbox testable instance of libP2P messenger +// It should not open ports on current machine +// Should be used only in testing! 
+func NewMemoryMessenger( + ctx context.Context, + mockNet mocknet.Mocknet, + peerDiscoveryType p2p.PeerDiscoveryType) (*networkMessenger, error) { + + if ctx == nil { + return nil, p2p.ErrNilContext + } + + if mockNet == nil { + return nil, p2p.ErrNilMockNet + } + + h, err := mockNet.GenPeer() + if err != nil { + return nil, err + } + + mes, err := createMessenger( + ctx, + h, + false, + loadBalancer.NewOutgoingPipeLoadBalancer(), + peerDiscoveryType, + ) + if err != nil { + return nil, err + } + + mes.preconnectPeerHandler = func(pInfo peerstore.PeerInfo) { + _ = mockNet.LinkAll() + } + + return mes, err +} diff --git a/p2p/libp2p/message.go b/p2p/libp2p/message.go new file mode 100644 index 00000000000..8b9bafe17ec --- /dev/null +++ b/p2p/libp2p/message.go @@ -0,0 +1,74 @@ +package libp2p + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/libp2p/go-libp2p-peer" + "github.com/libp2p/go-libp2p-pubsub" +) + +// Message is a data holder struct +type Message struct { + from []byte + data []byte + seqNo []byte + topicIds []string + signature []byte + key []byte + peer p2p.PeerID +} + +// NewMessage returns a new instance of a Message object +func NewMessage(message *pubsub.Message) *Message { + msg := &Message{ + from: message.From, + data: message.Data, + seqNo: message.Seqno, + topicIds: message.TopicIDs, + signature: message.Signature, + key: message.Key, + } + + id, err := peer.IDFromBytes(msg.from) + if err != nil { + log.Debug(err.Error()) + } else { + msg.peer = p2p.PeerID(id) + } + + return msg +} + +// From returns the message originator's peer ID +func (m *Message) From() []byte { + return m.from +} + +// Data returns the message payload +func (m *Message) Data() []byte { + return m.data +} + +// SeqNo returns the message sequence number +func (m *Message) SeqNo() []byte { + return m.seqNo +} + +// TopicIDs returns the topic on which the message was sent +func (m *Message) TopicIDs() []string { + return m.topicIds +} + +// Signature 
returns the message signature +func (m *Message) Signature() []byte { + return m.signature +} + +// Key returns the message public key (if it can not be recovered from From field) +func (m *Message) Key() []byte { + return m.key +} + +// Peer returns the peer that originated the message +func (m *Message) Peer() p2p.PeerID { + return m.peer +} diff --git a/p2p/libp2p/message_test.go b/p2p/libp2p/message_test.go new file mode 100644 index 00000000000..abbe08c5f07 --- /dev/null +++ b/p2p/libp2p/message_test.go @@ -0,0 +1,105 @@ +package libp2p_test + +import ( + "crypto/ecdsa" + "math/rand" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p/libp2p" + "github.com/btcsuite/btcd/btcec" + "github.com/libp2p/go-libp2p-crypto" + "github.com/libp2p/go-libp2p-peer" + "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p-pubsub/pb" + "github.com/stretchr/testify/assert" +) + +func TestMessage_Data(t *testing.T) { + data := []byte("data") + + mes := &pubsub_pb.Message{ + Data: data, + } + pMes := &pubsub.Message{Message: mes} + m := libp2p.NewMessage(pMes) + + assert.Equal(t, m.Data(), data) +} + +func TestMessage_From(t *testing.T) { + from := []byte("from") + + mes := &pubsub_pb.Message{ + From: from, + } + pMes := &pubsub.Message{Message: mes} + m := libp2p.NewMessage(pMes) + + assert.Equal(t, m.From(), from) +} + +func TestMessage_Key(t *testing.T) { + key := []byte("key") + + mes := &pubsub_pb.Message{ + Key: key, + } + pMes := &pubsub.Message{Message: mes} + m := libp2p.NewMessage(pMes) + + assert.Equal(t, m.Key(), key) +} + +func TestMessage_SeqNo(t *testing.T) { + seqNo := []byte("seqNo") + + mes := &pubsub_pb.Message{ + Seqno: seqNo, + } + pMes := &pubsub.Message{Message: mes} + m := libp2p.NewMessage(pMes) + + assert.Equal(t, m.SeqNo(), seqNo) +} + +func TestMessage_Signature(t *testing.T) { + sig := []byte("sig") + + mes := &pubsub_pb.Message{ + Signature: sig, + } + pMes := 
&pubsub.Message{Message: mes} + m := libp2p.NewMessage(pMes) + + assert.Equal(t, m.Signature(), sig) +} + +func TestMessage_TopicIDs(t *testing.T) { + topics := []string{"topic1", "topic2"} + + mes := &pubsub_pb.Message{ + TopicIDs: topics, + } + pMes := &pubsub.Message{Message: mes} + m := libp2p.NewMessage(pMes) + + assert.Equal(t, m.TopicIDs(), topics) +} + +func TestMessage_Peer(t *testing.T) { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + prvKey, _ := ecdsa.GenerateKey(btcec.S256(), r) + sk := (*crypto.Secp256k1PrivateKey)(prvKey) + id, _ := peer.IDFromPublicKey(sk.GetPublic()) + + mes := &pubsub_pb.Message{ + From: []byte(id), + } + pMes := &pubsub.Message{Message: mes} + + m := libp2p.NewMessage(pMes) + + assert.Equal(t, p2p.PeerID(id), m.Peer()) +} diff --git a/p2p/libp2p/mock/connManagerNotifieeStub.go b/p2p/libp2p/mock/connManagerNotifieeStub.go new file mode 100644 index 00000000000..9479e3b3f54 --- /dev/null +++ b/p2p/libp2p/mock/connManagerNotifieeStub.go @@ -0,0 +1,68 @@ +package mock + +import ( + "context" + + "github.com/libp2p/go-libp2p-interface-connmgr" + "github.com/libp2p/go-libp2p-net" + "github.com/libp2p/go-libp2p-peer" + "github.com/multiformats/go-multiaddr" +) + +type ConnManagerNotifieeStub struct { + TagPeerCalled func(p peer.ID, tag string, val int) + UntagPeerCalled func(p peer.ID, tag string) + GetTagInfoCalled func(p peer.ID) *ifconnmgr.TagInfo + TrimOpenConnsCalled func(ctx context.Context) + + ListenCalled func(netw net.Network, ma multiaddr.Multiaddr) + ListenCloseCalled func(netw net.Network, ma multiaddr.Multiaddr) + ConnectedCalled func(netw net.Network, conn net.Conn) + DisconnectedCalled func(netw net.Network, conn net.Conn) + OpenedStreamCalled func(netw net.Network, stream net.Stream) + ClosedStreamCalled func(netw net.Network, stream net.Stream) +} + +func (cmns *ConnManagerNotifieeStub) TagPeer(p peer.ID, tag string, val int) { + cmns.TagPeerCalled(p, tag, val) +} + +func (cmns *ConnManagerNotifieeStub) 
UntagPeer(p peer.ID, tag string) { + cmns.UntagPeerCalled(p, tag) +} + +func (cmns *ConnManagerNotifieeStub) GetTagInfo(p peer.ID) *ifconnmgr.TagInfo { + return cmns.GetTagInfoCalled(p) +} + +func (cmns *ConnManagerNotifieeStub) TrimOpenConns(ctx context.Context) { + cmns.TrimOpenConnsCalled(ctx) +} + +func (cmns *ConnManagerNotifieeStub) Notifee() net.Notifiee { + return cmns +} + +func (cmns *ConnManagerNotifieeStub) Listen(netw net.Network, ma multiaddr.Multiaddr) { + cmns.ListenCalled(netw, ma) +} + +func (cmns *ConnManagerNotifieeStub) ListenClose(netw net.Network, ma multiaddr.Multiaddr) { + cmns.ListenCloseCalled(netw, ma) +} + +func (cmns *ConnManagerNotifieeStub) Connected(netw net.Network, conn net.Conn) { + cmns.ConnectedCalled(netw, conn) +} + +func (cmns *ConnManagerNotifieeStub) Disconnected(netw net.Network, conn net.Conn) { + cmns.DisconnectedCalled(netw, conn) +} + +func (cmns *ConnManagerNotifieeStub) OpenedStream(netw net.Network, stream net.Stream) { + cmns.OpenedStreamCalled(netw, stream) +} + +func (cmns *ConnManagerNotifieeStub) ClosedStream(netw net.Network, stream net.Stream) { + cmns.ClosedStreamCalled(netw, stream) +} diff --git a/p2p/libp2p/mock/connStub.go b/p2p/libp2p/mock/connStub.go new file mode 100644 index 00000000000..ba21c9b1c68 --- /dev/null +++ b/p2p/libp2p/mock/connStub.go @@ -0,0 +1,61 @@ +package mock + +import ( + "github.com/libp2p/go-libp2p-crypto" + "github.com/libp2p/go-libp2p-net" + "github.com/libp2p/go-libp2p-peer" + "github.com/multiformats/go-multiaddr" +) + +type ConnStub struct { + CloseCalled func() error + LocalPeerCalled func() peer.ID + LocalPrivateKeyCalled func() crypto.PrivKey + RemotePeerCalled func() peer.ID + RemotePublicKeyCalled func() crypto.PubKey + LocalMultiaddrCalled func() multiaddr.Multiaddr + RemoteMultiaddrCalled func() multiaddr.Multiaddr + NewStreamCalled func() (net.Stream, error) + GetStreamsCalled func() []net.Stream + StatCalled func() net.Stat +} + +func (cs *ConnStub) Close() error { 
+ return cs.CloseCalled() +} + +func (cs *ConnStub) LocalPeer() peer.ID { + return cs.LocalPeerCalled() +} + +func (cs *ConnStub) LocalPrivateKey() crypto.PrivKey { + return cs.LocalPrivateKeyCalled() +} + +func (cs *ConnStub) RemotePeer() peer.ID { + return cs.RemotePeerCalled() +} + +func (cs *ConnStub) RemotePublicKey() crypto.PubKey { + return cs.RemotePublicKeyCalled() +} + +func (cs *ConnStub) LocalMultiaddr() multiaddr.Multiaddr { + return cs.LocalMultiaddrCalled() +} + +func (cs *ConnStub) RemoteMultiaddr() multiaddr.Multiaddr { + return cs.RemoteMultiaddrCalled() +} + +func (cs *ConnStub) NewStream() (net.Stream, error) { + return cs.NewStreamCalled() +} + +func (cs *ConnStub) GetStreams() []net.Stream { + return cs.GetStreamsCalled() +} + +func (cs *ConnStub) Stat() net.Stat { + return cs.StatCalled() +} diff --git a/p2p/libp2p/mock/discovererStub.go b/p2p/libp2p/mock/discovererStub.go new file mode 100644 index 00000000000..bb29d0308c8 --- /dev/null +++ b/p2p/libp2p/mock/discovererStub.go @@ -0,0 +1,16 @@ +package mock + +import ( + "context" + + "github.com/libp2p/go-libp2p-discovery" + "github.com/libp2p/go-libp2p-peerstore" +) + +type DiscovererStub struct { + FindPeersCalled func(ctx context.Context, ns string, opts ...discovery.Option) (<-chan peerstore.PeerInfo, error) +} + +func (ds *DiscovererStub) FindPeers(ctx context.Context, ns string, opts ...discovery.Option) (<-chan peerstore.PeerInfo, error) { + return ds.FindPeersCalled(ctx, ns, opts...) 
+} diff --git a/p2p/libp2p/mock/hostStub.go b/p2p/libp2p/mock/hostStub.go new file mode 100644 index 00000000000..0ec7b30c4f7 --- /dev/null +++ b/p2p/libp2p/mock/hostStub.go @@ -0,0 +1,76 @@ +package mock + +import ( + "context" + + "github.com/libp2p/go-libp2p-interface-connmgr" + "github.com/libp2p/go-libp2p-net" + "github.com/libp2p/go-libp2p-peer" + "github.com/libp2p/go-libp2p-peerstore" + "github.com/libp2p/go-libp2p-protocol" + "github.com/multiformats/go-multiaddr" + "github.com/multiformats/go-multistream" +) + +type HostStub struct { + IDCalled func() peer.ID + PeerstoreCalled func() peerstore.Peerstore + AddrsCalled func() []multiaddr.Multiaddr + NetworkCalled func() net.Network + MuxCalled func() *multistream.MultistreamMuxer + ConnectCalled func(ctx context.Context, pi peerstore.PeerInfo) error + SetStreamHandlerCalled func(pid protocol.ID, handler net.StreamHandler) + SetStreamHandlerMatchCalled func(protocol.ID, func(string) bool, net.StreamHandler) + RemoveStreamHandlerCalled func(pid protocol.ID) + NewStreamCalled func(ctx context.Context, p peer.ID, pids ...protocol.ID) (net.Stream, error) + CloseCalled func() error + ConnManagerCalled func() ifconnmgr.ConnManager +} + +func (hs *HostStub) ID() peer.ID { + return hs.IDCalled() +} + +func (hs *HostStub) Peerstore() peerstore.Peerstore { + return hs.PeerstoreCalled() +} + +func (hs *HostStub) Addrs() []multiaddr.Multiaddr { + return hs.AddrsCalled() +} + +func (hs *HostStub) Network() net.Network { + return hs.NetworkCalled() +} + +func (hs *HostStub) Mux() *multistream.MultistreamMuxer { + return hs.MuxCalled() +} + +func (hs *HostStub) Connect(ctx context.Context, pi peerstore.PeerInfo) error { + return hs.ConnectCalled(ctx, pi) +} + +func (hs *HostStub) SetStreamHandler(pid protocol.ID, handler net.StreamHandler) { + hs.SetStreamHandlerCalled(pid, handler) +} + +func (hs *HostStub) SetStreamHandlerMatch(pid protocol.ID, handler func(string) bool, streamHandler net.StreamHandler) { + 
hs.SetStreamHandlerMatchCalled(pid, handler, streamHandler) +} + +func (hs *HostStub) RemoveStreamHandler(pid protocol.ID) { + hs.RemoveStreamHandlerCalled(pid) +} + +func (hs *HostStub) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (net.Stream, error) { + return hs.NewStreamCalled(ctx, p, pids...) +} + +func (hs *HostStub) Close() error { + return hs.CloseCalled() +} + +func (hs *HostStub) ConnManager() ifconnmgr.ConnManager { + return hs.ConnManagerCalled() +} diff --git a/p2p/libp2p/mock/messageProcessorStub.go b/p2p/libp2p/mock/messageProcessorStub.go new file mode 100644 index 00000000000..77718ef4bb0 --- /dev/null +++ b/p2p/libp2p/mock/messageProcessorStub.go @@ -0,0 +1,13 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" +) + +type MessageProcessorStub struct { + ProcessMessageCalled func(message p2p.MessageP2P) error +} + +func (mps *MessageProcessorStub) ProcessReceivedMessage(message p2p.MessageP2P) error { + return mps.ProcessMessageCalled(message) +} diff --git a/p2p/libp2p/mock/networkStub.go b/p2p/libp2p/mock/networkStub.go new file mode 100644 index 00000000000..93ded7f62bb --- /dev/null +++ b/p2p/libp2p/mock/networkStub.go @@ -0,0 +1,89 @@ +package mock + +import ( + "context" + + "github.com/jbenet/goprocess" + "github.com/libp2p/go-libp2p-net" + "github.com/libp2p/go-libp2p-peer" + "github.com/libp2p/go-libp2p-peerstore" + "github.com/multiformats/go-multiaddr" +) + +type NetworkStub struct { + ConnsToPeerCalled func(p peer.ID) []net.Conn + ConnsCalled func() []net.Conn + ConnectednessCalled func(peer.ID) net.Connectedness +} + +func (ns *NetworkStub) Peerstore() peerstore.Peerstore { + panic("implement me") +} + +func (ns *NetworkStub) LocalPeer() peer.ID { + panic("implement me") +} + +func (ns *NetworkStub) DialPeer(ctx context.Context, pid peer.ID) (net.Conn, error) { + panic("implement me") +} + +func (ns *NetworkStub) ClosePeer(pid peer.ID) error { + panic("implement me") +} + +func (ns 
*NetworkStub) Connectedness(pid peer.ID) net.Connectedness { + return ns.ConnectednessCalled(pid) +} + +func (ns *NetworkStub) Peers() []peer.ID { + panic("implement me") +} + +func (ns *NetworkStub) Conns() []net.Conn { + return ns.ConnsCalled() +} + +func (ns *NetworkStub) ConnsToPeer(p peer.ID) []net.Conn { + return ns.ConnsToPeerCalled(p) +} + +func (ns *NetworkStub) Notify(net.Notifiee) { + panic("implement me") +} + +func (ns *NetworkStub) StopNotify(net.Notifiee) { + panic("implement me") +} + +func (ns *NetworkStub) Close() error { + panic("implement me") +} + +func (ns *NetworkStub) SetStreamHandler(net.StreamHandler) { + panic("implement me") +} + +func (ns *NetworkStub) SetConnHandler(net.ConnHandler) { + panic("implement me") +} + +func (ns *NetworkStub) NewStream(context.Context, peer.ID) (net.Stream, error) { + panic("implement me") +} + +func (ns *NetworkStub) Listen(...multiaddr.Multiaddr) error { + panic("implement me") +} + +func (ns *NetworkStub) ListenAddresses() []multiaddr.Multiaddr { + panic("implement me") +} + +func (ns *NetworkStub) InterfaceListenAddresses() ([]multiaddr.Multiaddr, error) { + panic("implement me") +} + +func (ns *NetworkStub) Process() goprocess.Process { + panic("implement me") +} diff --git a/p2p/libp2p/mock/pipeLoadBalancerStub.go b/p2p/libp2p/mock/pipeLoadBalancerStub.go new file mode 100644 index 00000000000..94e6152e2ec --- /dev/null +++ b/p2p/libp2p/mock/pipeLoadBalancerStub.go @@ -0,0 +1,28 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" +) + +type PipeLoadBalancerStub struct { + AddPipeCalled func(pipe string) error + RemovePipeCalled func(pipe string) error + GetChannelOrDefaultCalled func(pipe string) chan *p2p.SendableData + CollectFromPipesCalled func() []*p2p.SendableData +} + +func (plbs *PipeLoadBalancerStub) AddPipe(pipe string) error { + return plbs.AddPipeCalled(pipe) +} + +func (plbs *PipeLoadBalancerStub) RemovePipe(pipe string) error { + return 
plbs.RemovePipeCalled(pipe) +} + +func (plbs *PipeLoadBalancerStub) GetChannelOrDefault(pipe string) chan *p2p.SendableData { + return plbs.GetChannelOrDefaultCalled(pipe) +} + +func (plbs *PipeLoadBalancerStub) CollectFromPipes() []*p2p.SendableData { + return plbs.CollectFromPipesCalled() +} diff --git a/p2p/libp2p/mock/streamMock.go b/p2p/libp2p/mock/streamMock.go new file mode 100644 index 00000000000..6d8e6e04d72 --- /dev/null +++ b/p2p/libp2p/mock/streamMock.go @@ -0,0 +1,108 @@ +package mock + +import ( + "bytes" + "io" + "sync" + "time" + + "github.com/libp2p/go-libp2p-net" + "github.com/libp2p/go-libp2p-protocol" +) + +type streamMock struct { + mutData sync.Mutex + buffStream *bytes.Buffer + pid protocol.ID + streamClosed bool + canRead bool +} + +func NewStreamMock() *streamMock { + return &streamMock{ + mutData: sync.Mutex{}, + buffStream: new(bytes.Buffer), + streamClosed: false, + canRead: false, + } +} + +func (sm *streamMock) Read(p []byte) (n int, err error) { + //just a mock implementation of blocking read + for { + time.Sleep(time.Millisecond * 10) + + sm.mutData.Lock() + if sm.streamClosed { + sm.mutData.Unlock() + return 0, io.EOF + } + + if sm.canRead { + n, err := sm.buffStream.Read(p) + sm.canRead = false + sm.mutData.Unlock() + + return n, err + } + sm.mutData.Unlock() + } +} + +func (sm *streamMock) Write(p []byte) (int, error) { + sm.mutData.Lock() + n, err := sm.buffStream.Write(p) + if err == nil { + sm.canRead = true + } + sm.mutData.Unlock() + + return n, err +} + +func (sm *streamMock) Close() error { + sm.mutData.Lock() + defer sm.mutData.Unlock() + + sm.streamClosed = true + return nil +} + +func (sm *streamMock) Reset() error { + sm.mutData.Lock() + defer sm.mutData.Unlock() + + sm.buffStream.Reset() + sm.canRead = false + return nil +} + +func (sm *streamMock) SetDeadline(time.Time) error { + panic("implement me") +} + +func (sm *streamMock) SetReadDeadline(time.Time) error { + panic("implement me") +} + +func (sm *streamMock) 
SetWriteDeadline(time.Time) error { + panic("implement me") +} + +func (sm *streamMock) Protocol() protocol.ID { + return sm.pid +} + +func (sm *streamMock) SetProtocol(pid protocol.ID) { + sm.pid = pid +} + +func (sm *streamMock) Stat() net.Stat { + return net.Stat{ + Direction: net.DirOutbound, + } +} + +func (sm *streamMock) Conn() net.Conn { + panic("implement me") +} diff --git a/p2p/libp2p/netMessenger.go b/p2p/libp2p/netMessenger.go new file mode 100644 index 00000000000..a0eb408bba1 --- /dev/null +++ b/p2p/libp2p/netMessenger.go @@ -0,0 +1,563 @@ +package libp2p + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/logger" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p-crypto" + "github.com/libp2p/go-libp2p-discovery" + "github.com/libp2p/go-libp2p-host" + "github.com/libp2p/go-libp2p-interface-connmgr" + "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p-net" + "github.com/libp2p/go-libp2p-peer" + "github.com/libp2p/go-libp2p-peerstore" + "github.com/libp2p/go-libp2p-protocol" + "github.com/libp2p/go-libp2p-pubsub" + discovery2 "github.com/libp2p/go-libp2p/p2p/discovery" + "github.com/multiformats/go-multiaddr" +) + +const elrondRandezVousString = "ElrondNetworkRandezVous" +const durationBetweenSends = time.Duration(time.Microsecond * 10) + +// durMdnsCalls is used to define the duration used by mdns service when polling peers +const durationMdnsCalls = time.Second + +// DirectSendID represents the protocol ID for sending and receiving direct P2P messages +const DirectSendID = protocol.ID("/directsend/1.0.0") + +var log = logger.NewDefaultLogger() + +// PeerInfoHandler is the signature of the handler that gets called whenever an action for a peerInfo is triggered +type PeerInfoHandler func(pInfo peerstore.PeerInfo) + +type networkMessenger struct { + ctx context.Context + hostP2P host.Host + pb *pubsub.PubSub + ds 
p2p.DirectSender + kadDHT *dht.IpfsDHT + discoverer discovery.Discoverer + peerDiscoveryType p2p.PeerDiscoveryType + mutMdns sync.Mutex + mdns discovery2.Service + + mutTopics sync.RWMutex + topics map[string]p2p.MessageProcessor + // preconnectPeerHandler is used for notifying that a peer wants to connect to another peer so + // in the case of mocknet use, mocknet should first link the peers + preconnectPeerHandler PeerInfoHandler + peerDiscoveredHandler PeerInfoHandler + + outgoingPLB p2p.PipeLoadBalancer +} + +// NewNetworkMessenger creates a libP2P messenger by opening a port on the current machine +// Should be used in production! +func NewNetworkMessenger( + ctx context.Context, + port int, + p2pPrivKey crypto.PrivKey, + conMgr ifconnmgr.ConnManager, + outgoingPLB p2p.PipeLoadBalancer, + peerDiscoveryType p2p.PeerDiscoveryType, +) (*networkMessenger, error) { + + if ctx == nil { + return nil, p2p.ErrNilContext + } + + if port < 1 { + return nil, p2p.ErrInvalidPort + } + + if p2pPrivKey == nil { + return nil, p2p.ErrNilP2PprivateKey + } + + if outgoingPLB == nil { + return nil, p2p.ErrNilPipeLoadBalancer + } + + opts := []libp2p.Option{ + libp2p.ListenAddrStrings(fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", port)), + libp2p.Identity(p2pPrivKey), + libp2p.DefaultTransports, + libp2p.DefaultMuxers, + libp2p.DefaultSecurity, + libp2p.NATPortMap(), + libp2p.ConnectionManager(conMgr), + } + + h, err := libp2p.New(ctx, opts...) 
+ if err != nil { + return nil, err + } + + p2pNode, err := createMessenger(ctx, h, true, outgoingPLB, peerDiscoveryType) + if err != nil { + log.LogIfError(h.Close()) + return nil, err + } + + return p2pNode, nil +} + +func createMessenger( + ctx context.Context, + h host.Host, + withSigning bool, + outgoingPLB p2p.PipeLoadBalancer, + peerDiscoveryType p2p.PeerDiscoveryType, +) (*networkMessenger, error) { + + pb, err := createPubSub(ctx, h, withSigning) + if err != nil { + return nil, err + } + + netMes := networkMessenger{ + hostP2P: h, + pb: pb, + topics: make(map[string]p2p.MessageProcessor), + ctx: ctx, + outgoingPLB: outgoingPLB, + peerDiscoveryType: peerDiscoveryType, + } + + err = netMes.applyDiscoveryMechanism(peerDiscoveryType) + if err != nil { + return nil, err + } + + netMes.ds, err = NewDirectSender(ctx, h, netMes.directMessageHandler) + if err != nil { + return nil, err + } + + go func(pubsub *pubsub.PubSub, plb p2p.PipeLoadBalancer) { + for { + dataToBeSent := plb.CollectFromPipes() + + wasSent := false + for i := 0; i < len(dataToBeSent); i++ { + sendableData := dataToBeSent[i] + + if sendableData == nil { + continue + } + + _ = pb.Publish(sendableData.Topic, sendableData.Buff) + wasSent = true + + time.Sleep(durationBetweenSends) + } + + //if nothing was sent over the network, it makes sense to sleep for a bit + //as to not make this for loop iterate at max CPU speed + if !wasSent { + time.Sleep(durationBetweenSends) + } + } + }(pb, netMes.outgoingPLB) + + for _, address := range netMes.hostP2P.Addrs() { + fmt.Println(address.String() + "/p2p/" + netMes.ID().Pretty()) + } + + return &netMes, nil +} + +func createPubSub(ctx context.Context, host host.Host, withSigning bool) (*pubsub.PubSub, error) { + optsPS := []pubsub.Option{ + pubsub.WithMessageSigning(withSigning), + } + + ps, err := pubsub.NewGossipSub(ctx, host, optsPS...) 
+ if err != nil { + return nil, err + } + + return ps, nil +} + +func (netMes *networkMessenger) applyDiscoveryMechanism(peerDiscoveryType p2p.PeerDiscoveryType) error { + switch peerDiscoveryType { + case p2p.PeerDiscoveryKadDht: + return netMes.createKadDHT(elrondRandezVousString) + case p2p.PeerDiscoveryMdns: + return nil + case p2p.PeerDiscoveryOff: + return nil + default: + return p2p.ErrPeerDiscoveryNotImplemented + } +} + +// HandlePeerFound updates the routing table with this new peer +func (netMes *networkMessenger) HandlePeerFound(pi peerstore.PeerInfo) { + peers := netMes.hostP2P.Peerstore().Peers() + found := false + + for i := 0; i < len(peers); i++ { + if peers[i] == pi.ID { + found = true + break + } + } + + if found { + return + } + + for i := 0; i < len(pi.Addrs); i++ { + netMes.hostP2P.Peerstore().AddAddr(pi.ID, pi.Addrs[i], peerstore.PermanentAddrTTL) + } + + //will try to connect for now as the connections and peer filtering is not done yet + //TODO design a connection manager component + go func() { + err := netMes.hostP2P.Connect(netMes.ctx, pi) + + if err != nil { + log.Debug(err.Error()) + } + }() +} + +func (netMes *networkMessenger) createKadDHT(randezvous string) error { + // Start a DHT, for use in peer discovery. We can't just make a new DHT + // client because we want each peer to maintain its own local copy of the + // DHT, so that the bootstrapping node of the DHT can go down without + // inhibiting future peer discovery. + kademliaDHT, err := dht.New(netMes.ctx, netMes.hostP2P) + if err != nil { + return err + } + + if err = kademliaDHT.Bootstrap(netMes.ctx); err != nil { + return err + } + + // We use a rendezvous point "meet me here" to announce our location. + // This is like telling your friends to meet you at the Eiffel Tower. 
+ log.Debug("Announcing ourselves...") + routingDiscovery := discovery.NewRoutingDiscovery(kademliaDHT) + discovery.Advertise(netMes.ctx, routingDiscovery, randezvous) + log.Debug("Successfully announced!") + + netMes.kadDHT = kademliaDHT + netMes.discoverer = routingDiscovery + + return nil +} + +// Close closes the host, connections and streams +func (netMes *networkMessenger) Close() error { + if netMes.kadDHT != nil { + err := netMes.kadDHT.Close() + log.LogIfError(err) + } + + netMes.mutMdns.Lock() + if netMes.mdns != nil { + err := netMes.mdns.Close() + log.LogIfError(err) + } + netMes.mutMdns.Unlock() + + return netMes.hostP2P.Close() +} + +// ID returns the messenger's ID +func (netMes *networkMessenger) ID() p2p.PeerID { + return p2p.PeerID(netMes.hostP2P.ID()) +} + +// Peers returns the list of all known peers ID (including self) +func (netMes *networkMessenger) Peers() []p2p.PeerID { + peers := make([]p2p.PeerID, 0) + + for _, p := range netMes.hostP2P.Peerstore().Peers() { + peers = append(peers, p2p.PeerID(p)) + } + return peers +} + +// Addresses returns all addresses found in peerstore +func (netMes *networkMessenger) Addresses() []string { + addrs := make([]string, 0) + + for _, address := range netMes.hostP2P.Addrs() { + addrs = append(addrs, address.String()+"/p2p/"+netMes.ID().Pretty()) + } + + return addrs +} + +// ConnectToPeer tries to open a new connection to a peer +func (netMes *networkMessenger) ConnectToPeer(address string) error { + multiAddr, err := multiaddr.NewMultiaddr(address) + if err != nil { + return err + } + + pInfo, err := peerstore.InfoFromP2pAddr(multiAddr) + if err != nil { + return err + } + + if netMes.preconnectPeerHandler != nil { + netMes.preconnectPeerHandler(*pInfo) + } + + return netMes.hostP2P.Connect(netMes.ctx, *pInfo) +} + +// KadDhtDiscoverNewPeers starts a blocking function that searches for all known peers querying all connected peers +// The default libp2p kad-dht implementation tries to connect to all of 
them +func (netMes *networkMessenger) KadDhtDiscoverNewPeers() error { + if netMes.discoverer == nil { + return p2p.ErrNilDiscoverer + } + + peerChan, err := netMes.discoverer.FindPeers(netMes.ctx, elrondRandezVousString) + if err != nil { + return err + } + + for { + pInfo, more := <-peerChan + + if !more { + //discovered peers channel closed + break + } + + handler := netMes.peerDiscoveredHandler + + if handler != nil { + handler(pInfo) + } + } + + return nil +} + +// TrimConnections will trigger a manual sweep onto current connection set reducing the +// number of connections if needed +func (netMes *networkMessenger) TrimConnections() { + netMes.hostP2P.ConnManager().TrimOpenConns(netMes.ctx) +} + +// Bootstrap will start the peer discovery mechanism +func (netMes *networkMessenger) Bootstrap() error { + if netMes.peerDiscoveryType == p2p.PeerDiscoveryMdns { + netMes.mutMdns.Lock() + defer netMes.mutMdns.Unlock() + + if netMes.mdns != nil { + return p2p.ErrPeerDiscoveryProcessAlreadyStarted + } + + mdns, err := discovery2.NewMdnsService( + netMes.ctx, + netMes.hostP2P, + durationMdnsCalls, + "discovery") + + if err != nil { + return err + } + + mdns.RegisterNotifee(netMes) + netMes.mdns = mdns + return nil + } + + return nil +} + +// IsConnected returns true if current node is connected to provided peer +func (netMes *networkMessenger) IsConnected(peerID p2p.PeerID) bool { + connectedness := netMes.hostP2P.Network().Connectedness(peer.ID(peerID)) + + return connectedness == net.Connected +} + +// ConnectedPeers returns the current connected peers list +func (netMes *networkMessenger) ConnectedPeers() []p2p.PeerID { + + connectedPeers := make(map[p2p.PeerID]struct{}) + + for _, conn := range netMes.hostP2P.Network().Conns() { + p := p2p.PeerID(conn.RemotePeer()) + + if netMes.IsConnected(p) { + connectedPeers[p] = struct{}{} + } + } + + peerList := make([]p2p.PeerID, len(connectedPeers)) + + index := 0 + for k := range connectedPeers { + peerList[index] = k + 
index++ + } + + return peerList +} + +// CreateTopic opens a new topic using pubsub infrastructure +func (netMes *networkMessenger) CreateTopic(name string, createPipeForTopic bool) error { + netMes.mutTopics.Lock() + _, found := netMes.topics[name] + if found { + netMes.mutTopics.Unlock() + return p2p.ErrTopicAlreadyExists + } + + netMes.topics[name] = nil + subscrRequest, err := netMes.pb.Subscribe(name) + if err != nil { + netMes.mutTopics.Unlock() + return err + } + netMes.mutTopics.Unlock() + + if createPipeForTopic { + err = netMes.outgoingPLB.AddPipe(name) + } + + //just a dummy func to consume messages received by the newly created topic + go func() { + for { + _, _ = subscrRequest.Next(netMes.ctx) + } + }() + + return err +} + +// HasTopic returns true if the topic has been created +func (netMes *networkMessenger) HasTopic(name string) bool { + netMes.mutTopics.RLock() + _, found := netMes.topics[name] + netMes.mutTopics.RUnlock() + + return found +} + +// HasTopicValidator returns true if the topic has a validator set +func (netMes *networkMessenger) HasTopicValidator(name string) bool { + netMes.mutTopics.RLock() + validator, _ := netMes.topics[name] + netMes.mutTopics.RUnlock() + + return validator != nil +} + +// OutgoingPipeLoadBalancer returns the pipe load balancer object used by the messenger to send data +func (netMes *networkMessenger) OutgoingPipeLoadBalancer() p2p.PipeLoadBalancer { + return netMes.outgoingPLB +} + +// BroadcastOnPipe tries to send a byte buffer onto a topic using provided pipe +func (netMes *networkMessenger) BroadcastOnPipe(pipe string, topic string, buff []byte) { + go func() { + sendable := &p2p.SendableData{ + Buff: buff, + Topic: topic, + } + netMes.outgoingPLB.GetChannelOrDefault(pipe) <- sendable + }() +} + +// BroadcastOnTopicPipe tries to send a byte buffer onto a topic using the topic name as pipe +func (netMes *networkMessenger) Broadcast(topic string, buff []byte) { + netMes.BroadcastOnPipe(topic, topic, buff) +} + 
+// RegisterMessageProcessor registers a message processor on a topic +func (netMes *networkMessenger) RegisterMessageProcessor(topic string, handler p2p.MessageProcessor) error { + if handler == nil { + return p2p.ErrNilValidator + } + + netMes.mutTopics.Lock() + defer netMes.mutTopics.Unlock() + validator, found := netMes.topics[topic] + + if !found { + return p2p.ErrNilTopic + } + + if validator != nil { + return p2p.ErrTopicValidatorOperationNotSupported + } + + err := netMes.pb.RegisterTopicValidator(topic, func(i context.Context, message *pubsub.Message) bool { + err := handler.ProcessReceivedMessage(NewMessage(message)) + + return err == nil + }) + if err != nil { + return err + } + + netMes.topics[topic] = handler + return nil +} + +// UnregisterMessageProcessor unregisters a message processor on a topic +func (netMes *networkMessenger) UnregisterMessageProcessor(topic string) error { + netMes.mutTopics.Lock() + defer netMes.mutTopics.Unlock() + validator, found := netMes.topics[topic] + + if !found { + return p2p.ErrNilTopic + } + + if validator == nil { + return p2p.ErrTopicValidatorOperationNotSupported + } + + err := netMes.pb.UnregisterTopicValidator(topic) + if err != nil { + return err + } + + netMes.topics[topic] = nil + return nil +} + +// SendToConnectedPeer sends a direct message to a connected peer +func (netMes *networkMessenger) SendToConnectedPeer(topic string, buff []byte, peerID p2p.PeerID) error { + return netMes.ds.Send(topic, buff, peerID) +} + +func (netMes *networkMessenger) directMessageHandler(message p2p.MessageP2P) error { + var processor p2p.MessageProcessor + + netMes.mutTopics.RLock() + processor = netMes.topics[message.TopicIDs()[0]] + netMes.mutTopics.RUnlock() + + if processor == nil { + return p2p.ErrNilValidator + } + + go func(msg p2p.MessageP2P) { + log.LogIfError(processor.ProcessReceivedMessage(msg)) + }(message) + + return nil +} diff --git a/p2p/libp2p/netMessenger_test.go b/p2p/libp2p/netMessenger_test.go new file mode 
100644 index 00000000000..1bd44406f3c --- /dev/null +++ b/p2p/libp2p/netMessenger_test.go @@ -0,0 +1,1150 @@ +package libp2p_test + +import ( + "bytes" + "context" + "crypto/ecdsa" + "fmt" + "math/rand" + "strings" + "sync" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p/libp2p" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p/libp2p/mock" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p/loadBalancer" + "github.com/btcsuite/btcd/btcec" + "github.com/libp2p/go-libp2p-crypto" + "github.com/libp2p/go-libp2p-discovery" + "github.com/libp2p/go-libp2p-net" + "github.com/libp2p/go-libp2p-peer" + "github.com/libp2p/go-libp2p-peerstore" + "github.com/libp2p/go-libp2p-peerstore/pstoremem" + "github.com/libp2p/go-libp2p/p2p/net/mock" + "github.com/multiformats/go-multiaddr" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +var timeoutWaitResponses = time.Second * 2 + +func waitDoneWithTimeout(t *testing.T, chanDone chan bool, timeout time.Duration) { + select { + case <-chanDone: + return + case <-time.After(timeout): + assert.Fail(t, "timeout reached") + } +} + +func prepareMessengerForMatchDataReceive(mes p2p.Messenger, matchData []byte, wg *sync.WaitGroup) { + _ = mes.CreateTopic("test", false) + + _ = mes.RegisterMessageProcessor("test", + &mock.MessageProcessorStub{ + ProcessMessageCalled: func(message p2p.MessageP2P) error { + if bytes.Equal(matchData, message.Data()) { + fmt.Printf("%s got the message\n", mes.ID().Pretty()) + wg.Done() + } + + return nil + }, + }) +} + +func getConnectableAddress(mes p2p.Messenger) string { + for _, addr := range mes.Addresses() { + if strings.Contains(addr, "circuit") { + continue + } + + return addr + } + + return "" +} + +func createMockNetworkOf2() (mocknet.Mocknet, p2p.Messenger, p2p.Messenger) { + netw := mocknet.New(context.Background()) + + mes1, _ := libp2p.NewMemoryMessenger(context.Background(), netw, p2p.PeerDiscoveryOff) + 
mes2, _ := libp2p.NewMemoryMessenger(context.Background(), netw, p2p.PeerDiscoveryOff) + + return netw, mes1, mes2 +} + +func createMockMessenger() p2p.Messenger { + netw := mocknet.New(context.Background()) + + mes, _ := libp2p.NewMemoryMessenger(context.Background(), netw, p2p.PeerDiscoveryOff) + + return mes +} + +func createLibP2PCredentialsMessenger() (peer.ID, crypto.PrivKey) { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + prvKey, _ := ecdsa.GenerateKey(btcec.S256(), r) + sk := (*crypto.Secp256k1PrivateKey)(prvKey) + id, _ := peer.IDFromPublicKey(sk.GetPublic()) + + return id, sk +} + +//------- NewMockLibp2pMessenger + +func TestNewMockLibp2pMessenger_NilContextShouldErr(t *testing.T) { + netw := mocknet.New(context.Background()) + + mes, err := libp2p.NewMemoryMessenger(nil, netw, p2p.PeerDiscoveryOff) + + assert.Nil(t, mes) + assert.Equal(t, err, p2p.ErrNilContext) +} + +func TestNewMockLibp2pMessenger_NilMocknetShouldErr(t *testing.T) { + mes, err := libp2p.NewMemoryMessenger(context.Background(), nil, p2p.PeerDiscoveryOff) + + assert.Nil(t, mes) + assert.Equal(t, err, p2p.ErrNilMockNet) +} + +func TestNewMockLibp2pMessenger_OkValsShouldWork(t *testing.T) { + netw := mocknet.New(context.Background()) + + mes, err := libp2p.NewMemoryMessenger(context.Background(), netw, p2p.PeerDiscoveryOff) + + assert.Nil(t, err) + assert.NotNil(t, mes) + + mes.Close() +} + +//------- NewSocketLibp2pMessenger + +func TestNewSocketLibp2pMessenger_NilContextShouldErr(t *testing.T) { + port := 4000 + + _, sk := createLibP2PCredentialsMessenger() + + mes, err := libp2p.NewNetworkMessenger( + nil, + port, + sk, + &mock.ConnManagerNotifieeStub{}, + &mock.PipeLoadBalancerStub{}, + p2p.PeerDiscoveryOff, + ) + + assert.Nil(t, mes) + assert.Equal(t, err, p2p.ErrNilContext) +} + +func TestNewSocketLibp2pMessenger_InvalidPortShouldErr(t *testing.T) { + port := 0 + + _, sk := createLibP2PCredentialsMessenger() + + mes, err := libp2p.NewNetworkMessenger( + 
context.Background(), + port, + sk, + &mock.ConnManagerNotifieeStub{}, + &mock.PipeLoadBalancerStub{}, + p2p.PeerDiscoveryOff, + ) + + assert.Nil(t, mes) + assert.Equal(t, err, p2p.ErrInvalidPort) +} + +func TestNewSocketLibp2pMessenger_NilP2PprivateKeyShouldErr(t *testing.T) { + port := 4000 + + mes, err := libp2p.NewNetworkMessenger( + context.Background(), + port, + nil, + &mock.ConnManagerNotifieeStub{}, + &mock.PipeLoadBalancerStub{}, + p2p.PeerDiscoveryOff, + ) + + assert.Nil(t, mes) + assert.Equal(t, err, p2p.ErrNilP2PprivateKey) +} + +func TestNewSocketLibp2pMessenger_NilPipeLoadBalancerShouldErr(t *testing.T) { + port := 4000 + + _, sk := createLibP2PCredentialsMessenger() + + mes, err := libp2p.NewNetworkMessenger( + context.Background(), + port, + sk, + &mock.ConnManagerNotifieeStub{}, + nil, + p2p.PeerDiscoveryOff, + ) + + assert.Nil(t, mes) + assert.Equal(t, err, p2p.ErrNilPipeLoadBalancer) +} + +func TestNewSocketLibp2pMessenger_NoConnMgrShouldWork(t *testing.T) { + port := 4000 + + _, sk := createLibP2PCredentialsMessenger() + + mes, err := libp2p.NewNetworkMessenger( + context.Background(), + port, + sk, + nil, + &mock.PipeLoadBalancerStub{ + CollectFromPipesCalled: func() []*p2p.SendableData { + return make([]*p2p.SendableData, 0) + }, + }, + p2p.PeerDiscoveryOff, + ) + + assert.NotNil(t, mes) + assert.Nil(t, err) + + mes.Close() +} + +func TestNewSocketLibp2pMessenger_WithConnMgrShouldWork(t *testing.T) { + port := 4000 + + _, sk := createLibP2PCredentialsMessenger() + + cns := &mock.ConnManagerNotifieeStub{ + ListenCalled: func(netw net.Network, ma multiaddr.Multiaddr) {}, + ListenCloseCalled: func(netw net.Network, ma multiaddr.Multiaddr) {}, + } + + mes, err := libp2p.NewNetworkMessenger( + context.Background(), + port, + sk, + cns, + &mock.PipeLoadBalancerStub{ + CollectFromPipesCalled: func() []*p2p.SendableData { + return make([]*p2p.SendableData, 0) + }, + }, + p2p.PeerDiscoveryOff, + ) + + assert.NotNil(t, mes) + assert.Nil(t, err) + 
assert.True(t, cns == mes.ConnManager()) + + mes.Close() +} + +func TestNewSocketLibp2pMessenger_WithMdnsPeerDiscoveryShouldWork(t *testing.T) { + port := 4000 + + _, sk := createLibP2PCredentialsMessenger() + + mes, err := libp2p.NewNetworkMessenger( + context.Background(), + port, + sk, + nil, + &mock.PipeLoadBalancerStub{ + CollectFromPipesCalled: func() []*p2p.SendableData { + return make([]*p2p.SendableData, 0) + }, + }, + p2p.PeerDiscoveryMdns, + ) + + assert.NotNil(t, mes) + assert.Nil(t, err) + + mes.Close() +} + +func TestNewSocketLibp2pMessenger_NoPeerDiscoveryImplementationShouldError(t *testing.T) { + port := 4000 + + _, sk := createLibP2PCredentialsMessenger() + + mes, err := libp2p.NewNetworkMessenger( + context.Background(), + port, + sk, + nil, + &mock.PipeLoadBalancerStub{ + CollectFromPipesCalled: func() []*p2p.SendableData { + return make([]*p2p.SendableData, 0) + }, + }, + 10000, + ) + + assert.Nil(t, mes) + assert.Equal(t, p2p.ErrPeerDiscoveryNotImplemented, err) +} + +//------- Messenger functionality + +func TestLibp2pMessenger_ConnectToPeerWrongAddressShouldErr(t *testing.T) { + mes1 := createMockMessenger() + + adr2 := "invalid_address" + + fmt.Printf("Connecting to %s...\n", adr2) + + err := mes1.ConnectToPeer(adr2) + assert.NotNil(t, err) + + mes1.Close() +} + +func TestLibp2pMessenger_ConnectToPeerAndClose2PeersShouldWork(t *testing.T) { + _, mes1, mes2 := createMockNetworkOf2() + + adr2 := mes2.Addresses()[0] + + fmt.Printf("Connecting to %s...\n", adr2) + + err := mes1.ConnectToPeer(adr2) + assert.Nil(t, err) + + err = mes1.Close() + assert.Nil(t, err) + + err = mes2.Close() + assert.Nil(t, err) +} + +func TestLibp2pMessenger_IsConnectedShouldWork(t *testing.T) { + _, mes1, mes2 := createMockNetworkOf2() + + adr2 := mes2.Addresses()[0] + + fmt.Printf("Connecting to %s...\n", adr2) + + _ = mes1.ConnectToPeer(adr2) + + assert.True(t, mes1.IsConnected(mes2.ID())) + assert.True(t, mes2.IsConnected(mes1.ID())) + + mes1.Close() + 
mes2.Close() +} + +func TestLibp2pMessenger_CreateTopicOkValsShouldWork(t *testing.T) { + mes := createMockMessenger() + + err := mes.CreateTopic("test", true) + assert.Nil(t, err) + + mes.Close() +} + +func TestLibp2pMessenger_CreateTopicTwiceShouldErr(t *testing.T) { + mes := createMockMessenger() + + _ = mes.CreateTopic("test", false) + err := mes.CreateTopic("test", false) + assert.Equal(t, p2p.ErrTopicAlreadyExists, err) + + mes.Close() +} + +func TestLibp2pMessenger_HasTopicIfHaveTopicShouldReturnTrue(t *testing.T) { + mes := createMockMessenger() + + _ = mes.CreateTopic("test", false) + + assert.True(t, mes.HasTopic("test")) + + mes.Close() +} + +func TestLibp2pMessenger_HasTopicIfDoNotHaveTopicShouldReturnFalse(t *testing.T) { + mes := createMockMessenger() + + _ = mes.CreateTopic("test", false) + + assert.False(t, mes.HasTopic("one topic")) + + mes.Close() +} + +func TestLibp2pMessenger_HasTopicValidatorDoNotHaveTopicShouldReturnFalse(t *testing.T) { + mes := createMockMessenger() + + _ = mes.CreateTopic("test", false) + + assert.False(t, mes.HasTopicValidator("one topic")) + + mes.Close() +} + +func TestLibp2pMessenger_HasTopicValidatorHaveTopicDoNotHaveValidatorShouldReturnFalse(t *testing.T) { + mes := createMockMessenger() + + _ = mes.CreateTopic("test", false) + + assert.False(t, mes.HasTopicValidator("test")) + + mes.Close() +} + +func TestLibp2pMessenger_HasTopicValidatorHaveTopicHaveValidatorShouldReturnTrue(t *testing.T) { + mes := createMockMessenger() + + _ = mes.CreateTopic("test", false) + _ = mes.RegisterMessageProcessor("test", &mock.MessageProcessorStub{}) + + assert.True(t, mes.HasTopicValidator("test")) + + mes.Close() +} + +func TestLibp2pMessenger_RegisterTopicValidatorOnInexistentTopicShouldErr(t *testing.T) { + mes := createMockMessenger() + + err := mes.RegisterMessageProcessor("test", &mock.MessageProcessorStub{}) + + assert.Equal(t, p2p.ErrNilTopic, err) + + mes.Close() +} + +func 
TestLibp2pMessenger_RegisterTopicValidatorWithNilHandlerShouldErr(t *testing.T) { + mes := createMockMessenger() + + _ = mes.CreateTopic("test", false) + + err := mes.RegisterMessageProcessor("test", nil) + + assert.Equal(t, p2p.ErrNilValidator, err) + + mes.Close() +} + +func TestLibp2pMessenger_RegisterTopicValidatorOkValsShouldWork(t *testing.T) { + mes := createMockMessenger() + + _ = mes.CreateTopic("test", false) + + err := mes.RegisterMessageProcessor("test", &mock.MessageProcessorStub{}) + + assert.Nil(t, err) + + mes.Close() +} + +func TestLibp2pMessenger_RegisterTopicValidatorReregistrationShouldErr(t *testing.T) { + mes := createMockMessenger() + + _ = mes.CreateTopic("test", false) + + //registration + _ = mes.RegisterMessageProcessor("test", &mock.MessageProcessorStub{}) + + //re-registration + err := mes.RegisterMessageProcessor("test", &mock.MessageProcessorStub{}) + + assert.Equal(t, p2p.ErrTopicValidatorOperationNotSupported, err) + + mes.Close() +} + +func TestLibp2pMessenger_UnegisterTopicValidatorOnInexistentTopicShouldErr(t *testing.T) { + mes := createMockMessenger() + + err := mes.UnregisterMessageProcessor("test") + + assert.Equal(t, p2p.ErrNilTopic, err) + + mes.Close() +} + +func TestLibp2pMessenger_UnegisterTopicValidatorOnANotRegisteredTopicShouldErr(t *testing.T) { + mes := createMockMessenger() + + _ = mes.CreateTopic("test", false) + + err := mes.UnregisterMessageProcessor("test") + + assert.Equal(t, p2p.ErrTopicValidatorOperationNotSupported, err) + + mes.Close() +} + +func TestLibp2pMessenger_UnregisterTopicValidatorShouldWork(t *testing.T) { + mes := createMockMessenger() + + _ = mes.CreateTopic("test", false) + + //registration + _ = mes.RegisterMessageProcessor("test", &mock.MessageProcessorStub{}) + + //unregistration + err := mes.UnregisterMessageProcessor("test") + + assert.Nil(t, err) + + mes.Close() +} + +func TestLibp2pMessenger_BroadcastDataBetween2PeersShouldWork(t *testing.T) { + msg := []byte("test message") + + _, 
mes1, mes2 := createMockNetworkOf2() + + adr2 := mes2.Addresses()[0] + + fmt.Printf("Connecting to %s...\n", adr2) + + _ = mes1.ConnectToPeer(adr2) + + wg := &sync.WaitGroup{} + chanDone := make(chan bool) + wg.Add(2) + + go func() { + wg.Wait() + chanDone <- true + }() + + prepareMessengerForMatchDataReceive(mes1, msg, wg) + prepareMessengerForMatchDataReceive(mes2, msg, wg) + + fmt.Println("Delaying as to allow peers to announce themselves on the opened topic...") + time.Sleep(time.Second) + + fmt.Printf("sending message from %s...\n", mes1.ID().Pretty()) + + mes1.Broadcast("test", msg) + + waitDoneWithTimeout(t, chanDone, timeoutWaitResponses) + + mes1.Close() + mes2.Close() +} + +func TestLibp2pMessenger_BroadcastDataOnTopicPipeBetween2PeersShouldWork(t *testing.T) { + msg := []byte("test message") + + _, mes1, mes2 := createMockNetworkOf2() + + adr2 := mes2.Addresses()[0] + + fmt.Printf("Connecting to %s...\n", adr2) + + _ = mes1.ConnectToPeer(adr2) + + wg := &sync.WaitGroup{} + chanDone := make(chan bool) + wg.Add(2) + + go func() { + wg.Wait() + chanDone <- true + }() + + prepareMessengerForMatchDataReceive(mes1, msg, wg) + prepareMessengerForMatchDataReceive(mes2, msg, wg) + + fmt.Println("Delaying as to allow peers to announce themselves on the opened topic...") + time.Sleep(time.Second) + + fmt.Printf("sending message from %s...\n", mes1.ID().Pretty()) + + mes1.Broadcast("test", msg) + + waitDoneWithTimeout(t, chanDone, timeoutWaitResponses) + + mes1.Close() + mes2.Close() +} + +func TestLibp2pMessenger_Peers(t *testing.T) { + _, mes1, mes2 := createMockNetworkOf2() + + adr2 := mes2.Addresses()[0] + + fmt.Printf("Connecting to %s...\n", adr2) + + _ = mes1.ConnectToPeer(adr2) + + //should know both peers + foundCurrent := false + foundConnected := false + + for _, p := range mes1.Peers() { + fmt.Println(p.Pretty()) + + if p.Pretty() == mes1.ID().Pretty() { + foundCurrent = true + } + if p.Pretty() == mes2.ID().Pretty() { + foundConnected = true + } + } + + 
assert.True(t, foundCurrent && foundConnected) + + mes1.Close() + mes2.Close() +} + +func TestLibp2pMessenger_ConnectedPeers(t *testing.T) { + netw, mes1, mes2 := createMockNetworkOf2() + mes3, _ := libp2p.NewMemoryMessenger(context.Background(), netw, p2p.PeerDiscoveryOff) + + adr2 := mes2.Addresses()[0] + + fmt.Printf("Connecting to %s...\n", adr2) + + _ = mes1.ConnectToPeer(adr2) + _ = mes3.ConnectToPeer(adr2) + + //connected peers: 1 ----- 2 ----- 3 + + assert.Equal(t, []p2p.PeerID{mes2.ID()}, mes1.ConnectedPeers()) + assert.Equal(t, []p2p.PeerID{mes2.ID()}, mes3.ConnectedPeers()) + assert.Equal(t, 2, len(mes2.ConnectedPeers())) + //no need to further test that mes2 is connected to mes1 and mes3 s this was tested in first 2 asserts + + mes1.Close() + mes2.Close() +} + +func TestLibp2pMessenger_ConnectedPeersShouldReturnUniquePeers(t *testing.T) { + pid1 := p2p.PeerID("pid1") + pid2 := p2p.PeerID("pid2") + pid3 := p2p.PeerID("pid3") + pid4 := p2p.PeerID("pid4") + + hs := &mock.HostStub{ + NetworkCalled: func() net.Network { + return &mock.NetworkStub{ + ConnsCalled: func() []net.Conn { + //generate a mock list that contain duplicates + return []net.Conn{ + generateConnWithRemotePeer(pid1), + generateConnWithRemotePeer(pid1), + generateConnWithRemotePeer(pid2), + generateConnWithRemotePeer(pid1), + generateConnWithRemotePeer(pid4), + generateConnWithRemotePeer(pid3), + generateConnWithRemotePeer(pid1), + generateConnWithRemotePeer(pid3), + generateConnWithRemotePeer(pid4), + generateConnWithRemotePeer(pid2), + generateConnWithRemotePeer(pid1), + generateConnWithRemotePeer(pid1), + } + }, + ConnectednessCalled: func(id peer.ID) net.Connectedness { + return net.Connected + }, + } + }, + } + + netw := mocknet.New(context.Background()) + mes, _ := libp2p.NewMemoryMessenger(context.Background(), netw, p2p.PeerDiscoveryOff) + //we can safely close the host as the next operations will be done on a mock + mes.Close() + + mes.SetHost(hs) + + peerList := 
mes.ConnectedPeers() + + assert.Equal(t, 4, len(peerList)) + assert.True(t, existInList(peerList, pid1)) + assert.True(t, existInList(peerList, pid2)) + assert.True(t, existInList(peerList, pid3)) + assert.True(t, existInList(peerList, pid4)) + +} + +func existInList(list []p2p.PeerID, pid p2p.PeerID) bool { + for _, p := range list { + if bytes.Equal(p.Bytes(), pid.Bytes()) { + return true + } + } + + return false +} + +func generateConnWithRemotePeer(pid p2p.PeerID) net.Conn { + return &mock.ConnStub{ + RemotePeerCalled: func() peer.ID { + return peer.ID(pid) + }, + } +} + +func TestLibp2pMessenger_KadDhtDiscoverNewPeersNilDiscovererShouldErr(t *testing.T) { + netw := mocknet.New(context.Background()) + + mes, _ := libp2p.NewMemoryMessenger(context.Background(), netw, p2p.PeerDiscoveryKadDht) + mes.SetDiscoverer(nil) + + err := mes.KadDhtDiscoverNewPeers() + assert.Equal(t, p2p.ErrNilDiscoverer, err) + + mes.Close() +} + +func TestLibp2pMessenger_KadDhtDiscoverNewPeersDiscovererErrsShouldErr(t *testing.T) { + ds := &mock.DiscovererStub{} + ds.FindPeersCalled = func(ctx context.Context, ns string, opts ...discovery.Option) (infos <-chan peerstore.PeerInfo, e error) { + return nil, errors.New("error") + } + + netw := mocknet.New(context.Background()) + + mes, _ := libp2p.NewMemoryMessenger(context.Background(), netw, p2p.PeerDiscoveryKadDht) + mes.SetDiscoverer(ds) + + err := mes.KadDhtDiscoverNewPeers() + assert.Equal(t, "error", err.Error()) + + mes.Close() +} + +func TestLibp2pMessenger_KadDhtDiscoverNewPeersShouldWork(t *testing.T) { + pInfo1 := peerstore.PeerInfo{ID: peer.ID("peer1")} + pInfo2 := peerstore.PeerInfo{ID: peer.ID("peer2")} + + ds := &mock.DiscovererStub{} + ds.FindPeersCalled = func(ctx context.Context, ns string, opts ...discovery.Option) (infos <-chan peerstore.PeerInfo, e error) { + ch := make(chan peerstore.PeerInfo) + + go func(ch chan peerstore.PeerInfo) { + //emulating find peers taking some time + time.Sleep(time.Millisecond * 100) + + ch 
<- pInfo1 + + time.Sleep(time.Millisecond * 100) + + ch <- pInfo2 + + time.Sleep(time.Millisecond * 100) + + close(ch) + }(ch) + + return ch, nil + } + + netw := mocknet.New(context.Background()) + + mes, _ := libp2p.NewMemoryMessenger(context.Background(), netw, p2p.PeerDiscoveryKadDht) + mes.SetDiscoverer(ds) + + foundPeers := make([]peerstore.PeerInfo, 0) + + mes.SetPeerDiscoveredHandler(func(pInfo peerstore.PeerInfo) { + foundPeers = append(foundPeers, pInfo) + }) + + err := mes.KadDhtDiscoverNewPeers() + assert.Nil(t, err) + assert.Equal(t, 2, len(foundPeers)) + assert.Equal(t, foundPeers[0], pInfo1) + assert.Equal(t, foundPeers[1], pInfo2) + + mes.Close() +} + +func TestLibp2pMessenger_KadDhtDiscoverNewPeersWithRealDiscovererShouldWork(t *testing.T) { + netw := mocknet.New(context.Background()) + + advertiser, _ := libp2p.NewMemoryMessenger(context.Background(), netw, p2p.PeerDiscoveryKadDht) + mes1, _ := libp2p.NewMemoryMessenger(context.Background(), netw, p2p.PeerDiscoveryKadDht) + mes2, _ := libp2p.NewMemoryMessenger(context.Background(), netw, p2p.PeerDiscoveryKadDht) + + adrAdvertiser := advertiser.Addresses()[0] + + mutAdvertiser := sync.Mutex{} + peersFoundByAdvertiser := make(map[peer.ID]peerstore.PeerInfo) + advertiser.SetPeerDiscoveredHandler(func(pInfo peerstore.PeerInfo) { + mutAdvertiser.Lock() + peersFoundByAdvertiser[pInfo.ID] = pInfo + mutAdvertiser.Unlock() + }) + + mutMes1 := sync.Mutex{} + peersFoundByMes1 := make(map[peer.ID]peerstore.PeerInfo) + mes1.SetPeerDiscoveredHandler(func(pInfo peerstore.PeerInfo) { + mutMes1.Lock() + peersFoundByMes1[pInfo.ID] = pInfo + mutMes1.Unlock() + }) + + mutMes2 := sync.Mutex{} + peersFoundByMes2 := make(map[peer.ID]peerstore.PeerInfo) + mes2.SetPeerDiscoveredHandler(func(pInfo peerstore.PeerInfo) { + mutMes2.Lock() + peersFoundByMes2[pInfo.ID] = pInfo + mutMes2.Unlock() + }) + + mes1.ConnectToPeer(adrAdvertiser) + mes2.ConnectToPeer(adrAdvertiser) + + time.Sleep(time.Second) + + err := 
advertiser.KadDhtDiscoverNewPeers() + assert.Nil(t, err) + + err = mes1.KadDhtDiscoverNewPeers() + assert.Nil(t, err) + + err = mes2.KadDhtDiscoverNewPeers() + assert.Nil(t, err) + + //we can not make an assertion for len to be equal to 3 because there is simple no guarantee + //that a peer always fetch entire networks known by its peers + assert.True(t, len(peersFoundByAdvertiser) >= 2) + assert.True(t, len(peersFoundByMes1) >= 2) + assert.True(t, len(peersFoundByMes2) >= 2) + + mes1.Close() + mes2.Close() + advertiser.Close() +} + +func TestLibp2pMessenger_TrimConnectionsCallsConnManagerTrimConnections(t *testing.T) { + port := 4000 + + _, sk := createLibP2PCredentialsMessenger() + + wasCalled := false + + cns := &mock.ConnManagerNotifieeStub{ + ListenCalled: func(netw net.Network, ma multiaddr.Multiaddr) {}, + ListenCloseCalled: func(netw net.Network, ma multiaddr.Multiaddr) {}, + TrimOpenConnsCalled: func(ctx context.Context) { + wasCalled = true + }, + } + + mes, _ := libp2p.NewNetworkMessenger( + context.Background(), + port, + sk, + cns, + &mock.PipeLoadBalancerStub{ + CollectFromPipesCalled: func() []*p2p.SendableData { + return make([]*p2p.SendableData, 0) + }, + }, + p2p.PeerDiscoveryOff, + ) + + mes.TrimConnections() + + assert.True(t, wasCalled) + + mes.Close() +} + +func TestLibp2pMessenger_SendDataThrottlerShouldReturnCorrectObject(t *testing.T) { + port := 4000 + + _, sk := createLibP2PCredentialsMessenger() + + sdt := &mock.PipeLoadBalancerStub{ + AddPipeCalled: func(pipe string) error { + return nil + }, + CollectFromPipesCalled: func() []*p2p.SendableData { + return make([]*p2p.SendableData, 0) + }, + } + + mes, _ := libp2p.NewNetworkMessenger( + context.Background(), + port, + sk, + nil, + sdt, + p2p.PeerDiscoveryOff, + ) + + sdtReturned := mes.OutgoingPipeLoadBalancer() + + assert.True(t, sdt == sdtReturned) + + mes.Close() +} + +func TestLibp2pMessenger_SendDirectWithMockNetToConnectedPeerShouldWork(t *testing.T) { + msg := []byte("test 
message") + + _, mes1, mes2 := createMockNetworkOf2() + + adr2 := mes2.Addresses()[0] + + fmt.Printf("Connecting to %s...\n", adr2) + + _ = mes1.ConnectToPeer(adr2) + + wg := &sync.WaitGroup{} + chanDone := make(chan bool) + wg.Add(1) + + go func() { + wg.Wait() + chanDone <- true + }() + + prepareMessengerForMatchDataReceive(mes2, msg, wg) + + fmt.Println("Delaying as to allow peers to announce themselves on the opened topic...") + time.Sleep(time.Second) + + fmt.Printf("sending message from %s...\n", mes1.ID().Pretty()) + + err := mes1.SendToConnectedPeer("test", msg, mes2.ID()) + + assert.Nil(t, err) + + waitDoneWithTimeout(t, chanDone, timeoutWaitResponses) + + mes1.Close() + mes2.Close() +} + +func TestLibp2pMessenger_SendDirectWithRealNetToConnectedPeerShouldWork(t *testing.T) { + msg := []byte("test message") + + _, sk1 := createLibP2PCredentialsMessenger() + _, sk2 := createLibP2PCredentialsMessenger() + + fmt.Println("Messenger 1:") + mes1, _ := libp2p.NewNetworkMessenger( + context.Background(), + 4000, + sk1, + nil, + loadBalancer.NewOutgoingPipeLoadBalancer(), + p2p.PeerDiscoveryOff, + ) + + fmt.Println("Messenger 2:") + mes2, _ := libp2p.NewNetworkMessenger( + context.Background(), + 4001, + sk2, + nil, + loadBalancer.NewOutgoingPipeLoadBalancer(), + p2p.PeerDiscoveryOff, + ) + + err := mes1.ConnectToPeer(getConnectableAddress(mes2)) + assert.Nil(t, err) + + wg := &sync.WaitGroup{} + chanDone := make(chan bool) + wg.Add(2) + + go func() { + wg.Wait() + chanDone <- true + }() + + prepareMessengerForMatchDataReceive(mes1, msg, wg) + prepareMessengerForMatchDataReceive(mes2, msg, wg) + + fmt.Println("Delaying as to allow peers to announce themselves on the opened topic...") + time.Sleep(time.Second) + + fmt.Printf("Messenger 1 is sending message from %s...\n", mes1.ID().Pretty()) + err = mes1.SendToConnectedPeer("test", msg, mes2.ID()) + assert.Nil(t, err) + + time.Sleep(time.Second) + fmt.Printf("Messenger 2 is sending message from %s...\n", 
mes2.ID().Pretty()) + err = mes2.SendToConnectedPeer("test", msg, mes1.ID()) + assert.Nil(t, err) + + waitDoneWithTimeout(t, chanDone, timeoutWaitResponses) + + mes1.Close() + mes2.Close() +} + +//------- Bootstrap + +func TestNetworkMessenger_BootstrapPeerDiscoveryOffShouldReturnNil(t *testing.T) { + _, sk := createLibP2PCredentialsMessenger() + + mes, _ := libp2p.NewNetworkMessenger( + context.Background(), + 4000, + sk, + nil, + loadBalancer.NewOutgoingPipeLoadBalancer(), + p2p.PeerDiscoveryOff, + ) + + err := mes.Bootstrap() + + assert.Nil(t, err) + + mes.Close() +} + +func TestNetworkMessenger_BootstrapMdnsPeerDiscoveryShouldReturnNil(t *testing.T) { + _, sk := createLibP2PCredentialsMessenger() + + mes, _ := libp2p.NewNetworkMessenger( + context.Background(), + 23000, + sk, + nil, + loadBalancer.NewOutgoingPipeLoadBalancer(), + p2p.PeerDiscoveryMdns, + ) + + err := mes.Bootstrap() + assert.Nil(t, err) + + mes.Close() +} + +func TestNetworkMessenger_BootstrapMdnsPeerDiscoveryCalledTwiceShouldErr(t *testing.T) { + _, sk := createLibP2PCredentialsMessenger() + + mes, _ := libp2p.NewNetworkMessenger( + context.Background(), + 23000, + sk, + nil, + loadBalancer.NewOutgoingPipeLoadBalancer(), + p2p.PeerDiscoveryMdns, + ) + + _ = mes.Bootstrap() + err := mes.Bootstrap() + assert.Equal(t, p2p.ErrPeerDiscoveryProcessAlreadyStarted, err) + + mes.Close() +} + +func TestNetworkMessenger_HandlePeerFoundNotFoundShouldTryToConnect(t *testing.T) { + _, sk := createLibP2PCredentialsMessenger() + + mes, _ := libp2p.NewNetworkMessenger( + context.Background(), + 23000, + sk, + nil, + loadBalancer.NewOutgoingPipeLoadBalancer(), + p2p.PeerDiscoveryOff, + ) + //closing "real" host as to check with a mock host + mes.Close() + + newPeerInfo := peerstore.PeerInfo{ + ID: peer.ID("new found peerID"), + } + testAddress := "/ip4/127.0.0.1/tcp/23000/p2p/16Uiu2HAkyqtHSEJDkYhVWTtm9j58Mq5xQJgrApBYXMwS6sdamXuE" + address, _ := multiaddr.NewMultiaddr(testAddress) + newPeerInfo.Addrs = 
[]multiaddr.Multiaddr{address} + + chanConnected := make(chan struct{}) + + mockHost := &mock.HostStub{ + PeerstoreCalled: func() peerstore.Peerstore { + return peerstore.NewPeerstore( + pstoremem.NewKeyBook(), + pstoremem.NewAddrBook(), + pstoremem.NewPeerMetadata()) + }, + ConnectCalled: func(ctx context.Context, pi peerstore.PeerInfo) error { + if newPeerInfo.ID == pi.ID { + chanConnected <- struct{}{} + } + + return nil + }, + } + + mes.SetHost(mockHost) + + mes.HandlePeerFound(newPeerInfo) + + select { + case <-chanConnected: + return + case <-time.After(timeoutWaitResponses): + assert.Fail(t, "timeout while waiting to call host.Connect") + } +} + +func TestNetworkMessenger_HandlePeerFoundPeerFoundShouldNotTryToConnect(t *testing.T) { + _, sk := createLibP2PCredentialsMessenger() + + mes, _ := libp2p.NewNetworkMessenger( + context.Background(), + 23000, + sk, + nil, + loadBalancer.NewOutgoingPipeLoadBalancer(), + p2p.PeerDiscoveryOff, + ) + //closing "real" host as to check with a mock host + mes.Close() + + newPeerInfo := peerstore.PeerInfo{ + ID: peer.ID("new found peerID"), + } + testAddress := "/ip4/127.0.0.1/tcp/23000/p2p/16Uiu2HAkyqtHSEJDkYhVWTtm9j58Mq5xQJgrApBYXMwS6sdamXuE" + address, _ := multiaddr.NewMultiaddr(testAddress) + newPeerInfo.Addrs = []multiaddr.Multiaddr{address} + + chanConnected := make(chan struct{}) + + mockHost := &mock.HostStub{ + PeerstoreCalled: func() peerstore.Peerstore { + ps := peerstore.NewPeerstore( + pstoremem.NewKeyBook(), + pstoremem.NewAddrBook(), + pstoremem.NewPeerMetadata()) + ps.AddAddrs(newPeerInfo.ID, newPeerInfo.Addrs, peerstore.PermanentAddrTTL) + + return ps + }, + ConnectCalled: func(ctx context.Context, pi peerstore.PeerInfo) error { + if newPeerInfo.ID == pi.ID { + chanConnected <- struct{}{} + } + + return nil + }, + } + + mes.SetHost(mockHost) + + mes.HandlePeerFound(newPeerInfo) + + select { + case <-chanConnected: + assert.Fail(t, "should have not called host.Connect") + case 
<-time.After(timeoutWaitResponses): + return + } +} diff --git a/p2p/loadBalancer/export_test.go b/p2p/loadBalancer/export_test.go new file mode 100644 index 00000000000..0cfc8a1b4a6 --- /dev/null +++ b/p2p/loadBalancer/export_test.go @@ -0,0 +1,21 @@ +package loadBalancer + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" +) + +func (oplb *OutgoingPipeLoadBalancer) Chans() []chan *p2p.SendableData { + return oplb.chans +} + +func (oplb *OutgoingPipeLoadBalancer) Names() []string { + return oplb.names +} + +func (oplb *OutgoingPipeLoadBalancer) NamesChans() map[string]chan *p2p.SendableData { + return oplb.namesChans +} + +func DefaultSendPipe() string { + return defaultSendPipe +} diff --git a/p2p/loadBalancer/outgoingPipeLoadBalancer.go b/p2p/loadBalancer/outgoingPipeLoadBalancer.go new file mode 100644 index 00000000000..c9669503e1a --- /dev/null +++ b/p2p/loadBalancer/outgoingPipeLoadBalancer.go @@ -0,0 +1,126 @@ +package loadBalancer + +import ( + "sync" + + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" +) + +const defaultSendPipe = "default send pipe" + +// OutgoingPipeLoadBalancer is a component that evenly balances requests to be sent +type OutgoingPipeLoadBalancer struct { + mut sync.RWMutex + chans []chan *p2p.SendableData + names []string + //namesChans is defined only for performance purposes as to fast search by name + //iteration is done directly on slices as that is used very often and is about 50x + //faster then an iteration over a map + namesChans map[string]chan *p2p.SendableData +} + +// NewOutgoingPipeLoadBalancer creates a new instance of a SendDataThrottle instance +func NewOutgoingPipeLoadBalancer() *OutgoingPipeLoadBalancer { + sdt := &OutgoingPipeLoadBalancer{ + chans: make([]chan *p2p.SendableData, 0), + names: make([]string, 0), + namesChans: make(map[string]chan *p2p.SendableData), + } + + sdt.appendPipe(defaultSendPipe) + + return sdt +} + +func (oplb *OutgoingPipeLoadBalancer) appendPipe(pipe string) { + oplb.names = 
append(oplb.names, pipe) + ch := make(chan *p2p.SendableData) + oplb.chans = append(oplb.chans, ch) + oplb.namesChans[pipe] = ch +} + +// AddPipe adds a new pipe to the throttler +func (oplb *OutgoingPipeLoadBalancer) AddPipe(pipe string) error { + oplb.mut.Lock() + defer oplb.mut.Unlock() + + for _, name := range oplb.names { + if name == pipe { + return p2p.ErrPipeAlreadyExists + } + } + + oplb.appendPipe(pipe) + + return nil +} + +// RemovePipe removes an existing pipe from the throttler +func (oplb *OutgoingPipeLoadBalancer) RemovePipe(pipe string) error { + if pipe == defaultSendPipe { + return p2p.ErrPipeCanNotBeDeleted + } + + oplb.mut.Lock() + defer oplb.mut.Unlock() + + index := -1 + + for idx, name := range oplb.names { + if name == pipe { + index = idx + break + } + } + + if index == -1 { + return p2p.ErrPipeDoNotExists + } + + //remove the index-th element in the chan slice + copy(oplb.chans[index:], oplb.chans[index+1:]) + oplb.chans[len(oplb.chans)-1] = nil + oplb.chans = oplb.chans[:len(oplb.chans)-1] + + //remove the index-th element in the names slice + copy(oplb.names[index:], oplb.names[index+1:]) + oplb.names = oplb.names[:len(oplb.names)-1] + + delete(oplb.namesChans, pipe) + + return nil +} + +// GetChannelOrDefault fetches the required pipe or the default if the pipe is not present +func (oplb *OutgoingPipeLoadBalancer) GetChannelOrDefault(pipe string) chan *p2p.SendableData { + oplb.mut.RLock() + defer oplb.mut.RUnlock() + + ch, _ := oplb.namesChans[pipe] + if ch != nil { + return ch + } + + return oplb.chans[0] +} + +// CollectFromPipes gets the waiting object found in the non buffered chans (it iterates through whole collection of +// defined pipes) and returns a list of those objects. If a chan do not have a waiting object to be fetch, that chan will +// be skipped. 
Method always returns a valid slice object and in the case that no chan has an object to be sent, +// the slice will be empty +func (oplb *OutgoingPipeLoadBalancer) CollectFromPipes() []*p2p.SendableData { + oplb.mut.RLock() + defer oplb.mut.RUnlock() + + collectedData := make([]*p2p.SendableData, 0) + + for _, channel := range oplb.chans { + select { + case sendable := <-channel: + collectedData = append(collectedData, sendable) + default: + } + } + + return collectedData +} diff --git a/p2p/loadBalancer/outgoingPipeLoadBalancer_test.go b/p2p/loadBalancer/outgoingPipeLoadBalancer_test.go new file mode 100644 index 00000000000..7709a881f3a --- /dev/null +++ b/p2p/loadBalancer/outgoingPipeLoadBalancer_test.go @@ -0,0 +1,291 @@ +package loadBalancer_test + +import ( + "sync" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p/loadBalancer" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +var errLenDifferent = errors.New("len different for names and chans") +var errLenDifferentNamesChans = errors.New("len different for names and chans") +var errMissingPipe = errors.New("missing pipe") +var errPipesMismatch = errors.New("pipes mismatch") + +func checkIntegrity(oplb *loadBalancer.OutgoingPipeLoadBalancer, name string) error { + if len(oplb.Names()) != len(oplb.Chans()) { + return errLenDifferent + } + + if len(oplb.Names()) != len(oplb.NamesChans()) { + return errLenDifferentNamesChans + } + + idxFound := -1 + for i, n := range oplb.Names() { + if n == name { + idxFound = i + break + } + } + + if idxFound == -1 && oplb.NamesChans()[name] == nil { + return errMissingPipe + } + + if oplb.NamesChans()[name] != oplb.Chans()[idxFound] { + return errPipesMismatch + } + + return nil +} + +//------- NewOutgoingPipeLoadBalancer + +func TestNewOutgoingPipeLoadBalancer_ShouldNotProduceNil(t *testing.T) { + t.Parallel() + + oplb := loadBalancer.NewOutgoingPipeLoadBalancer() + + 
assert.NotNil(t, oplb) +} + +func TestNewOutgoingPipeLoadBalancer_ShouldAddDefaultPipe(t *testing.T) { + t.Parallel() + + oplb := loadBalancer.NewOutgoingPipeLoadBalancer() + + assert.Equal(t, 1, len(oplb.Names())) + assert.Nil(t, checkIntegrity(oplb, loadBalancer.DefaultSendPipe())) +} + +//------- AddPipe + +func TestOutgoingPipeLoadBalancer_AddPipeNewPipeShouldNotErrAndAddNewPipe(t *testing.T) { + t.Parallel() + + oplb := loadBalancer.NewOutgoingPipeLoadBalancer() + + err := oplb.AddPipe("test") + + assert.Nil(t, err) + assert.Equal(t, 2, len(oplb.Names())) + assert.Nil(t, checkIntegrity(oplb, loadBalancer.DefaultSendPipe())) + assert.Nil(t, checkIntegrity(oplb, "test")) +} + +func TestOutgoingPipeLoadBalancer_AddPipeDefaultPipeShouldErr(t *testing.T) { + t.Parallel() + + oplb := loadBalancer.NewOutgoingPipeLoadBalancer() + + err := oplb.AddPipe(loadBalancer.DefaultSendPipe()) + + assert.Equal(t, p2p.ErrPipeAlreadyExists, err) +} + +func TestOutgoingPipeLoadBalancer_AddPipeReAddPipeShouldErr(t *testing.T) { + t.Parallel() + + oplb := loadBalancer.NewOutgoingPipeLoadBalancer() + + _ = oplb.AddPipe("test") + err := oplb.AddPipe("test") + + assert.Equal(t, p2p.ErrPipeAlreadyExists, err) +} + +//------- RemovePipe + +func TestOutgoingPipeLoadBalancer_RemovePipeRemoveDefaultShouldErr(t *testing.T) { + t.Parallel() + + oplb := loadBalancer.NewOutgoingPipeLoadBalancer() + + err := oplb.RemovePipe(loadBalancer.DefaultSendPipe()) + + assert.Equal(t, p2p.ErrPipeCanNotBeDeleted, err) +} + +func TestOutgoingPipeLoadBalancer_RemovePipeRemoveNotFoundPipeShouldErr(t *testing.T) { + t.Parallel() + + oplb := loadBalancer.NewOutgoingPipeLoadBalancer() + + err := oplb.RemovePipe("test") + + assert.Equal(t, p2p.ErrPipeDoNotExists, err) +} + +func TestOutgoingPipeLoadBalancer_RemovePipeRemoveLastPipeAddedShouldWork(t *testing.T) { + t.Parallel() + + oplb := loadBalancer.NewOutgoingPipeLoadBalancer() + + _ = oplb.AddPipe("test1") + _ = oplb.AddPipe("test2") + _ = 
oplb.AddPipe("test3") + + err := oplb.RemovePipe("test3") + + assert.Nil(t, err) + + assert.Equal(t, 3, len(oplb.Names())) + assert.Nil(t, checkIntegrity(oplb, loadBalancer.DefaultSendPipe())) + assert.Nil(t, checkIntegrity(oplb, "test1")) + assert.Nil(t, checkIntegrity(oplb, "test2")) + assert.Equal(t, errMissingPipe, checkIntegrity(oplb, "test3")) +} + +func TestOutgoingPipeLoadBalancer_RemovePipeRemoveFirstPipeAddedShouldWork(t *testing.T) { + t.Parallel() + + oplb := loadBalancer.NewOutgoingPipeLoadBalancer() + + _ = oplb.AddPipe("test1") + _ = oplb.AddPipe("test2") + _ = oplb.AddPipe("test3") + + err := oplb.RemovePipe("test1") + + assert.Nil(t, err) + + assert.Equal(t, 3, len(oplb.Names())) + assert.Nil(t, checkIntegrity(oplb, loadBalancer.DefaultSendPipe())) + assert.Equal(t, errMissingPipe, checkIntegrity(oplb, "test1")) + assert.Nil(t, checkIntegrity(oplb, "test2")) + assert.Nil(t, checkIntegrity(oplb, "test3")) +} + +func TestOutgoingPipeLoadBalancer_RemovePipeRemoveMiddlePipeAddedShouldWork(t *testing.T) { + t.Parallel() + + oplb := loadBalancer.NewOutgoingPipeLoadBalancer() + + _ = oplb.AddPipe("test1") + _ = oplb.AddPipe("test2") + _ = oplb.AddPipe("test3") + + err := oplb.RemovePipe("test2") + + assert.Nil(t, err) + + assert.Equal(t, 3, len(oplb.Names())) + assert.Nil(t, checkIntegrity(oplb, loadBalancer.DefaultSendPipe())) + assert.Nil(t, checkIntegrity(oplb, "test1")) + assert.Equal(t, errMissingPipe, checkIntegrity(oplb, "test2")) + assert.Nil(t, checkIntegrity(oplb, "test3")) +} + +//------- GetChannelOrDefault + +func TestOutgoingPipeLoadBalancer_GetChannelOrDefaultNotFoundShouldReturnDefault(t *testing.T) { + t.Parallel() + + oplb := loadBalancer.NewOutgoingPipeLoadBalancer() + + _ = oplb.AddPipe("test1") + + pipe := oplb.GetChannelOrDefault("missing pipe") + + assert.True(t, oplb.NamesChans()[loadBalancer.DefaultSendPipe()] == pipe) +} + +func TestOutgoingPipeLoadBalancer_GetChannelOrDefaultFoundShouldReturnChannel(t *testing.T) { + 
t.Parallel() + + oplb := loadBalancer.NewOutgoingPipeLoadBalancer() + + _ = oplb.AddPipe("test1") + + pipe := oplb.GetChannelOrDefault("test1") + + assert.True(t, oplb.NamesChans()["test1"] == pipe) +} + +//------- CollectFromPipes + +func TestOutgoingPipeLoadBalancer_CollectFromPipesNoObjectsWaitingShouldReturnEmpty(t *testing.T) { + t.Parallel() + + oplb := loadBalancer.NewOutgoingPipeLoadBalancer() + + objs := oplb.CollectFromPipes() + + assert.Equal(t, 0, len(objs)) +} + +func TestOutgoingPipeLoadBalancer_CollectFromPipesShouldWork(t *testing.T) { + t.Parallel() + + oplb := loadBalancer.NewOutgoingPipeLoadBalancer() + + oplb.AddPipe("test") + + obj1 := &p2p.SendableData{Topic: "test"} + obj2 := &p2p.SendableData{Topic: "default"} + + chanDone := make(chan bool) + wg := sync.WaitGroup{} + wg.Add(3) + + //send on pipe test + go func() { + oplb.GetChannelOrDefault("test") <- obj1 + wg.Done() + }() + + //send on default pipe + go func() { + oplb.GetChannelOrDefault(loadBalancer.DefaultSendPipe()) <- obj2 + wg.Done() + }() + + //func to wait finishing sending and receiving + go func() { + wg.Wait() + chanDone <- true + }() + + //func to periodically consume from pipes + go func() { + foundObj1 := false + foundObj2 := false + + for { + objs := oplb.CollectFromPipes() + + for idx := range objs { + if !foundObj1 { + if objs[idx] == obj1 { + foundObj1 = true + } + } + + if !foundObj2 { + if objs[idx] == obj2 { + foundObj2 = true + } + } + } + + if foundObj1 && foundObj2 { + break + } + } + + wg.Done() + }() + + select { + case <-chanDone: + return + case <-time.After(time.Second * 2): + assert.Fail(t, "timeout") + return + } +} diff --git a/p2p/memMessenger.go b/p2p/memMessenger.go deleted file mode 100644 index ea5b9ad5fcc..00000000000 --- a/p2p/memMessenger.go +++ /dev/null @@ -1,668 +0,0 @@ -package p2p - -import ( - "context" - "encoding/binary" - "errors" - "fmt" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/ElrondNetwork/elrond-go-sandbox/hashing" 
- "github.com/ElrondNetwork/elrond-go-sandbox/marshal" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p/mock" - "github.com/btcsuite/btcutil/base58" - "github.com/libp2p/go-libp2p-crypto" - "github.com/libp2p/go-libp2p-net" - "github.com/libp2p/go-libp2p-peer" - "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p-pubsub/pb" - "github.com/multiformats/go-multiaddr" -) - -const signPrefix = "libp2p-pubsub:" - -var mutGloballyRegPeers *sync.RWMutex - -// globallyRegisteredPeers is the main map used for in memory communication -var globallyRegisteredPeers map[peer.ID]*MemMessenger - -func init() { - mutGloballyRegPeers = &sync.RWMutex{} - globallyRegisteredPeers = make(map[peer.ID]*MemMessenger) -} - -// ReInitializeGloballyRegisteredPeers will clean all known memMessenger instances -func ReInitializeGloballyRegisteredPeers() { - mutGloballyRegPeers.Lock() - globallyRegisteredPeers = make(map[peer.ID]*MemMessenger) - mutGloballyRegPeers.Unlock() -} - -// MemMessenger is a fake memory Messenger used for testing -// TODO keep up with NetMessenger modifications -type MemMessenger struct { - peerID peer.ID - privKey crypto.PrivKey - mutConnectedPeers sync.Mutex - connectedPeers map[peer.ID]*MemMessenger - marsh marshal.Marshalizer - hasher hashing.Hasher - rt *RoutingTable - seqNo uint64 - mutClosed sync.RWMutex - closed bool - chSend chan *pubsub.Message - mutSeenMessages sync.Mutex - seenMessages *TimeCache - mutValidators sync.RWMutex - validators map[string]pubsub.Validator - mutTopics sync.RWMutex - topics map[string]*Topic - mutGossipCache sync.Mutex - gossipCache *TimeCache -} - -// NewMemMessenger creates a memory Messenger with the same behaviour as the NetMessenger. -// Should be used in tests! -func NewMemMessenger(marsh marshal.Marshalizer, hasher hashing.Hasher, - cp *ConnectParams) (*MemMessenger, error) { - - if marsh == nil { - return nil, errors.New("marshalizer is nil! 
Can't create messenger") - } - - if hasher == nil { - return nil, errors.New("hasher is nil! Can't create messenger") - } - - mm := MemMessenger{ - seqNo: 0, - marsh: marsh, - closed: false, - hasher: hasher, - chSend: make(chan *pubsub.Message), - privKey: cp.PrivKey, - peerID: cp.ID, - mutConnectedPeers: sync.Mutex{}, - mutClosed: sync.RWMutex{}, - mutSeenMessages: sync.Mutex{}, - seenMessages: NewTimeCache(time.Second * 120), - mutValidators: sync.RWMutex{}, - validators: make(map[string]pubsub.Validator), - mutTopics: sync.RWMutex{}, - topics: make(map[string]*Topic), - mutGossipCache: sync.Mutex{}, - gossipCache: NewTimeCache(durTimeCache), - } - - mm.mutConnectedPeers.Lock() - mm.connectedPeers = make(map[peer.ID]*MemMessenger) - mm.connectedPeers[mm.peerID] = &mm - mm.mutConnectedPeers.Unlock() - - mm.rt = NewRoutingTable(mm.peerID) - - mutGloballyRegPeers.Lock() - globallyRegisteredPeers[mm.peerID] = &mm - mutGloballyRegPeers.Unlock() - - go mm.processLoop() - - return &mm, nil -} - -// Closes a MemMessenger. 
Receiving and sending data is no longer possible -func (mm *MemMessenger) Close() error { - mm.mutClosed.Lock() - defer mm.mutClosed.Unlock() - - mm.closed = true - - return nil -} - -// ID returns the current id -func (mm *MemMessenger) ID() peer.ID { - return mm.peerID -} - -// Peers returns the connected peers list -func (mm *MemMessenger) Peers() []peer.ID { - peers := make([]peer.ID, 0) - - mm.mutConnectedPeers.Lock() - for p := range mm.connectedPeers { - peers = append(peers, p) - } - mm.mutConnectedPeers.Unlock() - - return peers -} - -// Conns return the connections made by this memory messenger -func (mm *MemMessenger) Conns() []net.Conn { - conns := make([]net.Conn, 0) - - mm.mutConnectedPeers.Lock() - for p := range mm.connectedPeers { - c := &mock.ConnMock{LocalP: mm.peerID, RemoteP: p} - conns = append(conns, c) - } - mm.mutConnectedPeers.Unlock() - - return conns -} - -// Marshalizer returns the used marshalizer object -func (mm *MemMessenger) Marshalizer() marshal.Marshalizer { - return mm.marsh -} - -// Hasher returns the used marshalizer object -func (mm *MemMessenger) Hasher() hashing.Hasher { - return mm.hasher -} - -// RouteTable will return the RoutingTable object -func (mm *MemMessenger) RouteTable() *RoutingTable { - return mm.rt -} - -// Addresses will return all addresses bound to current messenger -func (mm *MemMessenger) Addresses() []string { - return []string{string(mm.peerID.Pretty())} -} - -// ConnectToAddresses is used to explicitly connect to a well known set of addresses -func (mm *MemMessenger) ConnectToAddresses(ctx context.Context, addresses []string) { - for i := 0; i < len(addresses); i++ { - addr := peer.ID(base58.Decode(addresses[i])) - - mutGloballyRegPeers.RLock() - val, ok := globallyRegisteredPeers[addr] - mutGloballyRegPeers.RUnlock() - - if !ok { - log.Error(fmt.Sprintf("Bootstrapping the peer '%v' failed! 
[not found]\n", addresses[i])) - continue - } - - if mm.peerID == addr { - //won't add self - continue - } - - mm.mutConnectedPeers.Lock() - //connect this to other peer - mm.connectedPeers[addr] = val - //connect other the other peer to this - val.connectedPeers[mm.peerID] = mm - mm.mutConnectedPeers.Unlock() - } -} - -// Bootstrap will try to connect to as many peers as possible -func (mm *MemMessenger) Bootstrap(ctx context.Context) { - go mm.doBootstrap() -} - -func (mm *MemMessenger) doBootstrap() { - for { - mm.mutClosed.RLock() - if mm.closed { - mm.mutClosed.RUnlock() - return - } - mm.mutClosed.RUnlock() - - temp := make(map[peer.ID]*MemMessenger, 0) - - mutGloballyRegPeers.RLock() - for k, v := range globallyRegisteredPeers { - if !mm.rt.Has(k) { - mm.rt.Update(k) - - temp[k] = v - } - } - mutGloballyRegPeers.RUnlock() - - mm.mutConnectedPeers.Lock() - for k, v := range temp { - mm.connectedPeers[k] = v - } - mm.mutConnectedPeers.Unlock() - - time.Sleep(time.Second) - } - -} - -// PrintConnected displays the connected peers -func (mm *MemMessenger) PrintConnected() { - conns := mm.Conns() - - connectedTo := fmt.Sprintf("Node %s is connected to: \n", mm.ID().Pretty()) - for i := 0; i < len(conns); i++ { - connectedTo = connectedTo + fmt.Sprintf("\t- %s with distance %d\n", conns[i].RemotePeer().Pretty(), - ComputeDistanceAD(mm.ID(), conns[i].RemotePeer())) - } - - log.Debug(connectedTo) -} - -// AddAddress adds a new address to peer store -func (mm *MemMessenger) AddAddress(p peer.ID, addr multiaddr.Multiaddr, ttl time.Duration) { - mutGloballyRegPeers.RLock() - val, ok := globallyRegisteredPeers[p] - mutGloballyRegPeers.RUnlock() - - if !ok { - val = nil - } - - mm.mutConnectedPeers.Lock() - mm.connectedPeers[p] = val - mm.mutConnectedPeers.Unlock() -} - -// Connectedness tests for a connection between self and another peer -func (mm *MemMessenger) Connectedness(pid peer.ID) net.Connectedness { - mm.mutConnectedPeers.Lock() - _, ok := 
mm.connectedPeers[pid] - mm.mutConnectedPeers.Unlock() - - if ok { - return net.Connected - } else { - return net.NotConnected - } -} - -// GetTopic returns the topic from its name or nil if no topic with that name -// was ever registered -func (mm *MemMessenger) GetTopic(topicName string) *Topic { - mm.mutTopics.RLock() - defer mm.mutTopics.RUnlock() - - if t, ok := mm.topics[topicName]; ok { - return t - } - - return nil -} - -// AddTopic registers a new topic to this messenger -func (mm *MemMessenger) AddTopic(t *Topic) error { - //sanity checks - if t == nil { - return errors.New("topic can not be nil") - } - - if strings.Contains(t.Name(), requestTopicSuffix) { - return errors.New("topic name contains request suffix") - } - - mm.mutTopics.Lock() - - if _, ok := mm.topics[t.Name()]; ok { - mm.mutTopics.Unlock() - return errors.New("topic already exists") - } - - mm.topics[t.Name()] = t - t.CurrentPeer = mm.ID() - mm.mutTopics.Unlock() - - // func that publishes on network from Topic object - t.SendData = func(data []byte) error { - return mm.publish(t.Name(), data) - } - - // validator registration func - t.RegisterTopicValidator = func(v pubsub.Validator) error { - return mm.registerValidator(t.Name(), v) - } - - // validator unregistration func - t.UnregisterTopicValidator = func() error { - return mm.unregisterValidator(t.Name()) - } - - //wire-up a plain func for publishing on request channel - t.Request = func(hash []byte) error { - return mm.publish(t.Name()+requestTopicSuffix, hash) - } - - return nil -} - -func (mm *MemMessenger) nextSeqno() []byte { - seqno := make([]byte, 8) - counter := atomic.AddUint64(&mm.seqNo, 1) - binary.BigEndian.PutUint64(seqno, counter) - return seqno -} - -func (mm *MemMessenger) gotNewMessage(mes *pubsub.Message) { - err := verifyMessageSignature(mes.Message) - - if err != nil { - log.Error(err.Error()) - return - } - - mm.mutSeenMessages.Lock() - if mm.seenMessages.Has(msgID(mes)) { - mm.mutSeenMessages.Unlock() - return - 
} - mm.seenMessages.Add(msgID(mes)) - mm.mutSeenMessages.Unlock() - - if len(mes.TopicIDs) == 0 { - log.Error("no topic") - return - } - - v := pubsub.Validator(nil) - mm.mutValidators.RLock() - v, _ = mm.validators[mes.TopicIDs[0]] - mm.mutValidators.RUnlock() - - if v != nil { - if !v(context.Background(), mes) { - //message is not valid, dropping - return - } - } - - if !mm.gotNewData(mes.GetData(), mes.GetFrom(), mes.TopicIDs[0]) { - return - } - - //broadcast to peers - mm.chSend <- mes -} - -func (mm *MemMessenger) gotNewData(data []byte, peerID peer.ID, topic string) bool { - splt := strings.Split(topic, requestTopicSuffix) - - regularTopic := splt[0] - - t := mm.GetTopic(regularTopic) - - if t == nil { - //not subscribed to regular topic, drop - return false - } - - if len(splt) == 2 { - //request message - - //resolver has not been set up, let the message go to the other peers, maybe they can resolve the request - if t.ResolveRequest == nil { - return true - } - - //payload == hash - obj := t.ResolveRequest(data) - - if obj == nil { - //object not found - return true - } - - //found object, no need to resend the request message to peers - //test whether we also should broadcast the message (others might have broadcast it just before us) - has := false - - mm.mutGossipCache.Lock() - has = mm.gossipCache.Has(string(obj)) - mm.mutGossipCache.Unlock() - - if !has { - //only if the current peer did not receive an equal object to cloner, - //then it shall broadcast it - err := t.BroadcastBuff(obj) - if err != nil { - log.Error(err.Error()) - } - } - - return false - } - - //regular message - obj, err := t.CreateObject(data) - if err != nil { - log.Error(err.Error()) - return false - } - - mm.mutGossipCache.Lock() - if mm.gossipCache.Has(obj.ID()) { - //duplicate object, skip - mm.mutGossipCache.Unlock() - return false - } - - mm.gossipCache.Add(obj.ID()) - mm.mutGossipCache.Unlock() - - err = t.NewObjReceived(obj, peerID.Pretty()) - if err != nil { - 
log.Error(err.Error()) - return false - } - - return true -} - -func (mm *MemMessenger) publish(topic string, data []byte) error { - mm.mutClosed.Lock() - if mm.closed { - mm.mutClosed.Unlock() - return errors.New("messenger is closed") - } - mm.mutClosed.Unlock() - - seqno := mm.nextSeqno() - mes := pubsub_pb.Message{} - mes.Data = data - mes.TopicIDs = []string{topic} - mes.From = []byte(mm.ID()) - mes.Seqno = seqno - - pbsMessage := pubsub.Message{Message: &mes} - - if mm.privKey != nil { - mes.From = []byte(mm.peerID) - err := signMessage(&mes, mm.privKey, mm.peerID) - if err != nil { - return err - } - } - - v := pubsub.Validator(nil) - mm.mutValidators.RLock() - v, _ = mm.validators[mes.TopicIDs[0]] - mm.mutValidators.RUnlock() - - if v != nil { - if !v(context.Background(), &pbsMessage) { - //message is not valid, dropping - return errors.New("invalid message") - } - } - - mm.gotNewMessage(&pbsMessage) - - return nil -} - -func (mm *MemMessenger) processLoop() { - for { - select { - case mes := <-mm.chSend: - - //send to connected peers - peers := mm.Peers() - - for i := 0; i < len(peers); i++ { - peerID := peer.ID(peers[i]).Pretty() - - //do not send to originator - if mes.GetFrom() == peers[i] { - continue - } - - if peerID == mm.ID().Pretty() { - //broadcast to self allowed - mm.gotNewMessage(mes) - continue - } - - mm.mutConnectedPeers.Lock() - val, ok := mm.connectedPeers[peer.ID(base58.Decode(peerID))] - mm.mutConnectedPeers.Unlock() - - if !ok || val == nil { - log.Error("invalid peer") - continue - } - - go val.gotNewMessage(mes) - } - } - } -} - -func (mm *MemMessenger) registerValidator(topic string, v pubsub.Validator) error { - mm.mutValidators.Lock() - defer mm.mutValidators.Unlock() - - _, ok := mm.validators[topic] - - if !ok { - mm.validators[topic] = v - return nil - } - - return errors.New(fmt.Sprintf("topic %v already has a validator set", topic)) -} - -func (mm *MemMessenger) unregisterValidator(topic string) error { - 
mm.mutValidators.Lock() - defer mm.mutValidators.Unlock() - - _, ok := mm.validators[topic] - - if !ok { - return errors.New(fmt.Sprintf("topic %v does not have a validator set", topic)) - } - - delete(mm.validators, topic) - return nil -} - -//Helper funcs - -// msgID returns a unique ID of the passed Message -func msgID(pmsg *pubsub.Message) string { - return string(pmsg.GetFrom()) + string(pmsg.GetSeqno()) -} - -func signMessage(mes *pubsub_pb.Message, key crypto.PrivKey, pid peer.ID) error { - buff, err := mes.Marshal() - if err != nil { - return err - } - - buff = withSignPrefix(buff) - - sig, err := key.Sign(buff) - if err != nil { - return err - } - - mes.Signature = sig - - pk, _ := pid.ExtractPublicKey() - if pk == nil { - pubk, err := key.GetPublic().Bytes() - if err != nil { - return err - } - mes.Key = pubk - } - - return nil -} - -func verifyMessageSignature(m *pubsub_pb.Message) error { - pubk, err := messagePubKey(m) - if err != nil { - return err - } - - xm := *m - xm.Signature = nil - xm.Key = nil - buff, err := xm.Marshal() - if err != nil { - return err - } - - buff = withSignPrefix(buff) - - valid, err := pubk.Verify(buff, m.Signature) - if err != nil { - return err - } - - if !valid { - return fmt.Errorf("invalid signature") - } - - return nil -} - -func withSignPrefix(bytes []byte) []byte { - return append([]byte(signPrefix), bytes...) 
-} - -func messagePubKey(m *pubsub_pb.Message) (crypto.PubKey, error) { - var pubk crypto.PubKey - - pid, err := peer.IDFromBytes(m.From) - if err != nil { - return nil, err - } - - if m.Key == nil { - // no attached key, it must be extractable from the source ID - pubk, err = pid.ExtractPublicKey() - if err != nil { - return nil, fmt.Errorf("cannot extract signing key: %s", err.Error()) - } - if pubk == nil { - return nil, fmt.Errorf("cannot extract signing key") - } - } else { - pubk, err = crypto.UnmarshalPublicKey(m.Key) - if err != nil { - return nil, fmt.Errorf("cannot unmarshal signing key: %s", err.Error()) - } - - // verify that the source ID matches the attached key - if !pid.MatchesPublicKey(pubk) { - return nil, fmt.Errorf("bad signing key; source ID %s doesn't match key", pid) - } - } - - return pubk, nil -} diff --git a/p2p/memMessenger_test.go b/p2p/memMessenger_test.go deleted file mode 100644 index eea91d0540f..00000000000 --- a/p2p/memMessenger_test.go +++ /dev/null @@ -1,1132 +0,0 @@ -package p2p_test - -import ( - "bytes" - "context" - "fmt" - "strconv" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p/mock" - "github.com/libp2p/go-libp2p-net" - "github.com/libp2p/go-libp2p-pubsub" - "github.com/stretchr/testify/assert" -) - -var testMemMarshalizer = &mock.MarshalizerMock{} -var testMemHasher = &mock.HasherMock{} - -type testMemStringCreator struct { - Data string -} - -// Create will return a new instance of string. 
Dummy, just to implement Create interface as strings are immutable -func (sc *testMemStringCreator) Create() p2p.Creator { - return &testMemStringCreator{} -} - -// ID will return the same string as ID -func (sc *testMemStringCreator) ID() string { - return sc.Data -} - -type structMemTest1 struct { - Nonce int - Data float64 -} - -func (s1 *structMemTest1) Create() p2p.Creator { - return &structMemTest1{} -} - -func (s1 *structMemTest1) ID() string { - return strconv.Itoa(s1.Nonce) -} - -type structMemTest2 struct { - Nonce string - Data []byte -} - -func (s2 *structMemTest2) Create() p2p.Creator { - return &structMemTest2{} -} - -func (s2 *structMemTest2) ID() string { - return s2.Nonce -} - -func createMemMessenger(t *testing.T, port int) (*p2p.MemMessenger, error) { - cp, err := p2p.NewConnectParamsFromPort(port) - assert.Nil(t, err) - - mm, err := p2p.NewMemMessenger(testMemMarshalizer, testMemHasher, cp) - if err != nil { - return nil, err - } - - return mm, nil -} - -func resetMemMessengers() { - (*p2p.MutGloballyRegPeers).Lock() - defer (*p2p.MutGloballyRegPeers).Unlock() - - p2p.RecreateGlobbalyRegisteredMemPeersMap() -} - -func TestMemMessenger_RecreationSameNodeShouldWork(t *testing.T) { - fmt.Println() - - resetMemMessengers() - - port := 4000 - - node1, err := createMemMessenger(t, port) - assert.Nil(t, err) - - node2, err := createMemMessenger(t, port) - assert.Nil(t, err) - - if node1.ID().Pretty() != node2.ID().Pretty() { - t.Fatal("ID mismatch") - } -} - -func TestMemMessenger_SendToSelfShouldWork(t *testing.T) { - resetMemMessengers() - - node, err := createMemMessenger(t, 4500) - assert.Nil(t, err) - - var counter int32 - - err = node.AddTopic(p2p.NewTopic("test topic", &testMemStringCreator{}, testMemMarshalizer)) - assert.Nil(t, err) - node.GetTopic("test topic").AddDataReceived(func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - payload := (*data.(*testMemStringCreator)).Data - - fmt.Printf("Got message: %v\n", payload) - - if 
payload == "ABC" { - atomic.AddInt32(&counter, 1) - } - }) - - err = node.GetTopic("test topic").Broadcast(testMemStringCreator{Data: "ABC"}) - assert.Nil(t, err) - - time.Sleep(time.Second) - - if atomic.LoadInt32(&counter) != int32(1) { - assert.Fail(t, "Should have been 1 (message received to self)") - } - -} - -func TestMemMessenger_NodesPingPongOn2TopicsShouldWork(t *testing.T) { - fmt.Println() - - resetMemMessengers() - - node1, err := createMemMessenger(t, 5100) - assert.Nil(t, err) - - node2, err := createMemMessenger(t, 5101) - assert.Nil(t, err) - - time.Sleep(time.Second) - - node1.ConnectToAddresses(context.Background(), []string{node2.Addresses()[0]}) - - time.Sleep(time.Second) - - assert.Equal(t, net.Connected, node1.Connectedness(node2.ID())) - assert.Equal(t, net.Connected, node2.Connectedness(node1.ID())) - - fmt.Printf("Node 1 is %s\n", node1.Addresses()[0]) - fmt.Printf("Node 2 is %s\n", node2.Addresses()[0]) - - fmt.Printf("Node 1 has the addresses: %v\n", node1.Addresses()) - fmt.Printf("Node 2 has the addresses: %v\n", node2.Addresses()) - - var val int32 = 0 - - //create 2 topics on each node - err = node1.AddTopic(p2p.NewTopic("ping", &testMemStringCreator{}, testMemMarshalizer)) - assert.Nil(t, err) - err = node1.AddTopic(p2p.NewTopic("pong", &testMemStringCreator{}, testMemMarshalizer)) - assert.Nil(t, err) - - err = node2.AddTopic(p2p.NewTopic("ping", &testMemStringCreator{}, testMemMarshalizer)) - assert.Nil(t, err) - err = node2.AddTopic(p2p.NewTopic("pong", &testMemStringCreator{}, testMemMarshalizer)) - assert.Nil(t, err) - - //assign some event handlers on topics - node1.GetTopic("ping").AddDataReceived(func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - payload := (*data.(*testMemStringCreator)).Data - - if payload == "ping string" { - err = node1.GetTopic("pong").Broadcast(testMemStringCreator{"pong string"}) - assert.Nil(t, err) - } - }) - - node1.GetTopic("pong").AddDataReceived(func(name string, data interface{}, 
msgInfo *p2p.MessageInfo) { - payload := (*data.(*testMemStringCreator)).Data - - fmt.Printf("node1 received: %v\n", payload) - - if payload == "pong string" { - atomic.AddInt32(&val, 1) - } - }) - - //for node2 topic ping we do not need an event handler in this test - node2.GetTopic("pong").AddDataReceived(func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - payload := (*data.(*testMemStringCreator)).Data - - fmt.Printf("node2 received: %v\n", payload) - - if payload == "pong string" { - atomic.AddInt32(&val, 1) - } - }) - - err = node2.GetTopic("ping").Broadcast(testMemStringCreator{"ping string"}) - assert.Nil(t, err) - - time.Sleep(time.Second) - - if atomic.LoadInt32(&val) != 2 { - t.Fatal("Should have been 2 (pong from node1: self and node2: received from node1)") - } - - err = node1.Close() - assert.Nil(t, err) - err = node2.Close() - assert.Nil(t, err) -} - -func TestMemMessenger_SimpleBroadcast5nodesInlineShouldWork(t *testing.T) { - fmt.Println() - - resetMemMessengers() - - nodes := make([]*p2p.MemMessenger, 0) - - //create 5 nodes - for i := 0; i < 5; i++ { - node, err := createMemMessenger(t, 6100+i) - assert.Nil(t, err) - - nodes = append(nodes, node) - - fmt.Printf("Node %v is %s\n", i+1, node.Addresses()[0]) - } - - //connect one with each other daisy-chain - for i := 1; i < 5; i++ { - node := nodes[i] - node.ConnectToAddresses(context.Background(), []string{nodes[i-1].Addresses()[0]}) - } - - time.Sleep(time.Second) - - wg := sync.WaitGroup{} - wg.Add(5) - done := make(chan bool, 0) - - go func() { - defer close(done) - - wg.Wait() - }() - - recv := func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - wg.Done() - } - - //print connected and create topics - for i := 0; i < 5; i++ { - node := nodes[i] - node.PrintConnected() - - err := node.AddTopic(p2p.NewTopic("test", &testMemStringCreator{}, testMemMarshalizer)) - assert.Nil(t, err) - node.GetTopic("test").AddDataReceived(recv) - } - - fmt.Println() - fmt.Println() - - 
fmt.Println("Broadcasting...") - err := nodes[0].GetTopic("test").Broadcast(testMemStringCreator{Data: "Foo"}) - assert.Nil(t, err) - - select { - case <-done: - fmt.Println("Got all messages!") - case <-time.After(time.Second): - assert.Fail(t, "not all messages were received") - } - - //closing - for i := 0; i < len(nodes); i++ { - err = nodes[i].Close() - assert.Nil(t, err) - } -} - -func TestMemMessenger_SimpleBroadcast5nodesBetterConnectedShouldWork(t *testing.T) { - fmt.Println() - - resetMemMessengers() - - nodes := make([]*p2p.MemMessenger, 0) - - //create 5 nodes - for i := 0; i < 5; i++ { - node, err := createMemMessenger(t, 7000+i) - assert.Nil(t, err) - - nodes = append(nodes, node) - - fmt.Printf("Node %v is %s\n", i+1, node.Addresses()[0]) - } - - //connect one with each other manually - // node0 --------- node1 - // | | - // +------------ node2 - // | | - // | node3 - // | | - // +------------ node4 - - nodes[1].ConnectToAddresses(context.Background(), []string{nodes[0].Addresses()[0]}) - nodes[2].ConnectToAddresses(context.Background(), []string{nodes[1].Addresses()[0], nodes[0].Addresses()[0]}) - nodes[3].ConnectToAddresses(context.Background(), []string{nodes[2].Addresses()[0]}) - nodes[4].ConnectToAddresses(context.Background(), []string{nodes[3].Addresses()[0], nodes[0].Addresses()[0]}) - - time.Sleep(time.Second) - - wg := sync.WaitGroup{} - wg.Add(5) - done := make(chan bool, 0) - - go func() { - defer close(done) - - wg.Wait() - }() - - recv := func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - wg.Done() - } - - //print connected and create topics - for i := 0; i < 5; i++ { - node := nodes[i] - node.PrintConnected() - - err := node.AddTopic(p2p.NewTopic("test", &testMemStringCreator{}, testMemMarshalizer)) - assert.Nil(t, err) - node.GetTopic("test").AddDataReceived(recv) - } - - fmt.Println() - fmt.Println() - - fmt.Println("Broadcasting...") - err := nodes[0].GetTopic("test").Broadcast(testMemStringCreator{Data: "Foo"}) - 
assert.Nil(t, err) - - select { - case <-done: - fmt.Println("Got all messages!") - case <-time.After(time.Second): - assert.Fail(t, "not all messages were received") - } - - //closing - for i := 0; i < len(nodes); i++ { - err = nodes[i].Close() - assert.Nil(t, err) - } -} - -func TestMemMessenger_SendingNilShouldErr(t *testing.T) { - resetMemMessengers() - - node1, err := createMemMessenger(t, 9000) - assert.Nil(t, err) - - err = node1.AddTopic(p2p.NewTopic("test", &testMemStringCreator{}, testMemMarshalizer)) - assert.Nil(t, err) - err = node1.GetTopic("test").Broadcast(nil) - assert.NotNil(t, err) -} - -func TestMemMessenger_CreateNodeWithNilMarshalizerShouldErr(t *testing.T) { - resetMemMessengers() - - cp, err := p2p.NewConnectParamsFromPort(11000) - assert.Nil(t, err) - - _, err = p2p.NewMemMessenger(nil, testMemHasher, cp) - - assert.NotNil(t, err) -} - -func TestMemMessenger_CreateNodeWithNilHasherShouldErr(t *testing.T) { - resetMemMessengers() - - cp, err := p2p.NewConnectParamsFromPort(12000) - assert.Nil(t, err) - - _, err = p2p.NewMemMessenger(testMemMarshalizer, nil, cp) - - assert.NotNil(t, err) -} - -func TestMemMessenger_SingleRoundBootstrapShouldNotProduceLonelyNodes(t *testing.T) { - resetMemMessengers() - - if testing.Short() { - t.Skip("skipping test in short mode") - } - - startPort := 12000 - endPort := 12009 - - nodes := make([]p2p.Messenger, 0) - - recv := make(map[string]*p2p.MessageInfo) - mut := sync.RWMutex{} - - //prepare messengers - for i := startPort; i <= endPort; i++ { - node, err := createMemMessenger(t, i) - - err = node.AddTopic(p2p.NewTopic("test topic", &testMemStringCreator{}, testMemMarshalizer)) - assert.Nil(t, err) - - node.GetTopic("test topic").AddDataReceived(func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - mut.Lock() - recv[node.ID().Pretty()] = msgInfo - - fmt.Printf("%v got message: %v\n", node.ID().Pretty(), (*data.(*testMemStringCreator)).Data) - - mut.Unlock() - - }) - - nodes = append(nodes, 
node) - } - - time.Sleep(time.Second) - - //call bootstrap to connect with each other - for i := 0; i < len(nodes); i++ { - node := nodes[i] - - node.Bootstrap(context.Background()) - } - - time.Sleep(time.Second * 10) - - for i := 0; i < len(nodes); i++ { - nodes[i].PrintConnected() - fmt.Println() - } - - time.Sleep(time.Second) - - //broadcasting something - fmt.Println("Broadcasting a message...") - err := nodes[0].GetTopic("test topic").Broadcast(testMemStringCreator{"a string to broadcast"}) - assert.Nil(t, err) - - fmt.Println("Waiting...") - - //waiting 2 seconds - time.Sleep(time.Second * 2) - - notRecv := 0 - didRecv := 0 - - for i := 0; i < len(nodes); i++ { - - mut.RLock() - _, found := recv[nodes[i].ID().Pretty()] - mut.RUnlock() - - if !found { - fmt.Printf("Peer %s didn't got the message!\n", nodes[i].ID().Pretty()) - notRecv++ - } else { - didRecv++ - } - } - - fmt.Println() - fmt.Println("Did recv:", didRecv) - fmt.Println("Did not recv:", notRecv) - - assert.Equal(t, 0, notRecv) -} - -func TestMemMessenger_BadObjectToUnmarshalShouldFilteredOut(t *testing.T) { - //stress test to check if the node is able to cope - //with unmarshaling a bad object - //both structs have the same fields but incompatible types - - //node1 registers topic 'test' with struct1 - //node2 registers topic 'test' with struct2 - - resetMemMessengers() - - node1, err := createMemMessenger(t, 13000) - assert.Nil(t, err) - - node2, err := createMemMessenger(t, 13001) - assert.Nil(t, err) - - //connect nodes - node1.ConnectToAddresses(context.Background(), []string{node2.Addresses()[0]}) - - //wait a bit - time.Sleep(time.Second) - - //create topics for each node - err = node1.AddTopic(p2p.NewTopic("test", &structMemTest1{}, testMemMarshalizer)) - assert.Nil(t, err) - err = node2.AddTopic(p2p.NewTopic("test", &structMemTest2{}, testMemMarshalizer)) - assert.Nil(t, err) - - counter := int32(0) - - //node 1 sends, node 2 receives - node2.GetTopic("test").AddDataReceived(func(name 
string, data interface{}, msgInfo *p2p.MessageInfo) { - fmt.Printf("received: %v", data) - atomic.AddInt32(&counter, 1) - }) - - err = node1.GetTopic("test").Broadcast(&structMemTest1{Nonce: 4, Data: 4.5}) - assert.Nil(t, err) - - //wait a bit - time.Sleep(time.Second) - - //check that the message was filtered out - assert.Equal(t, int32(0), atomic.LoadInt32(&counter)) -} - -func TestMemMessenger_BroadcastOnInexistentTopicShouldFilteredOut(t *testing.T) { - //stress test to check if the node is able to cope - //with receiving on an inexistent topic - resetMemMessengers() - - node1, err := createMemMessenger(t, 14000) - assert.Nil(t, err) - - node2, err := createMemMessenger(t, 14001) - assert.Nil(t, err) - - //connect nodes - node1.ConnectToAddresses(context.Background(), []string{node2.Addresses()[0]}) - - //wait a bit - time.Sleep(time.Second) - - //create topics for each node - err = node1.AddTopic(p2p.NewTopic("test1", &testMemStringCreator{}, testMemMarshalizer)) - assert.Nil(t, err) - err = node2.AddTopic(p2p.NewTopic("test2", &testMemStringCreator{}, testMemMarshalizer)) - assert.Nil(t, err) - - counter := int32(0) - - //node 1 sends, node 2 receives - node2.GetTopic("test2").AddDataReceived(func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - fmt.Printf("received: %v", data) - atomic.AddInt32(&counter, 1) - }) - - err = node1.GetTopic("test1").Broadcast(testMemStringCreator{"Foo"}) - assert.Nil(t, err) - - //wait a bit - time.Sleep(time.Second) - - //check that the message was filtered out - assert.Equal(t, int32(0), atomic.LoadInt32(&counter)) -} - -func TestMemMessenger_MultipleRoundBootstrapShouldNotProduceLonelyNodes(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode") - } - - resetMemMessengers() - - startPort := 12000 - endPort := 12009 - - nodes := make([]p2p.Messenger, 0) - - recv := make(map[string]*p2p.MessageInfo) - mut := sync.RWMutex{} - - //prepare messengers - for i := startPort; i <= endPort; i++ { - 
node, err := createMemMessenger(t, i) - - err = node.AddTopic(p2p.NewTopic("test topic", &testMemStringCreator{}, testMemMarshalizer)) - assert.Nil(t, err) - - node.GetTopic("test topic").AddDataReceived(func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - mut.Lock() - recv[node.ID().Pretty()] = msgInfo - - fmt.Printf("%v got message: %v\n", node.ID().Pretty(), (*data.(*testMemStringCreator)).Data) - - mut.Unlock() - - }) - - nodes = append(nodes, node) - } - - time.Sleep(time.Second) - - //call bootstrap to connect with each other only on n - 2 nodes - for i := 0; i < len(nodes)-2; i++ { - node := nodes[i] - - node.Bootstrap(context.Background()) - } - - fmt.Println("Bootstrapping round 1...") - time.Sleep(time.Second * 10) - - for i := 0; i < len(nodes); i++ { - nodes[i].PrintConnected() - fmt.Println() - } - - time.Sleep(time.Second) - - //second round bootstrap for the last 2 nodes - for i := len(nodes) - 2; i < len(nodes); i++ { - node := nodes[i] - - node.Bootstrap(context.Background()) - } - - fmt.Println("Bootstrapping round 2...") - time.Sleep(time.Second * 10) - - for i := 0; i < len(nodes); i++ { - nodes[i].PrintConnected() - fmt.Println() - } - - time.Sleep(time.Second) - - //broadcasting something - fmt.Println("Broadcasting a message...") - err := nodes[0].GetTopic("test topic").Broadcast(testMemStringCreator{"a string to broadcast"}) - assert.Nil(t, err) - - fmt.Println("Waiting...") - - //waiting 2 seconds - time.Sleep(time.Second * 2) - - notRecv := 0 - didRecv := 0 - - for i := 0; i < len(nodes); i++ { - - mut.RLock() - _, found := recv[nodes[i].ID().Pretty()] - mut.RUnlock() - - if !found { - fmt.Printf("Peer %s didn't got the message!\n", nodes[i].ID().Pretty()) - notRecv++ - } else { - didRecv++ - } - } - - fmt.Println() - fmt.Println("Did recv:", didRecv) - fmt.Println("Did not recv:", notRecv) - - assert.Equal(t, 0, notRecv) -} - -func TestMemMessenger_BroadcastWithValidatorsShouldWork(t *testing.T) { - resetMemMessengers() - - 
fmt.Println() - - nodes := make([]*p2p.MemMessenger, 0) - - //create 5 nodes - for i := 0; i < 5; i++ { - node, err := createMemMessenger(t, 13150+i) - assert.Nil(t, err) - - nodes = append(nodes, node) - - fmt.Printf("Node %v is %s\n", i+1, node.Addresses()[0]) - } - - //connect one with each other manually - // node0 --------- node1 - // | | - // +------------ node2 - // | | - // | node3 - // | | - // +------------ node4 - - nodes[1].ConnectToAddresses(context.Background(), []string{nodes[0].Addresses()[0]}) - nodes[2].ConnectToAddresses(context.Background(), []string{nodes[1].Addresses()[0], nodes[0].Addresses()[0]}) - nodes[3].ConnectToAddresses(context.Background(), []string{nodes[2].Addresses()[0]}) - nodes[4].ConnectToAddresses(context.Background(), []string{nodes[3].Addresses()[0], nodes[0].Addresses()[0]}) - - time.Sleep(time.Second) - - counter := int32(0) - - recv := func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - atomic.AddInt32(&counter, 1) - } - - //print connected and create topics - for i := 0; i < 5; i++ { - node := nodes[i] - node.PrintConnected() - - err := node.AddTopic(p2p.NewTopic("test", &testMemStringCreator{}, testMemMarshalizer)) - assert.Nil(t, err) - node.GetTopic("test").AddDataReceived(recv) - } - - // dummy validator that prevents propagation of "AAA" message - v := func(ctx context.Context, mes *pubsub.Message) bool { - marsh := mock.MarshalizerMock{} - obj := &testMemStringCreator{} - err := marsh.Unmarshal(obj, mes.GetData()) - assert.Nil(t, err) - - return obj.Data != "AAA" - } - - //node 2 has validator in place - err := nodes[2].GetTopic("test").RegisterValidator(v) - assert.Nil(t, err) - - fmt.Println() - fmt.Println() - - //send AAA, wait 1 sec, check that 4 peers got the message - atomic.StoreInt32(&counter, 0) - fmt.Println("Broadcasting AAA...") - err = nodes[0].GetTopic("test").Broadcast(testMemStringCreator{Data: "AAA"}) - assert.Nil(t, err) - time.Sleep(time.Second) - assert.Equal(t, int32(4), 
atomic.LoadInt32(&counter)) - fmt.Printf("%d peers got the message!\n", atomic.LoadInt32(&counter)) - - //send BBB, wait 1 sec, check that all peers got the message - atomic.StoreInt32(&counter, 0) - fmt.Println("Broadcasting BBB...") - err = nodes[0].GetTopic("test").Broadcast(testMemStringCreator{Data: "BBB"}) - assert.Nil(t, err) - time.Sleep(time.Second) - assert.Equal(t, int32(5), atomic.LoadInt32(&counter)) - fmt.Printf("%d peers got the message!\n", atomic.LoadInt32(&counter)) - - //add the validator on node 4 - err = nodes[4].GetTopic("test").RegisterValidator(v) - assert.Nil(t, err) - - //send AAA, wait 1 sec, check that no peers got the message as the filtering should work - atomic.StoreInt32(&counter, 0) - fmt.Println("Broadcasting AAA...") - err = nodes[0].GetTopic("test").Broadcast(testMemStringCreator{Data: "AAA"}) - assert.Nil(t, err) - time.Sleep(time.Second) - assert.Equal(t, int32(0), atomic.LoadInt32(&counter)) - fmt.Printf("%d peers got the message!\n", atomic.LoadInt32(&counter)) - - //closing - for i := 0; i < len(nodes); i++ { - err = nodes[i].Close() - assert.Nil(t, err) - } -} - -func TestMemMessenger_RequestResolveTestCfg1ShouldWork(t *testing.T) { - resetMemMessengers() - - nodes := make([]*p2p.MemMessenger, 0) - - //create 5 nodes - for i := 0; i < 5; i++ { - node, err := createMemMessenger(t, 15000+i) - assert.Nil(t, err) - - nodes = append(nodes, node) - - fmt.Printf("Node %v is %s\n", i+1, node.Addresses()[0]) - } - - //connect one with each other manually - // node0 --------- node1 - // | | - // +------------ node2 - // | | - // | node3 - // | | - // +------------ node4 - - nodes[1].ConnectToAddresses(context.Background(), []string{nodes[0].Addresses()[0]}) - nodes[2].ConnectToAddresses(context.Background(), []string{nodes[1].Addresses()[0], nodes[0].Addresses()[0]}) - nodes[3].ConnectToAddresses(context.Background(), []string{nodes[2].Addresses()[0]}) - nodes[4].ConnectToAddresses(context.Background(), 
[]string{nodes[3].Addresses()[0], nodes[0].Addresses()[0]}) - - time.Sleep(time.Second) - - counter1 := int32(0) - - recv := func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - if data.(*testMemStringCreator).Data == "Real object1" { - atomic.AddInt32(&counter1, 1) - } - - fmt.Printf("Received: %v\n", data.(*testMemStringCreator).Data) - } - - //print connected and create topics - for i := 0; i < 5; i++ { - node := nodes[i] - node.PrintConnected() - - err := node.AddTopic(p2p.NewTopic("test", &testMemStringCreator{}, testMemMarshalizer)) - assert.Nil(t, err) - } - - //to simplify, only node 0 should have a recv event handler - nodes[0].GetTopic("test").AddDataReceived(recv) - - //setup a resolver func for node 3 - nodes[3].GetTopic("test").ResolveRequest = func(hash []byte) []byte { - if bytes.Equal(hash, []byte("A000")) { - marshalizer := &mock.MarshalizerMock{} - buff, _ := marshalizer.Marshal(&testMemStringCreator{Data: "Real object1"}) - return buff - } - - return nil - } - - //node0 requests an unavailable data - err := nodes[0].GetTopic("test").SendRequest([]byte("B000")) - assert.Nil(t, err) - fmt.Println("Sent request B000") - time.Sleep(time.Second * 2) - assert.Equal(t, int32(0), atomic.LoadInt32(&counter1)) - - //node0 requests an available data on node 3 - err = nodes[0].GetTopic("test").SendRequest([]byte("A000")) - assert.Nil(t, err) - fmt.Println("Sent request A000") - time.Sleep(time.Second * 2) - assert.Equal(t, int32(1), atomic.LoadInt32(&counter1)) -} - -func TestMemMessenger_RequestResolveTestCfg2ShouldWork(t *testing.T) { - resetMemMessengers() - - nodes := make([]*p2p.MemMessenger, 0) - - //create 5 nodes - for i := 0; i < 5; i++ { - node, err := createMemMessenger(t, 15100+i) - assert.Nil(t, err) - - nodes = append(nodes, node) - - fmt.Printf("Node %v is %s\n", i+1, node.Addresses()[0]) - } - - //connect one with each other manually - // node0 --------- node1 - // | | - // +------------ node2 - // | | - // | node3 - // | | - // 
+------------ node4 - - nodes[1].ConnectToAddresses(context.Background(), []string{nodes[0].Addresses()[0]}) - nodes[2].ConnectToAddresses(context.Background(), []string{nodes[1].Addresses()[0], nodes[0].Addresses()[0]}) - nodes[3].ConnectToAddresses(context.Background(), []string{nodes[2].Addresses()[0]}) - nodes[4].ConnectToAddresses(context.Background(), []string{nodes[3].Addresses()[0], nodes[0].Addresses()[0]}) - - time.Sleep(time.Second) - - counter1 := int32(0) - - recv := func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - if data.(*testMemStringCreator).Data == "Real object1" { - atomic.AddInt32(&counter1, 1) - } - - fmt.Printf("Received: %v from %v\n", data.(*testMemStringCreator).Data, msgInfo.Peer) - } - - //print connected and create topics - for i := 0; i < 5; i++ { - node := nodes[i] - node.PrintConnected() - - err := node.AddTopic(p2p.NewTopic("test", &testMemStringCreator{}, testMemMarshalizer)) - assert.Nil(t, err) - } - - //to simplify, only node 1 should have a recv event handler - nodes[1].GetTopic("test").AddDataReceived(recv) - - //resolver func for node 0 and 2 - resolverOK := func(hash []byte) []byte { - if bytes.Equal(hash, []byte("A000")) { - marshalizer := &mock.MarshalizerMock{} - buff, _ := marshalizer.Marshal(&testMemStringCreator{Data: "Real object1"}) - return buff - } - - return nil - } - - //resolver func for other nodes - resolverNOK := func(hash []byte) []byte { - panic("Should have not reached this point") - - return nil - } - - nodes[0].GetTopic("test").ResolveRequest = resolverOK - nodes[2].GetTopic("test").ResolveRequest = resolverOK - - nodes[3].GetTopic("test").ResolveRequest = resolverNOK - nodes[4].GetTopic("test").ResolveRequest = resolverNOK - - //node1 requests an available data - err := nodes[1].GetTopic("test").SendRequest([]byte("A000")) - assert.Nil(t, err) - fmt.Println("Sent request A000") - time.Sleep(time.Second * 2) - assert.True(t, atomic.LoadInt32(&counter1) == int32(1) || 
atomic.LoadInt32(&counter1) == int32(2)) - -} - -func TestMemMessenger_RequestResolveTestSelfShouldWork(t *testing.T) { - resetMemMessengers() - - nodes := make([]*p2p.MemMessenger, 0) - - //create 5 nodes - for i := 0; i < 5; i++ { - node, err := createMemMessenger(t, 15200+i) - assert.Nil(t, err) - - nodes = append(nodes, node) - - fmt.Printf("Node %v is %s\n", i+1, node.Addresses()[0]) - } - - //connect one with each other manually - // node0 --------- node1 - // | | - // +------------ node2 - // | | - // | node3 - // | | - // +------------ node4 - - nodes[1].ConnectToAddresses(context.Background(), []string{nodes[0].Addresses()[0]}) - nodes[2].ConnectToAddresses(context.Background(), []string{nodes[1].Addresses()[0], nodes[0].Addresses()[0]}) - nodes[3].ConnectToAddresses(context.Background(), []string{nodes[2].Addresses()[0]}) - nodes[4].ConnectToAddresses(context.Background(), []string{nodes[3].Addresses()[0], nodes[0].Addresses()[0]}) - - time.Sleep(time.Second) - - counter1 := int32(0) - - recv := func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - if data.(*testMemStringCreator).Data == "Real object1" { - atomic.AddInt32(&counter1, 1) - } - - fmt.Printf("Received: %v from %v\n", data.(*testMemStringCreator).Data, msgInfo.Peer) - } - - //print connected and create topics - for i := 0; i < 5; i++ { - node := nodes[i] - node.PrintConnected() - - err := node.AddTopic(p2p.NewTopic("test", &testMemStringCreator{}, testMemMarshalizer)) - assert.Nil(t, err) - } - - //to simplify, only node 1 should have a recv event handler - nodes[1].GetTopic("test").AddDataReceived(recv) - - //resolver func for node 1 - resolverOK := func(hash []byte) []byte { - if bytes.Equal(hash, []byte("A000")) { - marshalizer := &mock.MarshalizerMock{} - buff, _ := marshalizer.Marshal(&testMemStringCreator{Data: "Real object1"}) - return buff - } - - return nil - } - - //resolver func for other nodes - resolverNOK := func(hash []byte) []byte { - panic("Should have not reached 
this point") - - return nil - } - - nodes[1].GetTopic("test").ResolveRequest = resolverOK - - nodes[0].GetTopic("test").ResolveRequest = resolverNOK - nodes[2].GetTopic("test").ResolveRequest = resolverNOK - nodes[3].GetTopic("test").ResolveRequest = resolverNOK - nodes[4].GetTopic("test").ResolveRequest = resolverNOK - - //node1 requests an available data - err := nodes[1].GetTopic("test").SendRequest([]byte("A000")) - assert.Nil(t, err) - fmt.Println("Sent request A000") - time.Sleep(time.Second * 2) - assert.Equal(t, int32(1), atomic.LoadInt32(&counter1)) - -} - -func TestMemMessenger_RequestResolveResendingShouldWork(t *testing.T) { - resetMemMessengers() - - nodes := make([]*p2p.MemMessenger, 0) - - //create 5 nodes - for i := 0; i < 5; i++ { - node, err := createMemMessenger(t, 15300+i) - assert.Nil(t, err) - - nodes = append(nodes, node) - - fmt.Printf("Node %v is %s\n", i+1, node.Addresses()[0]) - } - - //connect one with each other manually - // node0 --------- node1 - // | | - // +------------ node2 - // | | - // | node3 - // | | - // +------------ node4 - - nodes[1].ConnectToAddresses(context.Background(), []string{nodes[0].Addresses()[0]}) - nodes[2].ConnectToAddresses(context.Background(), []string{nodes[1].Addresses()[0], nodes[0].Addresses()[0]}) - nodes[3].ConnectToAddresses(context.Background(), []string{nodes[2].Addresses()[0]}) - nodes[4].ConnectToAddresses(context.Background(), []string{nodes[3].Addresses()[0], nodes[0].Addresses()[0]}) - - time.Sleep(time.Second) - - counter1 := int32(0) - - recv := func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - atomic.AddInt32(&counter1, 1) - - fmt.Printf("Received: %v from %v\n", data.(*testMemStringCreator).Data, msgInfo.Peer) - } - - //print connected and create topics - for i := 0; i < 5; i++ { - node := nodes[i] - node.PrintConnected() - - err := node.AddTopic(p2p.NewTopic("test", &testMemStringCreator{}, testMemMarshalizer)) - assert.Nil(t, err) - } - - //to simplify, only node 1 
should have a recv event handler - nodes[1].GetTopic("test").AddDataReceived(recv) - - //resolver func for node 0 and 2 - resolverOK := func(hash []byte) []byte { - if bytes.Equal(hash, []byte("A000")) { - marshalizer := &mock.MarshalizerMock{} - buff, _ := marshalizer.Marshal(&testMemStringCreator{Data: "Real object0"}) - return buff - } - - return nil - } - - //resolver func for other nodes - resolverNOK := func(hash []byte) []byte { - panic("Should have not reached this point") - - return nil - } - - nodes[0].GetTopic("test").ResolveRequest = resolverOK - nodes[2].GetTopic("test").ResolveRequest = resolverOK - - nodes[3].GetTopic("test").ResolveRequest = resolverNOK - nodes[4].GetTopic("test").ResolveRequest = resolverNOK - - //node1 requests an available data - err := nodes[1].GetTopic("test").SendRequest([]byte("A000")) - assert.Nil(t, err) - fmt.Println("Sent request A000") - time.Sleep(time.Second * 2) - assert.True(t, atomic.LoadInt32(&counter1) == int32(1)) - - //resending - err = nodes[1].GetTopic("test").SendRequest([]byte("A000")) - assert.Nil(t, err) - fmt.Println("Re-sent request A000") - time.Sleep(time.Second * 2) - assert.True(t, atomic.LoadInt32(&counter1) == int32(1)) - - //delaying - time.Sleep(p2p.DurTimeCache - time.Second*3) - - //resending - err = nodes[1].GetTopic("test").SendRequest([]byte("A000")) - assert.Nil(t, err) - fmt.Println("Re-sent request A000") - time.Sleep(time.Second * 2) - assert.True(t, atomic.LoadInt32(&counter1) == int32(2)) - -} diff --git a/p2p/mock/connMock.go b/p2p/mock/connMock.go deleted file mode 100644 index 3b0bc19d4e9..00000000000 --- a/p2p/mock/connMock.go +++ /dev/null @@ -1,71 +0,0 @@ -package mock - -import ( - cr "github.com/libp2p/go-libp2p-crypto" - "github.com/libp2p/go-libp2p-net" - "github.com/libp2p/go-libp2p-peer" - "github.com/multiformats/go-multiaddr" -) - -// ConnMock is used for testing -type ConnMock struct { - LocalP peer.ID - RemoteP peer.ID - Status net.Stat - - CloseCalled func(*ConnMock) 
error -} - -// Closes the connection, dummy -func (cm *ConnMock) Close() error { - if cm.CloseCalled != nil { - return cm.CloseCalled(cm) - } - - return nil -} - -// LocalPeer returns the current peer -func (cm *ConnMock) LocalPeer() peer.ID { - return cm.LocalP -} - -// LocalPrivateKey returns current private key used in P2P -func (cm *ConnMock) LocalPrivateKey() cr.PrivKey { - panic("implement me") -} - -// RemotePeer returns the other side peer -func (cm *ConnMock) RemotePeer() peer.ID { - return cm.RemoteP -} - -// RemotePublicKey returns the public key of the other side peer, dummy, will panic -func (cm ConnMock) RemotePublicKey() cr.PubKey { - panic("implement me") -} - -// LocalMultiaddr is dummy, will panic -func (cm ConnMock) LocalMultiaddr() multiaddr.Multiaddr { - panic("implement me") -} - -// RemoteMultiaddr is dummy, will panic -func (cm ConnMock) RemoteMultiaddr() multiaddr.Multiaddr { - panic("implement me") -} - -// NewStream is dummy, will panic -func (cm ConnMock) NewStream() (net.Stream, error) { - panic("implement me") -} - -// GetStreams is dummy, will panic -func (cm ConnMock) GetStreams() []net.Stream { - panic("implement me") -} - -// Stat is dummy, will panic -func (cm ConnMock) Stat() net.Stat { - return cm.Status -} diff --git a/p2p/mock/hasherMock.go b/p2p/mock/hasherMock.go deleted file mode 100644 index e5f5265f5a6..00000000000 --- a/p2p/mock/hasherMock.go +++ /dev/null @@ -1,31 +0,0 @@ -package mock - -import ( - "crypto/sha256" -) - -var hasherMockEmptyHash []byte - -// HasherMock is used in testing -type HasherMock struct { -} - -// Computes the SHA256 of the provided string -func (hm *HasherMock) Compute(s string) []byte { - h := sha256.New() - h.Write([]byte(s)) - return h.Sum(nil) -} - -// EmptyHash returns the hash of the empty string -func (hm *HasherMock) EmptyHash() []byte { - if len(hasherMockEmptyHash) == 0 { - hasherMockEmptyHash = hm.Compute("") - } - return hasherMockEmptyHash -} - -// Size return SHA256 output size: 32 
-func (*HasherMock) Size() int { - return sha256.Size -} diff --git a/p2p/mock/marshalizerMock.go b/p2p/mock/marshalizerMock.go deleted file mode 100644 index ed2825f6480..00000000000 --- a/p2p/mock/marshalizerMock.go +++ /dev/null @@ -1,48 +0,0 @@ -package mock - -import ( - "encoding/json" - - "github.com/pkg/errors" -) - -var errMarshalizerFails = errors.New("marshalizerMock generic error") - -// MarshalizerMock is used for testing -type MarshalizerMock struct { - Fail bool -} - -// Marshal encodes an object to its byte array representation -func (m *MarshalizerMock) Marshal(obj interface{}) ([]byte, error) { - if m.Fail { - return nil, errMarshalizerFails - } - - if obj == nil { - return nil, errors.New("NIL object to serilize from!") - } - - return json.Marshal(obj) -} - -// Unmarshal decodes a byte array and applies the data on an instantiated struct -func (m *MarshalizerMock) Unmarshal(obj interface{}, buff []byte) error { - if m.Fail { - return errMarshalizerFails - } - - if obj == nil { - return errors.New("NIL object to serilize to!") - } - - if buff == nil { - return errors.New("NIL byte buffer to deserialize from!") - } - - if len(buff) == 0 { - return errors.New("Empty byte buffer to deserialize from!") - } - - return json.Unmarshal(buff, obj) -} diff --git a/p2p/netMessenger.go b/p2p/netMessenger.go deleted file mode 100644 index ee62cf288c8..00000000000 --- a/p2p/netMessenger.go +++ /dev/null @@ -1,577 +0,0 @@ -package p2p - -import ( - "context" - "errors" - "fmt" - "strings" - "sync" - "time" - - "github.com/ElrondNetwork/elrond-go-sandbox/hashing" - "github.com/ElrondNetwork/elrond-go-sandbox/marshal" - "github.com/ipfs/go-ipfs-addr" - "github.com/libp2p/go-libp2p" - "github.com/libp2p/go-libp2p-host" - "github.com/libp2p/go-libp2p-net" - "github.com/libp2p/go-libp2p-peer" - "github.com/libp2p/go-libp2p-peerstore" - "github.com/libp2p/go-libp2p-protocol" - "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/p2p/discovery" - 
"github.com/multiformats/go-multiaddr" -) - -// durMdnsCalls is used to define the duration used by mdns service when polling peers -const durMdnsCalls = time.Second - -// durTimeCache represents the duration for gossip messages to be saved in cache -const durTimeCache = time.Second * 5 - -// requestTopicSuffix is added to a known topic to generate the topic's request counterpart -const requestTopicSuffix = "_REQUEST" - -// PubSubStrategy defines the strategy for broadcasting messages in the network -type PubSubStrategy int - -const ( - // FloodSub strategy to use when broadcasting messages - FloodSub = iota - // GossipSub strategy to use when broadcasting messages - GossipSub -) - -type message struct { - buff []byte - topic string -} - -// NetMessenger implements a libP2P node with added functionality -type NetMessenger struct { - context context.Context - protocol protocol.ID - p2pNode host.Host - ps *pubsub.PubSub - mdns discovery.Service - mutChansSend sync.RWMutex - chansSend map[string]chan []byte - mutBootstrap sync.Mutex - marsh marshal.Marshalizer - hasher hashing.Hasher - rt *RoutingTable - cn *ConnNotifier - mutClosed sync.RWMutex - closed bool - mutTopics sync.RWMutex - topics map[string]*Topic - mutGossipCache sync.RWMutex - gossipCache *TimeCache - - chSendMessages chan *message -} - -// NewNetMessenger creates a new instance of NetMessenger. -func NewNetMessenger(ctx context.Context, marsh marshal.Marshalizer, hasher hashing.Hasher, - cp *ConnectParams, maxAllowedPeers int, pubsubStrategy PubSubStrategy) (*NetMessenger, error) { - - if marsh == nil { - return nil, errors.New("marshalizer is nil! Can't create node") - } - - if hasher == nil { - return nil, errors.New("hasher is nil! 
Can't create node") - } - - node := NetMessenger{ - context: ctx, - marsh: marsh, - hasher: hasher, - topics: make(map[string]*Topic, 0), - mutGossipCache: sync.RWMutex{}, - gossipCache: NewTimeCache(durTimeCache), - chSendMessages: make(chan *message), - } - - node.cn = NewConnNotifier(maxAllowedPeers) - - timeStart := time.Now() - - opts := []libp2p.Option{ - libp2p.ListenAddrStrings(fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", cp.Port)), - libp2p.Identity(cp.PrivKey), - libp2p.DefaultTransports, - libp2p.DefaultMuxers, - libp2p.DefaultSecurity, - libp2p.NATPortMap(), - } - - hostP2P, err := libp2p.New(ctx, opts...) - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - _ = hostP2P.Close() - } - }() - - adrTable := fmt.Sprintf("Node: %v has the following addr table: \n", hostP2P.ID().Pretty()) - for i, addr := range hostP2P.Addrs() { - adrTable = adrTable + fmt.Sprintf("%d: %s/ipfs/%s\n", i, addr, hostP2P.ID().Pretty()) - } - log.Debug(adrTable) - - node.p2pNode = hostP2P - node.chansSend = make(map[string]chan []byte) - - err = node.createPubSub(hostP2P, pubsubStrategy, ctx) - if err != nil { - return nil, err - } - - node.rt = NewRoutingTable(hostP2P.ID()) - - node.connNotifierRegistration(ctx) - - log.Debug(fmt.Sprintf("Created node in %v\n", time.Now().Sub(timeStart))) - - return &node, nil -} - -func (nm *NetMessenger) createPubSub(hostP2P host.Host, pubsubStrategy PubSubStrategy, ctx context.Context) error { - optsPS := []pubsub.Option{ - pubsub.WithMessageSigning(true), - } - - switch pubsubStrategy { - case FloodSub: - { - ps, err := pubsub.NewFloodSub(ctx, hostP2P, optsPS...) - if err != nil { - return err - } - nm.ps = ps - } - case GossipSub: - { - ps, err := pubsub.NewGossipSub(ctx, hostP2P, optsPS...) 
- if err != nil { - return err - } - nm.ps = ps - } - default: - return errors.New("unknown pubsub strategy") - } - - go func(ps *pubsub.PubSub, ch chan *message) { - for { - select { - case msg := <-ch: - err := ps.Publish(msg.topic, msg.buff) - - log.LogIfError(err) - } - - time.Sleep(time.Microsecond * 100) - } - }(nm.ps, nm.chSendMessages) - - return nil -} - -func (nm *NetMessenger) connNotifierRegistration(ctx context.Context) { - //register the notifier - nm.p2pNode.Network().Notify(nm.cn) - - nm.cn.GetKnownPeers = func(cn *ConnNotifier) []peer.ID { - return nm.RouteTable().NearestPeersAll() - } - - nm.cn.ConnectToPeer = func(cn *ConnNotifier, pid peer.ID) error { - pinfo := nm.p2pNode.Peerstore().PeerInfo(pid) - - if err := nm.p2pNode.Connect(ctx, pinfo); err != nil { - return err - } - - return nil - } - - nm.cn.GetConnections = func(sender *ConnNotifier) []net.Conn { - return nm.Conns() - } - - nm.cn.IsConnected = func(sender *ConnNotifier, pid peer.ID) bool { - return nm.Connectedness(pid) == net.Connected - } - - nm.cn.Start() -} - -// Closes a NetMessenger -func (nm *NetMessenger) Close() error { - nm.mutClosed.Lock() - nm.closed = true - nm.mutClosed.Unlock() - - errorEncountered := error(nil) - - if nm.mdns != nil { - //unregistration and closing - nm.mdns.UnregisterNotifee(nm) - err := nm.mdns.Close() - if err != nil { - errorEncountered = err - } - } - - nm.cn.Stop() - err := nm.p2pNode.Network().Close() - if err != nil { - errorEncountered = err - } - err = nm.p2pNode.Close() - if err != nil { - errorEncountered = err - } - - nm.mdns = nil - nm.cn = nil - nm.ps = nil - - return errorEncountered -} - -// ID returns the current id -func (nm *NetMessenger) ID() peer.ID { - return nm.p2pNode.ID() -} - -// Peers returns the connected peers list -func (nm *NetMessenger) Peers() []peer.ID { - return nm.p2pNode.Peerstore().Peers() -} - -// Conns return the connections made by this memory messenger -func (nm *NetMessenger) Conns() []net.Conn { - return 
nm.p2pNode.Network().Conns() -} - -// Marshalizer returns the used marshalizer object -func (nm *NetMessenger) Marshalizer() marshal.Marshalizer { - return nm.marsh -} - -// Hasher returns the used object for hashing data -func (nm *NetMessenger) Hasher() hashing.Hasher { - return nm.hasher -} - -// RouteTable will return the RoutingTable object -func (nm *NetMessenger) RouteTable() *RoutingTable { - return nm.rt -} - -// Addresses will return all addresses bound to current messenger -func (nm *NetMessenger) Addresses() []string { - addrs := make([]string, 0) - - for _, address := range nm.p2pNode.Addrs() { - addrs = append(addrs, address.String()+"/ipfs/"+nm.ID().Pretty()) - } - - return addrs -} - -// ConnectToAddresses is used to explicitly connect to a well known set of addresses -func (nm *NetMessenger) ConnectToAddresses(ctx context.Context, addresses []string) { - peers := 0 - - timeStart := time.Now() - - for i := 0; i < len(addresses); i++ { - pinfo, err := nm.ParseAddressIpfs(addresses[i]) - - if err != nil { - log.Error(fmt.Sprintf("Bootstrapping the peer '%v' failed with error %v\n", addresses[i], err)) - continue - } - - if err := nm.p2pNode.Connect(ctx, *pinfo); err != nil { - log.Error(fmt.Sprintf("Bootstrapping the peer '%v' failed with error %v\n", addresses[i], err)) - continue - } - - peers++ - } - - log.Debug(fmt.Sprintf("Connected to %d peers in %v\n", peers, time.Now().Sub(timeStart))) -} - -// Bootstrap will try to connect to as many peers as possible -func (nm *NetMessenger) Bootstrap(ctx context.Context) { - nm.mutClosed.RLock() - if nm.closed { - nm.mutClosed.RUnlock() - return - } - nm.mutClosed.RUnlock() - - nm.mutBootstrap.Lock() - if nm.mdns != nil { - //already started the bootstrap process, return - nm.mutBootstrap.Unlock() - return - } - - mdns, err := discovery.NewMdnsService(context.Background(), nm.p2pNode, durMdnsCalls, "discovery") - - if err != nil { - panic(err) - } - - mdns.RegisterNotifee(nm) - nm.mdns = mdns - - 
nm.mutBootstrap.Unlock() - - nm.cn.Start() -} - -// HandlePeerFound updates the routing table with this new peer -func (nm *NetMessenger) HandlePeerFound(pi peerstore.PeerInfo) { - if nm.Peers() == nil { - return - } - - peers := nm.Peers() - found := false - - for i := 0; i < len(peers); i++ { - if peers[i] == pi.ID { - found = true - break - } - } - - if found { - return - } - - for i := 0; i < len(pi.Addrs); i++ { - nm.AddAddress(pi.ID, pi.Addrs[i], peerstore.PermanentAddrTTL) - } - - nm.RouteTable().Update(pi.ID) -} - -// PrintConnected displays the connected peers -func (nm *NetMessenger) PrintConnected() { - conns := nm.Conns() - - connectedTo := fmt.Sprintf("Node %s is connected to: \n", nm.ID().Pretty()) - for i := 0; i < len(conns); i++ { - connectedTo = connectedTo + fmt.Sprintf("\t- %s with distance %d\n", conns[i].RemotePeer().Pretty(), - ComputeDistanceAD(nm.ID(), conns[i].RemotePeer())) - } - - log.Debug(connectedTo) -} - -// AddAddress adds a new address to peer store -func (nm *NetMessenger) AddAddress(p peer.ID, addr multiaddr.Multiaddr, ttl time.Duration) { - nm.p2pNode.Network().Peerstore().AddAddr(p, addr, ttl) -} - -// Connectedness tests for a connection between self and another peer -func (nm *NetMessenger) Connectedness(pid peer.ID) net.Connectedness { - return nm.p2pNode.Network().Connectedness(pid) -} - -// ParseAddressIpfs translates the string containing the address of the node to a PeerInfo object -func (nm *NetMessenger) ParseAddressIpfs(address string) (*peerstore.PeerInfo, error) { - addr, err := ipfsaddr.ParseString(address) - if err != nil { - return nil, err - } - - pinfo, err := peerstore.InfoFromP2pAddr(addr.Multiaddr()) - if err != nil { - return nil, err - } - - return pinfo, nil -} - -// AddTopic registers a new topic to this messenger -func (nm *NetMessenger) AddTopic(t *Topic) error { - //sanity checks - if t == nil { - return errors.New("topic can not be nil") - } - - if strings.Contains(t.Name(), requestTopicSuffix) { - 
return errors.New("topic name contains request suffix") - } - - nm.mutTopics.Lock() - - _, ok := nm.topics[t.Name()] - if ok { - nm.mutTopics.Unlock() - return errors.New("topic already exists") - } - - nm.topics[t.Name()] = t - t.CurrentPeer = nm.ID() - nm.mutTopics.Unlock() - - subscr, err := nm.ps.Subscribe(t.Name()) - if err != nil { - return err - } - - subscrRequest, err := nm.ps.Subscribe(t.Name() + requestTopicSuffix) - if err != nil { - return err - } - - // async func for passing received data to Topic object - go func() { - for { - msg, err := subscr.Next(nm.context) - if err != nil { - log.Error(err.Error()) - continue - } - - obj, err := t.CreateObject(msg.GetData()) - if err != nil { - log.Error(err.Error()) - continue - } - - nm.mutGossipCache.Lock() - if nm.gossipCache.Has(obj.ID()) { - //duplicate object, skip - nm.mutGossipCache.Unlock() - continue - } - - nm.gossipCache.Add(obj.ID()) - nm.mutGossipCache.Unlock() - - err = t.NewObjReceived(obj, msg.GetFrom().Pretty()) - if err != nil { - log.Error(err.Error()) - continue - } - } - }() - - // func that publishes on network from Topic object - t.SendData = func(data []byte) error { - nm.mutClosed.RLock() - if nm.closed { - nm.mutClosed.RUnlock() - return nil - } - nm.mutClosed.RUnlock() - - go func(topicName string, buffer []byte) { - nm.chSendMessages <- &message{ - buff: buffer, - topic: topicName, - } - }(t.Name(), data) - - return nil - } - - // validator registration func - t.RegisterTopicValidator = func(v pubsub.Validator) error { - return nm.ps.RegisterTopicValidator(t.Name(), v) - } - - // validator unregistration func - t.UnregisterTopicValidator = func() error { - return nm.ps.UnregisterTopicValidator(t.Name()) - } - - nm.createRequestTopicAndBind(t, subscrRequest) - - return nil -} - -// createRequestTopicAndBind is used to wire-up the func pointers to the request channel created automatically -// it also implements a validator function for not broadcast the request if it can resolve 
-func (nm *NetMessenger) createRequestTopicAndBind(t *Topic, subscriberRequest *pubsub.Subscription) { - // there is no need to have a function on received data - // the logic will be called inside validator func as this is the first func called - // and only if the result was nil, the validator actually let the message pass through its peers - v := func(ctx context.Context, mes *pubsub.Message) bool { - //resolver has not been set up, let the message go to the other peers, maybe they can resolve the request - if t.ResolveRequest == nil { - return true - } - - //resolved payload - buff := t.ResolveRequest(mes.GetData()) - - if buff == nil { - //object not found - return true - } - - //found object, no need to resend the request message to peers - //test whether we also should broadcast the message (others might have broadcast it just before us) - has := false - - nm.mutGossipCache.RLock() - has = nm.gossipCache.Has(string(buff)) - nm.mutGossipCache.RUnlock() - - if !has { - //only if the current peer did not receive an equal object to cloner, - //then it shall broadcast it - err := t.BroadcastBuff(buff) - if err != nil { - log.Error(err.Error()) - } - } - return false - } - - //wire-up a plain func for publishing on request channel - t.Request = func(hash []byte) error { - go func(topicName string, buffer []byte) { - nm.chSendMessages <- &message{ - buff: buffer, - topic: topicName, - } - }(t.Name()+requestTopicSuffix, hash) - - return nil - } - - //wire-up the validator - err := nm.ps.RegisterTopicValidator(t.Name()+requestTopicSuffix, v) - if err != nil { - log.Error(err.Error()) - } -} - -// GetTopic returns the topic from its name or nil if no topic with that name -// was ever registered -func (nm *NetMessenger) GetTopic(topicName string) *Topic { - nm.mutTopics.RLock() - defer nm.mutTopics.RUnlock() - - if t, ok := nm.topics[topicName]; ok { - return t - } - - return nil -} diff --git a/p2p/netMessenger_test.go b/p2p/netMessenger_test.go deleted file mode 
100644 index 9f990d713af..00000000000 --- a/p2p/netMessenger_test.go +++ /dev/null @@ -1,1134 +0,0 @@ -package p2p_test - -import ( - "bytes" - "context" - "fmt" - "strconv" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p/mock" - "github.com/libp2p/go-libp2p-net" - "github.com/libp2p/go-libp2p-pubsub" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" -) - -type testNetStringCreator struct { - Data string -} - -type structNetTest1 struct { - Nonce int - Data float64 -} - -type structNetTest2 struct { - Nonce string - Data []byte -} - -//------- testNetStringNewer - -// New will return a new instance of string. Dummy, just to implement Cloner interface as strings are immutable -func (sc *testNetStringCreator) Create() p2p.Creator { - return &testNetStringCreator{} -} - -// ID will return the same string as ID -func (sc *testNetStringCreator) ID() string { - return sc.Data -} - -//------- structNetTest1 - -func (s1 *structNetTest1) Create() p2p.Creator { - return &structNetTest1{} -} - -func (s1 *structNetTest1) ID() string { - return strconv.Itoa(s1.Nonce) -} - -//------- structNetTest2 - -func (s2 *structNetTest2) Create() p2p.Creator { - return &structNetTest2{} -} - -func (s2 *structNetTest2) ID() string { - return s2.Nonce -} - -var testNetMessengerMaxWaitResponse = time.Duration(time.Second * 5) -var testNetMessengerWaitResponseUnreceivedMsg = time.Duration(time.Second) - -var pubsubAnnounceDuration = time.Second * 2 - -var startingPort = 4000 - -func createNetMessenger(t *testing.T, port int, nConns int) (*p2p.NetMessenger, error) { - return createNetMessengerPubSub(t, port, nConns, p2p.FloodSub) -} - -func createNetMessengerPubSub(t *testing.T, port int, nConns int, strategy p2p.PubSubStrategy) (*p2p.NetMessenger, error) { - cp, err := p2p.NewConnectParamsFromPort(port) - assert.Nil(t, err) - - return 
p2p.NewNetMessenger(context.Background(), &mock.MarshalizerMock{}, &mock.HasherMock{}, cp, nConns, strategy) -} - -func waitForConnectionsToBeMade(nodes []p2p.Messenger, connectGraph map[int][]int, chanDone chan bool) { - for { - fullyConnected := true - - //for each element in the connect graph, check that is really connected to other peers - for k, v := range connectGraph { - for _, peerIndex := range v { - if nodes[k].Connectedness(nodes[peerIndex].ID()) != net.Connected { - fullyConnected = false - break - } - } - } - - if fullyConnected { - break - } - - time.Sleep(time.Millisecond) - } - chanDone <- true -} - -func waitForWaitGroup(wg *sync.WaitGroup, chanDone chan bool) { - wg.Wait() - chanDone <- true -} - -func waitForValue(value *int32, expected int32, chanDone chan bool) { - for { - if atomic.LoadInt32(value) == expected { - break - } - - time.Sleep(time.Nanosecond) - } - - chanDone <- true -} - -func closeAllNodes(nodes []p2p.Messenger) { - fmt.Println("### Closing nodes... ###") - for i := 0; i < len(nodes); i++ { - err := nodes[i].Close() - if err != nil { - p2p.Log.Error(err.Error()) - } - } -} - -func getConnectableAddress(addresses []string) string { - for _, addr := range addresses { - if strings.Contains(addr, "127.0.0.1") { - return addr - } - } - - return "" -} - -func createTestNetwork(t *testing.T) ([]p2p.Messenger, error) { - nodes := make([]p2p.Messenger, 0) - - //create 5 nodes - for i := 0; i < 5; i++ { - node, err := createNetMessenger(t, startingPort+i, 10) - assert.Nil(t, err) - - nodes = append(nodes, node) - - fmt.Printf("Node %v is %s\n", i+1, getConnectableAddress(node.Addresses())) - } - - //connect one with each other manually - // node0 --------- node1 - // | | - // +------------ node2 - // | | - // | node3 - // | | - // +------------ node4 - - nodes[1].ConnectToAddresses(context.Background(), []string{getConnectableAddress(nodes[0].Addresses())}) - nodes[2].ConnectToAddresses(context.Background(), []string{ - 
getConnectableAddress(nodes[1].Addresses()), - getConnectableAddress(nodes[0].Addresses())}) - nodes[3].ConnectToAddresses(context.Background(), []string{getConnectableAddress(nodes[2].Addresses())}) - nodes[4].ConnectToAddresses(context.Background(), []string{ - getConnectableAddress(nodes[3].Addresses()), - getConnectableAddress(nodes[0].Addresses())}) - - connectGraph := make(map[int][]int) - connectGraph[0] = []int{1, 2, 4} - connectGraph[1] = []int{0, 2} - connectGraph[2] = []int{0, 1, 3} - connectGraph[3] = []int{2, 4} - connectGraph[4] = []int{0, 3} - - chanDone := make(chan bool, 0) - go waitForConnectionsToBeMade(nodes, connectGraph, chanDone) - select { - case <-chanDone: - case <-time.After(testNetMessengerMaxWaitResponse): - return nodes, errors.New("could not make connections") - } - - return nodes, nil -} - -func TestNetMessenger_RecreationSameNodeShouldWork(t *testing.T) { - fmt.Println() - - nodes := make([]p2p.Messenger, 0) - - defer func() { - closeAllNodes(nodes) - }() - - node, _ := createNetMessenger(t, startingPort, 10) - nodes = append(nodes, node) - - node, err := createNetMessenger(t, startingPort, 10) - assert.Nil(t, err) - nodes = append(nodes, node) - - if nodes[0].ID().Pretty() != nodes[1].ID().Pretty() { - t.Fatal("ID mismatch") - } -} - -func TestNetMessenger_SendToSelfShouldWork(t *testing.T) { - nodes := make([]p2p.Messenger, 0) - - defer func() { - closeAllNodes(nodes) - }() - - node, err := createNetMessenger(t, startingPort, 10) - assert.Nil(t, err) - nodes = append(nodes, node) - - wg := sync.WaitGroup{} - wg.Add(1) - chanDone := make(chan bool) - go waitForWaitGroup(&wg, chanDone) - - err = nodes[0].AddTopic(p2p.NewTopic("test topic", &testNetStringCreator{}, &mock.MarshalizerMock{})) - nodes[0].GetTopic("test topic").AddDataReceived(func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - payload := (*data.(*testNetStringCreator)).Data - - fmt.Printf("Got message: %v\n", payload) - - if payload == "ABC" { - wg.Done() 
- } - }) - assert.Nil(t, err) - - err = nodes[0].GetTopic("test topic").Broadcast(testNetStringCreator{Data: "ABC"}) - assert.Nil(t, err) - - select { - case <-chanDone: - case <-time.After(testNetMessengerMaxWaitResponse): - assert.Fail(t, "Should have been 1 (message received to self)") - } -} - -func TestNetMessenger_NodesPingPongOn2TopicsShouldWork(t *testing.T) { - fmt.Println() - - nodes := make([]p2p.Messenger, 0) - - defer func() { - closeAllNodes(nodes) - }() - - node, err := createNetMessenger(t, startingPort, 10) - assert.Nil(t, err) - nodes = append(nodes, node) - - node, err = createNetMessenger(t, startingPort+1, 10) - assert.Nil(t, err) - nodes = append(nodes, node) - - connectGraph := make(map[int][]int) - connectGraph[0] = []int{1} - connectGraph[1] = []int{0} - - nodes[0].ConnectToAddresses(context.Background(), []string{getConnectableAddress(nodes[1].Addresses())}) - - wg := sync.WaitGroup{} - chanDone := make(chan bool) - go waitForConnectionsToBeMade(nodes, connectGraph, chanDone) - select { - case <-chanDone: - case <-time.After(testNetMessengerMaxWaitResponse): - assert.Fail(t, "Could not make a connection between the 2 peers") - return - } - - fmt.Printf("Node 1 is %s\n", nodes[0].Addresses()[0]) - fmt.Printf("Node 2 is %s\n", nodes[1].Addresses()[0]) - - fmt.Printf("Node 1 has the addresses: %v\n", nodes[0].Addresses()) - fmt.Printf("Node 2 has the addresses: %v\n", nodes[1].Addresses()) - - //create 2 topics on each node - err = nodes[0].AddTopic(p2p.NewTopic("ping", &testNetStringCreator{}, &mock.MarshalizerMock{})) - assert.Nil(t, err) - err = nodes[0].AddTopic(p2p.NewTopic("pong", &testNetStringCreator{}, &mock.MarshalizerMock{})) - assert.Nil(t, err) - - err = nodes[1].AddTopic(p2p.NewTopic("ping", &testNetStringCreator{}, &mock.MarshalizerMock{})) - assert.Nil(t, err) - err = nodes[1].AddTopic(p2p.NewTopic("pong", &testNetStringCreator{}, &mock.MarshalizerMock{})) - assert.Nil(t, err) - - time.Sleep(pubsubAnnounceDuration) - - 
wg.Add(2) - go waitForWaitGroup(&wg, chanDone) - - //assign some event handlers on topics - nodes[0].GetTopic("ping").AddDataReceived(func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - payload := (*data.(*testNetStringCreator)).Data - - if payload == "ping string" { - fmt.Println("Ping received, sending pong...") - err = nodes[0].GetTopic("pong").Broadcast(testNetStringCreator{"pong string"}) - assert.Nil(t, err) - } - }) - - nodes[0].GetTopic("pong").AddDataReceived(func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - payload := (*data.(*testNetStringCreator)).Data - - fmt.Printf("node1 received: %v\n", payload) - - if payload == "pong string" { - fmt.Println("Pong received!") - wg.Done() - } - }) - - //for node2 topic ping we do not need an event handler in this test - nodes[1].GetTopic("pong").AddDataReceived(func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - payload := (*data.(*testNetStringCreator)).Data - - fmt.Printf("node2 received: %v\n", payload) - - if payload == "pong string" { - fmt.Println("Pong received!") - wg.Done() - } - }) - - err = nodes[1].GetTopic("ping").Broadcast(testNetStringCreator{"ping string"}) - assert.Nil(t, err) - - select { - case <-chanDone: - case <-time.After(testNetMessengerMaxWaitResponse): - assert.Fail(t, "Should have been 2 (pong from node1: self and node2: received from node1)") - } -} - -func TestNetMessenger_SimpleBroadcast5nodesInlineShouldWork(t *testing.T) { - fmt.Println() - - nodes := make([]p2p.Messenger, 0) - - defer func() { - closeAllNodes(nodes) - }() - - //create 5 nodes - for i := 0; i < 5; i++ { - node, err := createNetMessenger(t, startingPort+i, 10) - assert.Nil(t, err) - - nodes = append(nodes, node) - - fmt.Printf("Node %v is %s\n", i+1, node.Addresses()[0]) - } - - //connect one with each other daisy-chain - for i := 1; i < 5; i++ { - node := nodes[i] - node.ConnectToAddresses(context.Background(), []string{getConnectableAddress(nodes[i-1].Addresses())}) - } - - 
connectGraph := make(map[int][]int) - connectGraph[0] = []int{1} - connectGraph[1] = []int{0, 2} - connectGraph[2] = []int{1, 3} - connectGraph[3] = []int{2, 4} - connectGraph[4] = []int{3} - - chanDone := make(chan bool) - go waitForConnectionsToBeMade(nodes, connectGraph, chanDone) - select { - case <-chanDone: - case <-time.After(testNetMessengerMaxWaitResponse): - assert.Fail(t, "Could not make connections") - return - } - - wg := sync.WaitGroup{} - wg.Add(5) - go waitForWaitGroup(&wg, chanDone) - - //print connected and create topics - for i := 0; i < 5; i++ { - node := nodes[i] - node.PrintConnected() - - err := node.AddTopic(p2p.NewTopic("test", &testNetStringCreator{}, &mock.MarshalizerMock{})) - node.GetTopic("test").AddDataReceived( - func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - fmt.Printf("%v received from %v: %v\n", node.ID(), msgInfo.Peer, data.(*testNetStringCreator).Data) - wg.Done() - }) - assert.Nil(t, err) - } - - fmt.Println() - fmt.Println() - - time.Sleep(pubsubAnnounceDuration) - - fmt.Println("Broadcasting...") - err := nodes[0].GetTopic("test").Broadcast(testNetStringCreator{Data: "Foo"}) - assert.Nil(t, err) - - select { - case <-chanDone: - fmt.Println("Got all messages!") - case <-time.After(testNetMessengerMaxWaitResponse): - assert.Fail(t, "not all messages were received") - } -} - -func TestNetMessenger_SimpleBroadcast5nodesBetterConnectedShouldWork(t *testing.T) { - var nodes []p2p.Messenger - - defer func() { - closeAllNodes(nodes) - }() - - nodes, err := createTestNetwork(t) - if err != nil { - assert.Fail(t, err.Error()) - return - } - - chanDone := make(chan bool, 0) - - wg := sync.WaitGroup{} - wg.Add(5) - go waitForWaitGroup(&wg, chanDone) - - //print connected and create topics - for i := 0; i < 5; i++ { - node := nodes[i] - node.PrintConnected() - - err := node.AddTopic(p2p.NewTopic("test", &testNetStringCreator{}, &mock.MarshalizerMock{})) - node.GetTopic("test").AddDataReceived( - func(name string, data 
interface{}, msgInfo *p2p.MessageInfo) { - fmt.Printf("%v received from %v: %v\n", node.ID(), msgInfo.Peer, data.(*testNetStringCreator).Data) - wg.Done() - }) - assert.Nil(t, err) - } - - fmt.Println() - fmt.Println() - - time.Sleep(pubsubAnnounceDuration) - - fmt.Println("Broadcasting...") - err = nodes[0].GetTopic("test").Broadcast(testNetStringCreator{Data: "Foo"}) - assert.Nil(t, err) - - select { - case <-chanDone: - fmt.Println("Got all messages!") - case <-time.After(testNetMessengerMaxWaitResponse): - assert.Fail(t, "not all messages were received") - } -} - -func TestNetMessenger_SendingNilShouldErr(t *testing.T) { - nodes := make([]p2p.Messenger, 0) - - defer func() { - closeAllNodes(nodes) - }() - - node, err := createNetMessenger(t, startingPort, 10) - assert.Nil(t, err) - nodes = append(nodes, node) - - err = node.AddTopic(p2p.NewTopic("test", &testNetStringCreator{}, &mock.MarshalizerMock{})) - assert.Nil(t, err) - err = node.GetTopic("test").Broadcast(nil) - assert.NotNil(t, err) -} - -func TestNetMessenger_CreateNodeWithNilMarshalizerShouldErr(t *testing.T) { - cp, err := p2p.NewConnectParamsFromPort(startingPort) - assert.Nil(t, err) - - _, err = p2p.NewNetMessenger(context.Background(), nil, &mock.HasherMock{}, cp, 10, p2p.FloodSub) - assert.NotNil(t, err) -} - -func TestNetMessenger_CreateNodeWithNilHasherShouldErr(t *testing.T) { - cp, err := p2p.NewConnectParamsFromPort(startingPort) - assert.Nil(t, err) - - _, err = p2p.NewNetMessenger(context.Background(), &mock.MarshalizerMock{}, nil, cp, 10, p2p.FloodSub) - assert.NotNil(t, err) -} - -func TestNetMessenger_BadObjectToUnmarshalShouldFilteredOut(t *testing.T) { - //stress test to check if the node is able to cope - //with unmarshaling a bad object - //both structs have the same fields but incompatible types - - //node1 registers topic 'test' with struct1 - //node2 registers topic 'test' with struct2 - - nodes := make([]p2p.Messenger, 0) - - defer func() { - closeAllNodes(nodes) - }() - - 
node, err := createNetMessenger(t, startingPort, 10) - assert.Nil(t, err) - nodes = append(nodes, node) - - node, err = createNetMessenger(t, startingPort+1, 10) - assert.Nil(t, err) - nodes = append(nodes, node) - - //connect nodes - nodes[0].ConnectToAddresses(context.Background(), []string{getConnectableAddress(nodes[1].Addresses())}) - - connectGraph := make(map[int][]int) - connectGraph[0] = []int{1} - connectGraph[1] = []int{0} - - chanDone := make(chan bool) - go waitForConnectionsToBeMade(nodes, connectGraph, chanDone) - select { - case <-chanDone: - case <-time.After(testNetMessengerMaxWaitResponse): - assert.Fail(t, "Could not make a connection between the 2 peers") - return - } - - wg := sync.WaitGroup{} - wg.Add(1) - go waitForWaitGroup(&wg, chanDone) - - //create topics for each node - err = nodes[0].AddTopic(p2p.NewTopic("test", &structNetTest1{}, &mock.MarshalizerMock{})) - assert.Nil(t, err) - err = nodes[1].AddTopic(p2p.NewTopic("test", &structNetTest2{}, &mock.MarshalizerMock{})) - assert.Nil(t, err) - - time.Sleep(pubsubAnnounceDuration) - - //node 1 sends, node 2 receives - nodes[1].GetTopic("test").AddDataReceived(func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - fmt.Printf("received: %v", data) - wg.Done() - }) - - err = nodes[0].GetTopic("test").Broadcast(&structNetTest1{Nonce: 4, Data: 4.5}) - assert.Nil(t, err) - - select { - case <-chanDone: - assert.Fail(t, "Should have not received the message") - case <-time.After(testNetMessengerWaitResponseUnreceivedMsg): - } -} - -func TestNetMessenger_BroadcastOnInexistentTopicShouldFilteredOut(t *testing.T) { - //stress test to check if the node is able to cope - //with receiving on an inexistent topic - - nodes := make([]p2p.Messenger, 0) - - defer func() { - closeAllNodes(nodes) - }() - - node, err := createNetMessenger(t, startingPort, 10) - assert.Nil(t, err) - nodes = append(nodes, node) - - node, err = createNetMessenger(t, startingPort+1, 10) - assert.Nil(t, err) - nodes = 
append(nodes, node) - - //connect nodes - nodes[0].ConnectToAddresses(context.Background(), []string{getConnectableAddress(nodes[1].Addresses())}) - - connectGraph := make(map[int][]int) - connectGraph[0] = []int{1} - connectGraph[1] = []int{0} - - chanDone := make(chan bool) - go waitForConnectionsToBeMade(nodes, connectGraph, chanDone) - select { - case <-chanDone: - case <-time.After(testNetMessengerMaxWaitResponse): - assert.Fail(t, "Could not make a connection between the 2 peers") - return - } - - wg := sync.WaitGroup{} - wg.Add(1) - go waitForWaitGroup(&wg, chanDone) - - //create topics for each node - err = nodes[0].AddTopic(p2p.NewTopic("test1", &testNetStringCreator{}, &mock.MarshalizerMock{})) - assert.Nil(t, err) - err = nodes[1].AddTopic(p2p.NewTopic("test2", &testNetStringCreator{}, &mock.MarshalizerMock{})) - assert.Nil(t, err) - - time.Sleep(pubsubAnnounceDuration) - - //node 1 sends, node 2 receives - nodes[1].GetTopic("test2").AddDataReceived(func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - fmt.Printf("received: %v", data) - wg.Done() - }) - - err = nodes[0].GetTopic("test1").Broadcast(testNetStringCreator{"Foo"}) - assert.Nil(t, err) - - select { - case <-chanDone: - assert.Fail(t, "Should have not received the message") - case <-time.After(testNetMessengerWaitResponseUnreceivedMsg): - } -} - -func TestNetMessenger_BroadcastWithValidatorsShouldWork(t *testing.T) { - var nodes []p2p.Messenger - - defer func() { - closeAllNodes(nodes) - }() - - nodes, err := createTestNetwork(t) - if err != nil { - assert.Fail(t, err.Error()) - return - } - - chanDone := make(chan bool, 0) - - wg := sync.WaitGroup{} - - recv := func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - fmt.Printf("%v got from %v the message: %v\n", msgInfo.CurrentPeer, msgInfo.Peer, data) - wg.Done() - } - - //print connected and create topics - for i := 0; i < 5; i++ { - node := nodes[i] - node.PrintConnected() - - err := node.AddTopic(p2p.NewTopic("test", 
&testNetStringCreator{}, &mock.MarshalizerMock{})) - assert.Nil(t, err) - node.GetTopic("test").AddDataReceived(recv) - } - - time.Sleep(pubsubAnnounceDuration) - - // dummy validator that prevents propagation of "AAA" message - v := func(ctx context.Context, mes *pubsub.Message) bool { - obj := &testNetStringCreator{} - - marsh := mock.MarshalizerMock{} - err := marsh.Unmarshal(obj, mes.GetData()) - assert.Nil(t, err) - - return obj.Data != "AAA" - } - - //node 2 has validator in place - err = nodes[2].GetTopic("test").RegisterValidator(v) - assert.Nil(t, err) - - fmt.Println() - fmt.Println() - - //send AAA, wait, check that 4 peers got the message - fmt.Println("Broadcasting AAA...") - wg.Add(4) - go waitForWaitGroup(&wg, chanDone) - err = nodes[0].GetTopic("test").Broadcast(testNetStringCreator{Data: "AAA"}) - assert.Nil(t, err) - - select { - case <-chanDone: - case <-time.After(testNetMessengerMaxWaitResponse): - assert.Fail(t, "not all 4 peers got AAA message") - return - } - - //send BBB, wait, check that all peers got the message - fmt.Println("Broadcasting BBB...") - wg.Add(5) - go waitForWaitGroup(&wg, chanDone) - - err = nodes[0].GetTopic("test").Broadcast(testNetStringCreator{Data: "BBB"}) - assert.Nil(t, err) - - select { - case <-chanDone: - case <-time.After(testNetMessengerMaxWaitResponse): - assert.Fail(t, "not all 5 peers got BBB message") - return - } - - //add the validator on node 4 - err = nodes[4].GetTopic("test").RegisterValidator(v) - assert.Nil(t, err) - - fmt.Println("Waiting for cooldown period (timecache should empty map)") - time.Sleep(p2p.DurTimeCache + time.Millisecond*100) - - //send AAA, wait, check that 2 peers got the message - fmt.Println("Resending AAA...") - wg.Add(2) - go waitForWaitGroup(&wg, chanDone) - - err = nodes[0].GetTopic("test").Broadcast(testNetStringCreator{Data: "AAA"}) - assert.Nil(t, err) - - select { - case <-chanDone: - case <-time.After(testNetMessengerMaxWaitResponse): - assert.Fail(t, "not all 2 peers got 
AAA message") - } -} - -func TestNetMessenger_BroadcastToGossipSubShouldWork(t *testing.T) { - var nodes []p2p.Messenger - - defer func() { - closeAllNodes(nodes) - }() - - nodes, err := createTestNetwork(t) - if err != nil { - assert.Fail(t, err.Error()) - return - } - - chanDone := make(chan bool, 0) - - wg := sync.WaitGroup{} - doWaitGroup := false - counter := int32(0) - - recv1 := func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - if doWaitGroup { - wg.Done() - } - - atomic.AddInt32(&counter, 1) - } - - //print connected and create topics - for i := 0; i < 5; i++ { - node := nodes[i] - node.PrintConnected() - - err := node.AddTopic(p2p.NewTopic("test", &testNetStringCreator{}, &mock.MarshalizerMock{})) - assert.Nil(t, err) - node.GetTopic("test").AddDataReceived(recv1) - } - - time.Sleep(pubsubAnnounceDuration) - - //send a piggyback message, wait 1 sec - fmt.Println("Broadcasting piggyback message...") - err = nodes[0].GetTopic("test").Broadcast(testNetStringCreator{Data: "piggyback"}) - assert.Nil(t, err) - time.Sleep(time.Second) - fmt.Printf("%d peers got the message!\n", atomic.LoadInt32(&counter)) - - atomic.StoreInt32(&counter, 0) - - fmt.Println("Broadcasting AAA...") - doWaitGroup = true - wg.Add(5) - go waitForWaitGroup(&wg, chanDone) - err = nodes[0].GetTopic("test").Broadcast(testNetStringCreator{Data: "AAA"}) - assert.Nil(t, err) - - select { - case <-chanDone: - case <-time.After(testNetMessengerMaxWaitResponse): - assert.Fail(t, "not all 5 peers got AAA message") - } -} - -func TestNetMessenger_BroadcastToUnknownSubShouldErr(t *testing.T) { - fmt.Println() - - _, err := createNetMessengerPubSub(t, startingPort, 10, 500) - assert.NotNil(t, err) -} - -func TestNetMessenger_RequestResolveTestCfg1ShouldWork(t *testing.T) { - var nodes []p2p.Messenger - - defer func() { - closeAllNodes(nodes) - }() - - nodes, err := createTestNetwork(t) - if err != nil { - assert.Fail(t, err.Error()) - return - } - - chanDone := make(chan bool, 0) - - 
recv := func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - if data.(*testNetStringCreator).Data == "Real object1" { - chanDone <- true - } - - fmt.Printf("Received: %v\n", data.(*testNetStringCreator).Data) - } - - //print connected and create topics - for i := 0; i < 5; i++ { - node := nodes[i] - node.PrintConnected() - - err := node.AddTopic(p2p.NewTopic("test", &testNetStringCreator{}, &mock.MarshalizerMock{})) - assert.Nil(t, err) - } - - time.Sleep(pubsubAnnounceDuration) - - //to simplify, only node 0 should have a recv event handler - nodes[0].GetTopic("test").AddDataReceived(recv) - - //setup a resolver func for node 3 - nodes[3].GetTopic("test").ResolveRequest = func(hash []byte) []byte { - if bytes.Equal(hash, []byte("A000")) { - marshalizer := &mock.MarshalizerMock{} - buff, _ := marshalizer.Marshal(&testNetStringCreator{Data: "Real object1"}) - return buff - } - - return nil - } - - //node0 requests an unavailable data - err = nodes[0].GetTopic("test").SendRequest([]byte("B000")) - assert.Nil(t, err) - fmt.Println("Sent request B000") - select { - case <-chanDone: - assert.Fail(t, "Should have not sent object") - case <-time.After(testNetMessengerWaitResponseUnreceivedMsg): - } - - //node0 requests an available data on node 3 - err = nodes[0].GetTopic("test").SendRequest([]byte("A000")) - assert.Nil(t, err) - fmt.Println("Sent request A000") - - select { - case <-chanDone: - case <-time.After(testNetMessengerMaxWaitResponse): - assert.Fail(t, "Should have sent object") - return - } -} - -func TestNetMessenger_RequestResolveTestCfg2ShouldWork(t *testing.T) { - var nodes []p2p.Messenger - - defer func() { - closeAllNodes(nodes) - }() - - nodes, err := createTestNetwork(t) - if err != nil { - assert.Fail(t, err.Error()) - return - } - - chanDone := make(chan bool, 0) - - recv := func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - if data.(*testNetStringCreator).Data == "Real object1" { - chanDone <- true - } - - 
fmt.Printf("Received: %v from %v\n", data.(*testNetStringCreator).Data, msgInfo.Peer) - } - - //print connected and create topics - for i := 0; i < 5; i++ { - node := nodes[i] - node.PrintConnected() - - err := node.AddTopic(p2p.NewTopic("test", &testNetStringCreator{}, &mock.MarshalizerMock{})) - assert.Nil(t, err) - } - - time.Sleep(pubsubAnnounceDuration) - - //to simplify, only node 1 should have a recv event handler - nodes[1].GetTopic("test").AddDataReceived(recv) - - //resolver func for node 0 and 2 - resolverOK := func(hash []byte) []byte { - if bytes.Equal(hash, []byte("A000")) { - marshalizer := &mock.MarshalizerMock{} - buff, _ := marshalizer.Marshal(&testNetStringCreator{Data: "Real object1"}) - return buff - } - - return nil - } - - //resolver func for other nodes - resolverNOK := func(hash []byte) []byte { - panic("Should have not reached this point") - - return nil - } - - nodes[0].GetTopic("test").ResolveRequest = resolverOK - nodes[2].GetTopic("test").ResolveRequest = resolverOK - - nodes[3].GetTopic("test").ResolveRequest = resolverNOK - nodes[4].GetTopic("test").ResolveRequest = resolverNOK - - //node1 requests an available data - err = nodes[1].GetTopic("test").SendRequest([]byte("A000")) - assert.Nil(t, err) - fmt.Println("Sent request A000") - - select { - case <-chanDone: - case <-time.After(testNetMessengerMaxWaitResponse): - assert.Fail(t, "Should have sent object") - } - -} - -func TestNetMessenger_RequestResolveTestSelfShouldWork(t *testing.T) { - var nodes []p2p.Messenger - - defer func() { - closeAllNodes(nodes) - }() - - nodes, err := createTestNetwork(t) - if err != nil { - assert.Fail(t, err.Error()) - return - } - - chanDone := make(chan bool, 0) - - recv := func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - if data.(*testNetStringCreator).Data == "Real object1" { - chanDone <- true - } - - fmt.Printf("Received: %v from %v\n", data.(*testNetStringCreator).Data, msgInfo.Peer) - } - - //print connected and create topics 
- for i := 0; i < 5; i++ { - node := nodes[i] - node.PrintConnected() - - err := node.AddTopic(p2p.NewTopic("test", &testNetStringCreator{}, &mock.MarshalizerMock{})) - assert.Nil(t, err) - } - - time.Sleep(pubsubAnnounceDuration) - - //to simplify, only node 1 should have a recv event handler - nodes[1].GetTopic("test").AddDataReceived(recv) - - //resolver func for node 1 - resolverOK := func(hash []byte) []byte { - if bytes.Equal(hash, []byte("A000")) { - marshalizer := &mock.MarshalizerMock{} - buff, _ := marshalizer.Marshal(&testNetStringCreator{Data: "Real object1"}) - return buff - } - - return nil - } - - //resolver func for other nodes - resolverNOK := func(hash []byte) []byte { - panic("Should have not reached this point") - - return nil - } - - nodes[1].GetTopic("test").ResolveRequest = resolverOK - - nodes[0].GetTopic("test").ResolveRequest = resolverNOK - nodes[2].GetTopic("test").ResolveRequest = resolverNOK - nodes[3].GetTopic("test").ResolveRequest = resolverNOK - nodes[4].GetTopic("test").ResolveRequest = resolverNOK - - //node1 requests an available data - err = nodes[1].GetTopic("test").SendRequest([]byte("A000")) - assert.Nil(t, err) - fmt.Println("Sent request A000") - - select { - case <-chanDone: - case <-time.After(testNetMessengerMaxWaitResponse): - assert.Fail(t, "Should have self-sent object") - } - -} - -func TestNetMessenger_RequestResolveResendingShouldWork(t *testing.T) { - var nodes []p2p.Messenger - - defer func() { - closeAllNodes(nodes) - }() - - nodes, err := createTestNetwork(t) - if err != nil { - assert.Fail(t, err.Error()) - return - } - - chanDone := make(chan bool, 0) - - counter := int32(0) - - recv := func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - atomic.AddInt32(&counter, 1) - - fmt.Printf("Received: %v from %v\n", data.(*testNetStringCreator).Data, msgInfo.Peer) - } - - //print connected and create topics - for i := 0; i < 5; i++ { - node := nodes[i] - node.PrintConnected() - - err := 
node.AddTopic(p2p.NewTopic("test", &testNetStringCreator{}, &mock.MarshalizerMock{})) - assert.Nil(t, err) - } - - time.Sleep(pubsubAnnounceDuration) - - //to simplify, only node 1 should have a recv event handler - nodes[1].GetTopic("test").AddDataReceived(recv) - - //resolver func for node 0 and 2 - resolverOK := func(hash []byte) []byte { - if bytes.Equal(hash, []byte("A000")) { - marshalizer := &mock.MarshalizerMock{} - buff, _ := marshalizer.Marshal(&testNetStringCreator{Data: "Real object0"}) - return buff - } - - return nil - } - - //resolver func for other nodes - resolverNOK := func(hash []byte) []byte { - panic("Should have not reached this point") - - return nil - } - - nodes[0].GetTopic("test").ResolveRequest = resolverOK - nodes[2].GetTopic("test").ResolveRequest = resolverOK - - nodes[3].GetTopic("test").ResolveRequest = resolverNOK - nodes[4].GetTopic("test").ResolveRequest = resolverNOK - - //node1 requests an available data - go waitForValue(&counter, 1, chanDone) - err = nodes[1].GetTopic("test").SendRequest([]byte("A000")) - assert.Nil(t, err) - fmt.Println("Sent request A000") - select { - case <-chanDone: - case <-time.After(testNetMessengerMaxWaitResponse): - assert.Fail(t, "Should have received 1 object") - return - } - - //resending request. 
This should be filtered out - atomic.StoreInt32(&counter, 0) - go waitForValue(&counter, 1, chanDone) - err = nodes[1].GetTopic("test").SendRequest([]byte("A000")) - assert.Nil(t, err) - fmt.Println("Re-sent request A000") - select { - case <-chanDone: - assert.Fail(t, "Should have not received") - return - case <-time.After(testNetMessengerWaitResponseUnreceivedMsg): - } - - fmt.Println("delaying as to clear timecache buffer") - time.Sleep(p2p.DurTimeCache + time.Millisecond*100) - - //resending - atomic.StoreInt32(&counter, 0) - go waitForValue(&counter, 1, chanDone) - err = nodes[1].GetTopic("test").SendRequest([]byte("A000")) - assert.Nil(t, err) - fmt.Println("Re-sent request A000") - select { - case <-chanDone: - case <-time.After(testNetMessengerMaxWaitResponse): - assert.Fail(t, "Should have received 2 objects") - return - } - -} diff --git a/p2p/nodeError.go b/p2p/nodeError.go deleted file mode 100644 index 58c227d35cc..00000000000 --- a/p2p/nodeError.go +++ /dev/null @@ -1,18 +0,0 @@ -package p2p - -// NodeError is used inside p2p package and contains the receiver peer, sender peer and (if any) other nested errors -type NodeError struct { - PeerRecv string - PeerSend string - Err string - NestedErrors []NodeError -} - -func (e *NodeError) Error() string { - return e.Err -} - -// NewNodeError returns a new instance of the struct NodeError -func NewNodeError(peerRecv string, peerSend string, err string) *NodeError { - return &NodeError{PeerRecv: peerRecv, PeerSend: peerSend, Err: err} -} diff --git a/p2p/nodeError_test.go b/p2p/nodeError_test.go deleted file mode 100644 index 987b2519f25..00000000000 --- a/p2p/nodeError_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package p2p_test - -import ( - "testing" - - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" -) - -func TestNodeError(t *testing.T) { - err := p2p.NewNodeError("A", "B", "MES") - - if err.Err != err.Error() { - t.Fatal("Node error should return Err on Error()") - } -} diff --git a/p2p/p2p.go 
b/p2p/p2p.go new file mode 100644 index 00000000000..f2128525c66 --- /dev/null +++ b/p2p/p2p.go @@ -0,0 +1,129 @@ +package p2p + +import ( + "io" + "strings" + + "github.com/mr-tron/base58/base58" +) + +// PeerDiscoveryType defines the peer discovery mechanism to use +type PeerDiscoveryType int + +func (pdt PeerDiscoveryType) String() string { + switch pdt { + case PeerDiscoveryOff: + return "off" + case PeerDiscoveryKadDht: + return "kad-dht" + case PeerDiscoveryMdns: + return "mdns" + default: + return "unknown" + } +} + +// LoadPeerDiscoveryTypeFromString outputs a peer discovery type by parsing the string argument +// Errors if string is not recognized +func LoadPeerDiscoveryTypeFromString(str string) (PeerDiscoveryType, error) { + str = strings.ToLower(str) + + if str == PeerDiscoveryOff.String() { + return PeerDiscoveryOff, nil + } + + if str == PeerDiscoveryMdns.String() { + return PeerDiscoveryMdns, nil + } + + if str == PeerDiscoveryKadDht.String() { + return PeerDiscoveryKadDht, nil + } + + return PeerDiscoveryOff, ErrPeerDiscoveryNotImplemented +} + +const ( + // PeerDiscoveryOff will not enable peer discovery + PeerDiscoveryOff PeerDiscoveryType = iota + // PeerDiscoveryMdns will enable mdns mechanism + PeerDiscoveryMdns + // PeerDiscoveryKadDht wil enable kad-dht mechanism + PeerDiscoveryKadDht +) + +// MessageProcessor is the interface used to describe what a receive message processor should do +// All implementations that will be called from Messenger implementation will need to satisfy this interface +type MessageProcessor interface { + ProcessReceivedMessage(message MessageP2P) error +} + +// SendableData represents the struct used in data throttler implementation +type SendableData struct { + Buff []byte + Topic string +} + +// PeerID is a p2p peer identity. 
+type PeerID string + +// Bytes returns the peer ID as byte slice +func (pid PeerID) Bytes() []byte { + return []byte(pid) +} + +// Pretty returns a b58-encoded string of the peer id +func (pid PeerID) Pretty() string { + return base58.Encode(pid.Bytes()) +} + +// Messenger is the main struct used for communication with other peers +type Messenger interface { + io.Closer + + ID() PeerID + Peers() []PeerID + + Addresses() []string + ConnectToPeer(address string) error + KadDhtDiscoverNewPeers() error + IsConnected(peerID PeerID) bool + ConnectedPeers() []PeerID + TrimConnections() + Bootstrap() error + + CreateTopic(name string, createPipeForTopic bool) error + HasTopic(name string) bool + HasTopicValidator(name string) bool + RegisterMessageProcessor(topic string, handler MessageProcessor) error + UnregisterMessageProcessor(topic string) error + OutgoingPipeLoadBalancer() PipeLoadBalancer + BroadcastOnPipe(pipe string, topic string, buff []byte) + Broadcast(topic string, buff []byte) + SendToConnectedPeer(topic string, buff []byte, peerID PeerID) error +} + +// MessageP2P defines what a p2p message can do (should return) +type MessageP2P interface { + From() []byte + Data() []byte + SeqNo() []byte + TopicIDs() []string + Signature() []byte + Key() []byte + Peer() PeerID +} + +// PipeLoadBalancer defines what a load balancer that uses pipes should do +type PipeLoadBalancer interface { + AddPipe(pipe string) error + RemovePipe(pipe string) error + GetChannelOrDefault(pipe string) chan *SendableData + CollectFromPipes() []*SendableData +} + +// DirectSender defines a component that can send direct messages to connected peers +type DirectSender interface { + NextSeqno(counter *uint64) []byte + Send(topic string, buff []byte, peer PeerID) error +} diff --git a/p2p/p2p_test.go b/p2p/p2p_test.go new file mode 100644 index 00000000000..0b5a061f2b3 --- /dev/null +++ b/p2p/p2p_test.go @@ -0,0 +1,43 @@ +package p2p_test + +import ( + "testing" + + 
"github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/stretchr/testify/assert" +) + +func TestPeerDiscoveryType_StringOffUsingMixedCaseShouldWork(t *testing.T) { + t.Parallel() + + pdt, err := p2p.LoadPeerDiscoveryTypeFromString("oFf") + + assert.Equal(t, p2p.PeerDiscoveryOff, pdt) + assert.Nil(t, err) +} + +func TestPeerDiscoveryType_StringMdnsUsingMixedCaseShouldWork(t *testing.T) { + t.Parallel() + + pdt, err := p2p.LoadPeerDiscoveryTypeFromString("mDnS") + + assert.Equal(t, p2p.PeerDiscoveryMdns, pdt) + assert.Nil(t, err) +} + +func TestPeerDiscoveryType_StringKadDhtUsingMixedCaseShouldWork(t *testing.T) { + t.Parallel() + + pdt, err := p2p.LoadPeerDiscoveryTypeFromString("kAd-DhT") + + assert.Equal(t, p2p.PeerDiscoveryKadDht, pdt) + assert.Nil(t, err) +} + +func TestPeerDiscoveryType_StringNotImplementedShouldErr(t *testing.T) { + t.Parallel() + + _, err := p2p.LoadPeerDiscoveryTypeFromString("not-implemented-type") + + assert.Equal(t, p2p.ErrPeerDiscoveryNotImplemented, err) +} diff --git a/p2p/readme.md b/p2p/readme.md new file mode 100644 index 00000000000..4f2c002c1a6 --- /dev/null +++ b/p2p/readme.md @@ -0,0 +1,12 @@ +# P2P protocol description + +The `Messenger` interface with its implementation are +used to define the way to communicate between Elrond nodes. + +There are 2 ways to send data to the other peers: +1. Broadcasting messages on a `pubsub` using topics; +1. Direct sending messages to the connected peers. + +The first type is used to send messages that has to reach every node +(from corresponding shard, metachain, consensus group, etc.) and the second type is +used to resolve requests comming from directly connected peers. 
diff --git a/p2p/routingTable.go b/p2p/routingTable.go deleted file mode 100644 index 6803a3fb317..00000000000 --- a/p2p/routingTable.go +++ /dev/null @@ -1,227 +0,0 @@ -package p2p - -import ( - "container/list" - "fmt" - "sync" - - "github.com/gogo/protobuf/sortkeys" - "github.com/libp2p/go-libp2p-peer" - "github.com/pkg/errors" -) - -// RoutingTable maintains the distances between current peer and other peers -// It implements a Kademlia algorithm -type RoutingTable struct { - list *list.List - current peer.ID - mut sync.RWMutex - dists1 map[uint32][]peer.ID - dists2 map[peer.ID]uint32 - dists3 []uint32 - ComputeDistance func(pid1 peer.ID, pid2 peer.ID) uint32 -} - -// NewRoutingTable creates a new instance of the RoutingTable struct -func NewRoutingTable(crt peer.ID) *RoutingTable { - rt := &RoutingTable{current: crt, list: list.New(), - dists1: make(map[uint32][]peer.ID), dists2: make(map[peer.ID]uint32), - ComputeDistance: ComputeDistanceAD} - rt.Update(crt) - - return rt -} - -// Peers return the peers known and their corresponding distances -func (rt *RoutingTable) Peers() ([]peer.ID, []uint32) { - rt.mut.RLock() - defer rt.mut.RUnlock() - - ps := make([]peer.ID, 0, rt.list.Len()) - dists := make([]uint32, 0, rt.list.Len()) - for e := rt.list.Front(); e != nil; e = e.Next() { - id := e.Value.(peer.ID) - ps = append(ps, id) - dists = append(dists, rt.dists2[id]) - } - - return ps, dists -} - -// Has return true if the current RoutingTable has a peer id -func (rt *RoutingTable) Has(id peer.ID) bool { - rt.mut.RLock() - defer rt.mut.RUnlock() - - for e := rt.list.Front(); e != nil; e = e.Next() { - if e.Value.(peer.ID) == id { - return true - } - } - - return false -} - -// Len returns the length (no. 
of known peers) -func (rt *RoutingTable) Len() int { - rt.mut.RLock() - defer rt.mut.RUnlock() - - return rt.list.Len() -} - -// Update inserts or modifies a peer inside RoutingTable -func (rt *RoutingTable) Update(p peer.ID) { - rt.mut.Lock() - defer rt.mut.Unlock() - - //compute distance (current - p) - dist := uint32(0) - if rt.ComputeDistance != nil { - dist = rt.ComputeDistance(rt.current, p) - } - - //get array from map distance->peers - rt.dists2[p] = dist - pids := rt.dists1[dist] - if pids == nil { - pids = []peer.ID{} - } - - if len(pids) == 0 { - //distance is new, append to list - rt.dists3 = append(rt.dists3, dist) - //keep sorted - sortkeys.Uint32s(rt.dists3[:]) - } - - //search the peer is array of peers - found := false - for i := 0; i < len(pids); i++ { - if pids[i] == p { - found = true - break - } - } - - if !found { - //add peer into array, save array - pids = append(pids, p) - rt.list.PushFront(p) - } - //update distance in map pid->distance - rt.dists1[dist] = pids -} - -// GetDistance returns the distance (in Kademlia values) between current peer and provided peer -func (rt *RoutingTable) GetDistance(p peer.ID) (uint32, error) { - rt.mut.RLock() - defer rt.mut.RUnlock() - - if !rt.Has(p) { - return uint32(0), errors.New(fmt.Sprintf("Peer ID %v was not found!", p.Pretty())) - } - - return rt.dists2[p], nil -} - -// ComputeDistanceAD is a function to compute the Kademlia developed by Elrond Team -func ComputeDistanceAD(p1 peer.ID, p2 peer.ID) uint32 { - buff1 := []byte(p1) - buff2 := []byte(p2) - - for len(buff1) < len(buff2) { - buff1 = append([]byte{0}, buff1...) - } - - for len(buff2) < len(buff1) { - buff2 = append([]byte{0}, buff2...) 
- } - - var sum uint32 = 0 - for i := 0; i < len(buff1); i++ { - sum += CountOneBits(buff1[i] ^ buff2[i]) - } - - return sum -} - -// CountOneBits counts the bits inside a byte -func CountOneBits(num byte) uint32 { - var sum uint32 = 0 - - operand := byte(128) - - for operand > 0 { - if (num & operand) > 0 { - sum++ - } - - operand = operand / 2 - } - - return sum -} - -// NearestPeers returns the first n peers of the routing table sorted ASC by their distance -func (rt *RoutingTable) NearestPeers(maxNo int) []peer.ID { - found := 0 - peers := make([]peer.ID, 0) - - for i := 0; i < len(rt.dists3) && found < maxNo; i++ { - //get the peers by using the distance as key. - //started from smallest - distPeers := rt.dists1[rt.dists3[i]] - - for j := 0; j < len(distPeers) && found < maxNo; j++ { - if distPeers[j] == rt.current { - //ignore current peer - continue - } - - peers = append(peers, distPeers[j]) - found++ - } - } - - return peers -} - -// NearestPeersAll returns all the known peers sorted ASC -func (rt *RoutingTable) NearestPeersAll() []peer.ID { - peers := make([]peer.ID, 0) - - for i := 0; i < len(rt.dists3); i++ { - //get the peers by using the distance as key. 
- //started from smallest - distPeers := rt.dists1[rt.dists3[i]] - - for j := 0; j < len(distPeers); j++ { - if distPeers[j] == rt.current { - //ignore current peer - continue - } - - peers = append(peers, distPeers[j]) - } - } - - return peers -} - -// Prints all known peers with their distances sorted ASC -func (rt *RoutingTable) Print() { - for i := 0; i < len(rt.dists3); i++ { - fmt.Printf("Distance %d:\n", rt.dists3[i]) - - pids := rt.dists1[rt.dists3[i]] - - for j := 0; j < len(pids); j++ { - fmt.Printf("\t %v", pids[j].Pretty()) - if pids[j] == rt.current { - fmt.Printf("*") - } - fmt.Println() - } - } -} diff --git a/p2p/routingTable_test.go b/p2p/routingTable_test.go deleted file mode 100644 index 54778ca609c..00000000000 --- a/p2p/routingTable_test.go +++ /dev/null @@ -1,328 +0,0 @@ -package p2p_test - -import ( - "crypto/sha256" - "encoding/hex" - "fmt" - "math/rand" - "strconv" - "testing" - - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/libp2p/go-libp2p-peer" - tu "github.com/libp2p/go-testutil" - "github.com/stretchr/testify/assert" -) - -func TestCalculateDistanceDifferentLengths(t *testing.T) { - buff1 := []byte{0, 0} - buff2 := []byte{255} - - assert.Equal(t, uint32(8), p2p.ComputeDistanceAD(peer.ID(string(buff1)), peer.ID(string(buff2)))) - - buff1 = []byte{0} - buff2 = []byte{1, 0} - - assert.Equal(t, uint32(1), p2p.ComputeDistanceAD(peer.ID(string(buff1)), peer.ID(string(buff2)))) -} - -func TestCalculateDistanceLarge(t *testing.T) { - buff1 := []byte{0} - buff2 := []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255} - - assert.Equal(t, uint32(8*14), p2p.ComputeDistanceAD(peer.ID(string(buff1)), peer.ID(string(buff2)))) -} - -func TestCalculateDistance(t *testing.T) { - pid1 := getPID([]byte{0, 0, 0, 1}) - pid2 := getPID([]byte{1, 0, 0, 0}) - - assert.Equal(t, uint32(2), p2p.ComputeDistanceAD(pid1, pid2)) -} - -func TestRoutingTable(t *testing.T) { - pid1 := getPID([]byte{0, 0, 0, 1}) - pid2 := 
getPID([]byte{1, 0, 0, 0}) - pid3 := getPID([]byte{0, 0, 0, 3}) - - rt := p2p.NewRoutingTable(pid1) - assert.Equal(t, 1, rt.Len()) - rt.Update(pid2) - assert.Equal(t, 2, rt.Len()) - dist, err := rt.GetDistance(pid2) - assert.Nil(t, err) - assert.Equal(t, uint32(2), dist) - - rt.Update(pid3) - assert.Equal(t, 3, rt.Len()) - dist, err = rt.GetDistance(pid3) - assert.Nil(t, err) - assert.Equal(t, uint32(1), dist) - - peers, dists := rt.Peers() - - assert.Equal(t, peers[0], pid3) - assert.Equal(t, dists[0], uint32(1)) - - assert.Equal(t, peers[1], pid2) - assert.Equal(t, dists[1], uint32(2)) - - assert.Equal(t, peers[2], pid1) - assert.Equal(t, dists[2], uint32(0)) - -} - -func TestRoutingTableNotFound(t *testing.T) { - pid1 := getPID([]byte{0, 0, 0, 1}) - pid2 := getPID([]byte{1, 0, 0, 0}) - - rt := p2p.NewRoutingTable(pid1) - assert.True(t, rt.Has(pid1)) - assert.False(t, rt.Has(pid2)) - - _, err := rt.GetDistance(pid2) - - assert.NotNil(t, err) -} - -func TestRoutingTableMultiple(t *testing.T) { - pid1 := getPID([]byte{0, 0, 0, 1}) - pid2 := getPID([]byte{1, 0, 0, 0}) - - rt := p2p.NewRoutingTable(pid1) - assert.Equal(t, 1, rt.Len()) - rt.Update(pid2) - assert.Equal(t, 2, rt.Len()) - rt.Update(pid2) - assert.Equal(t, 2, rt.Len()) - dist, err := rt.GetDistance(pid2) - assert.Nil(t, err) - assert.Equal(t, uint32(2), dist) -} - -func getPID(buff []byte) peer.ID { - return peer.ID(string(buff)) -} - -// Looks for race conditions in table operations. 
For a more 'certain' -// test, increase the loop counter from 1000 to a much higher number -// and set GOMAXPROCS above 1 -func TestTableMultithreaded(t *testing.T) { - local := peer.ID("localPeer") - tab := p2p.NewRoutingTable(local) - var peers []peer.ID - for i := 0; i < 500; i++ { - peers = append(peers, tu.RandPeerIDFatal(t)) - } - - done := make(chan struct{}) - go func() { - for i := 0; i < 10000; i++ { - n := rand.Intn(len(peers)) - tab.Update(peers[n]) - } - done <- struct{}{} - }() - - go func() { - for i := 0; i < 10000; i++ { - n := rand.Intn(len(peers)) - tab.Update(peers[n]) - } - done <- struct{}{} - }() - - <-done - <-done - - for i := 0; i < len(peers); i++ { - if !tab.Has(peers[i]) { - assert.Fail(t, fmt.Sprintf("Not found %v", peers[i].Pretty())) - } - } -} - -func TestClosestPeers(t *testing.T) { - pid1 := getPID([]byte{0, 0, 0, 1}) - pid2 := getPID([]byte{1, 0, 0, 0}) - pid3 := getPID([]byte{0, 0, 0, 3}) - pid4 := getPID([]byte{0, 1, 0, 0}) - pid5 := getPID([]byte{0, 0, 1, 0}) - pid6 := getPID([]byte{255, 0, 0, 0}) - - rt := p2p.NewRoutingTable(pid1) - rt.Update(pid2) - rt.Update(pid3) - rt.Update(pid6) - rt.Update(pid4) - rt.Update(pid5) - - peers := rt.NearestPeers(100) - assert.Equal(t, 5, len(peers)) - - peers = rt.NearestPeersAll() - assert.Equal(t, 5, len(peers)) - - assert.Equal(t, pid3, peers[0]) - assert.Equal(t, pid2, peers[1]) - assert.Equal(t, pid4, peers[2]) - assert.Equal(t, pid5, peers[3]) - assert.Equal(t, pid6, peers[4]) - - peers = rt.NearestPeers(5) - assert.Equal(t, 5, len(peers)) - - assert.Equal(t, pid3, peers[0]) - assert.Equal(t, pid2, peers[1]) - assert.Equal(t, pid4, peers[2]) - assert.Equal(t, pid5, peers[3]) - assert.Equal(t, pid6, peers[4]) - - peers = rt.NearestPeers(3) - assert.Equal(t, 3, len(peers)) - - assert.Equal(t, pid3, peers[0]) - assert.Equal(t, pid2, peers[1]) - assert.Equal(t, pid4, peers[2]) - - rt.Print() -} - -func TestLargeSetOfPeers(t *testing.T) { - id := "Node0" - - rt := 
p2p.NewRoutingTable(peer.ID(id)) - - for i := 1; i <= 200; i++ { - id = "Node" + strconv.Itoa(i) - - rt.Update(peer.ID(id)) - - found := rt.Has(peer.ID(id)) - - if !found { - fmt.Printf("Peer %s not found!\n", id) - } - } - - rt.Print() - - fmt.Println() - fmt.Println("Nearest peers:") - - peers := rt.NearestPeers(13) - - for i := 0; i < len(peers); i++ { - fmt.Println("-", peers[i].Pretty()) - } -} - -func TestLonelyPeers(t *testing.T) { - peers := make([]string, 0) - rts := make([]p2p.RoutingTable, 0) - nearest := map[peer.ID][]peer.ID{} - - //use a simple hash method - h := sha256.New() - - for i := 0; i < 200; i++ { - h.Write([]byte("Node" + strconv.Itoa(i))) - hashed := h.Sum(nil) - p := hex.EncodeToString(hashed) - peers = append(peers, p) - - rt := p2p.NewRoutingTable(peer.ID(p)) - - rts = append(rts, *rt) - } - - for i := 0; i < len(peers); i++ { - for j := 0; j < len(peers); j++ { - if i == j { - continue - } - - rts[i].Update(peer.ID(peers[j])) - } - } - - nearest = make(map[peer.ID][]peer.ID) - - for i := 0; i < len(peers); i++ { - nearestPeers := rts[i].NearestPeers(10) - - nearest[peer.ID(peers[i])] = nearestPeers - } - - for i := 0; i < len(peers); i++ { - assert.Equal(t, 0, testLonelyPeer(peers[i], peers, nearest)) - } -} - -func testLonelyPeer(start string, peers []string, conn map[peer.ID][]peer.ID) int { - reached := make(map[peer.ID]bool) - - for i := 0; i < len(peers); i++ { - reached[peer.ID(peers[i])] = peers[i] == start - } - - job := make(map[peer.ID]bool) - job[peer.ID(start)] = false - - traverseRec2(conn, reached, job) - - notReached := 0 - - fmt.Printf("%s has not reached: ", start) - for i := 0; i < len(peers); i++ { - if !reached[peer.ID(peers[i])] { - fmt.Printf("%s, ", peers[i]) - notReached++ - } - } - fmt.Println() - - return notReached -} - -func traverseRec2(conn map[peer.ID][]peer.ID, reached map[peer.ID]bool, job map[peer.ID]bool) { - var foundPeerID peer.ID = "" - - //find first element that was not processed - for k, v := 
range job { - if !v { - foundPeerID = k - break - } - } - - if foundPeerID == "" { - //done, no more peers were found to process - return - } - - job[foundPeerID] = true - reached[foundPeerID] = true - - peersToCheck := conn[foundPeerID] - - //append sub-peers to list - for _, pid := range peersToCheck { - _, found := job[pid] - - if !found { - job[pid] = false - } - } - - traverseRec2(conn, reached, job) -} - -func BenchmarkAdd(t *testing.B) { - local := peer.ID("localPeer") - tab := p2p.NewRoutingTable(local) - - for i := 0; i < t.N; i++ { - tab.Update(tu.RandPeerIDFatal(t)) - } -} diff --git a/p2p/timeCache.go b/p2p/timeCache.go deleted file mode 100644 index 228ee0515fd..00000000000 --- a/p2p/timeCache.go +++ /dev/null @@ -1,67 +0,0 @@ -package p2p - -//struct adapted from github.com/whyrusleeping/timecache/timecache.go by the Elrond Team - -import ( - "container/list" - "time" -) - -// TimeCache is used for keeping a key for a specified amount of time -type TimeCache struct { - keys *list.List - data map[string]time.Time - span time.Duration -} - -// NewTimeCache is used to create a new instance of TimeCache object -func NewTimeCache(span time.Duration) *TimeCache { - return &TimeCache{ - keys: list.New(), - data: make(map[string]time.Time), - span: span, - } -} - -// Add a new key -func (tc *TimeCache) Add(key string) { - _, ok := tc.data[key] - if ok { - return - } - - tc.sweep() - - tc.data[key] = time.Now() - tc.keys.PushFront(key) -} - -// clean-up func -func (tc *TimeCache) sweep() { - for { - back := tc.keys.Back() - if back == nil { - return - } - - v := back.Value.(string) - t, ok := tc.data[v] - if !ok { - panic("inconsistent cache state") - } - - if time.Since(t) > tc.span { - tc.keys.Remove(back) - delete(tc.data, v) - } else { - return - } - } -} - -// Has returns the existence of a key making a sweep cleanup before testing -func (tc *TimeCache) Has(key string) bool { - tc.sweep() - _, ok := tc.data[key] - return ok -} diff --git 
a/p2p/timeCache_test.go b/p2p/timeCache_test.go deleted file mode 100644 index 91fcb468db4..00000000000 --- a/p2p/timeCache_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package p2p - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestTimeCacheAddHasAddingAfterDurShouldRetFalse(t *testing.T) { - tc := NewTimeCache(time.Second) - - tc.Add("AAA") - - time.Sleep(time.Second * 2) - - assert.False(t, tc.Has("AAA")) -} - -func TestTimeCacheAddHasAddingBeforeDurShouldRetTrue(t *testing.T) { - tc := NewTimeCache(time.Second) - - tc.Add("AAA") - - time.Sleep(time.Millisecond * 500) - - assert.True(t, tc.Has("AAA")) -} - -func TestTimeCacheAddHasReaddingAfterDurShouldWork(t *testing.T) { - tc := NewTimeCache(time.Second) - - tc.Add("AAA") - - time.Sleep(time.Second * 2) - - if !tc.Has("AAA") { - tc.Add("AAA") - } else { - assert.Fail(t, "Should have not had the object!") - } - - time.Sleep(time.Millisecond * 500) - - assert.True(t, tc.Has("AAA")) -} diff --git a/p2p/topic.go b/p2p/topic.go deleted file mode 100644 index abb0513882b..00000000000 --- a/p2p/topic.go +++ /dev/null @@ -1,203 +0,0 @@ -package p2p - -import ( - "sync" - - "github.com/ElrondNetwork/elrond-go-sandbox/marshal" - "github.com/libp2p/go-libp2p-peer" - "github.com/libp2p/go-libp2p-pubsub" - "github.com/pkg/errors" -) - -// topicChannelBufferSize is used to control the object channel buffer size -const topicChannelBufferSize = 10000 - -// DataReceivedHandler is the signature for the event handler used by Topic struct -type DataReceivedHandler func(name string, data interface{}, msgInfo *MessageInfo) - -// Topic struct defines a type of message that can be received and broadcast -// The use of Creator interface gives this struct's a generic use -// It works in the following manner: -// - the message is received (if it passes the authentication filter) -// - a new object with the same type of ObjTemplate is created -// - this new object will be used to unmarshal received data 
-// - an async func will call each and every event handler registered on eventBus -// - the method Broadcast is used to send messages containing object's serialized data to other peers -type Topic struct { - // Name of the topic - name string - // ObjTemplate is used as a template to generate new objects whenever a new message is received - ObjTemplate Creator - marsh marshal.Marshalizer - objChan chan MessageInfo - mutEventBus sync.RWMutex - eventBusDataRcvHandlers []DataReceivedHandler - // SendData will be called by Topic struct whenever a user of this struct tries to send data to other peers - // It is a function pointer that connects Topic struct with pubsub implementation - SendData func(data []byte) error - RegisterTopicValidator func(v pubsub.Validator) error - UnregisterTopicValidator func() error - ResolveRequest func(hash []byte) []byte - Request func(hash []byte) error - CurrentPeer peer.ID -} - -// MessageInfo will retain additional info about the message, should we care -// when receiving an object on current topic -type MessageInfo struct { - Data Creator - Peer string - CurrentPeer string -} - -// NewTopic creates a new Topic struct -func NewTopic(name string, objTemplate Creator, marsh marshal.Marshalizer) *Topic { - topic := Topic{name: name, ObjTemplate: objTemplate, marsh: marsh} - topic.objChan = make(chan MessageInfo, topicChannelBufferSize) - - go topic.processData() - - return &topic -} - -// AddDataReceived registers a new event handler on the eventBus so it can be called async whenever a new object is unmarshaled -func (t *Topic) AddDataReceived(eventHandler DataReceivedHandler) { - if eventHandler == nil { - //won't add a nil event handler to list - return - } - - t.mutEventBus.Lock() - defer t.mutEventBus.Unlock() - - t.eventBusDataRcvHandlers = append(t.eventBusDataRcvHandlers, eventHandler) -} - -// CreateObject will instantiate a Cloner interface and instantiate its fields -// with the help of a marshalizer implementation -func (t 
*Topic) CreateObject(data []byte) (Creator, error) { - // create new instance of the object - newObj := t.ObjTemplate.Create() - - if data == nil { - return nil, errors.New("nil message not allowed") - } - - if len(data) == 0 { - return nil, errors.New("empty message not allowed") - } - - //unmarshal data from the message - err := t.marsh.Unmarshal(newObj, data) - if err != nil { - return nil, err - } - - return newObj, err -} - -// NewObjReceived is called from the lower data layer -// it will ignore nils or improper formatted messages -func (t *Topic) NewObjReceived(obj Creator, peerID string) error { - if obj == nil { - return errors.New("nil object not allowed") - } - - //add to the channel so it can be consumed async - t.objChan <- MessageInfo{Data: obj, Peer: peerID, CurrentPeer: t.CurrentPeer.Pretty()} - return nil -} - -// Broadcast should be called whenever a higher order struct needs to send over the wire an object -// Optionally, the message can be authenticated -func (t *Topic) Broadcast(data interface{}) error { - if data == nil { - return errors.New("can not process nil data") - } - - if t.SendData == nil { - return errors.New("nil SendData handler") - } - - //assemble the message - payload, err := t.marsh.Marshal(data) - if err != nil { - return err - } - - return t.SendData(payload) -} - -// BroadcastBuff should be called whenever a higher order struct needs to send over the wire already -// serialized data -// Optionally, the message can be authenticated -func (t *Topic) BroadcastBuff(payload []byte) error { - if payload == nil { - return errors.New("can not process nil data") - } - - if t.SendData == nil { - return errors.New("send to nil the assembled message?") - } - - return t.SendData(payload) -} - -func (t *Topic) processData() { - for { - select { - case obj := <-t.objChan: - //a new object is in pump, it has been consumed, - //call each event handler from the list - t.mutEventBus.RLock() - for i := 0; i < len(t.eventBusDataRcvHandlers); i++ 
{ - t.eventBusDataRcvHandlers[i](t.name, obj.Data, &obj) - } - t.mutEventBus.RUnlock() - } - } -} - -// RegisterValidator adds a validator to this topic -// It delegates the functionality to registerValidator function pointer -func (t *Topic) RegisterValidator(v pubsub.Validator) error { - if t.RegisterTopicValidator == nil { - return errors.New("can not delegate registration to parent") - } - - return t.RegisterTopicValidator(v) -} - -// UnregisterValidator removes the validator associated to this topic -// It delegates the functionality to unregisterValidator function pointer -func (t *Topic) UnregisterValidator() error { - if t.UnregisterTopicValidator == nil { - return errors.New("can not delegate unregistration to parent") - } - - return t.UnregisterTopicValidator() -} - -// SendRequest sends the hash to all known peers that subscribed to the channel [t.Name]_REQUEST -// It delegates the functionality to sendRequest function pointer -// The object, if exists, should return on the main event bus (regular topic channel) -func (t *Topic) SendRequest(hash []byte) error { - if hash == nil { - return errors.New("invalid hash to send") - } - - if len(hash) == 0 { - return errors.New("invalid hash to send") - } - - if t.Request == nil { - return errors.New("can not delegate request to parent") - } - - return t.Request(hash) -} - -// Name returns the topic name -func (t *Topic) Name() string { - return t.name -} diff --git a/p2p/topic_test.go b/p2p/topic_test.go deleted file mode 100644 index cb5395a5a57..00000000000 --- a/p2p/topic_test.go +++ /dev/null @@ -1,383 +0,0 @@ -package p2p_test - -import ( - "bytes" - "fmt" - "reflect" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p/mock" - "github.com/libp2p/go-libp2p-pubsub" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" -) - -type testTopicStringCreator struct { - Data string -} - -// New will return a 
new instance of string. Dummy, just to implement Cloner interface as strings are immutable -func (sc *testTopicStringCreator) Create() p2p.Creator { - return &testTopicStringCreator{} -} - -// ID will return the same string as ID -func (sc *testTopicStringCreator) ID() string { - return sc.Data -} - -func TestTopic_AddEventHandlerNilShouldNotAddHandler(t *testing.T) { - t.Parallel() - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, &mock.MarshalizerMock{}) - - topic.AddDataReceived(nil) - - assert.Equal(t, len(topic.EventBusData()), 0) -} - -func TestTopic_AddEventHandlerWithArealFuncShouldWork(t *testing.T) { - t.Parallel() - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, &mock.MarshalizerMock{}) - - topic.AddDataReceived(func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - - }) - - assert.Equal(t, len(topic.EventBusData()), 1) -} - -func TestTopic_CreateObjectNilDataShouldErr(t *testing.T) { - t.Parallel() - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, &mock.MarshalizerMock{}) - - _, err := topic.CreateObject(nil) - - assert.NotNil(t, err) -} - -func TestTopic_CreateObjectEmptyDataShouldErr(t *testing.T) { - t.Parallel() - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, &mock.MarshalizerMock{}) - - _, err := topic.CreateObject(make([]byte, 0)) - - assert.NotNil(t, err) -} - -func TestTopic_CreateObjectMarshalizerFailsShouldErr(t *testing.T) { - t.Parallel() - - marshalizer := &mock.MarshalizerMock{} - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, marshalizer) - - marshalizer.Fail = true - - _, err := topic.CreateObject(make([]byte, 1)) - - assert.NotNil(t, err) -} - -func TestTopic_NewObjReceivedNilObjShouldErr(t *testing.T) { - t.Parallel() - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, &mock.MarshalizerMock{}) - - err := topic.NewObjReceived(nil, "") - - assert.NotNil(t, err) -} - -func TestTopic_NewObjReceivedOKMsgShouldWork(t *testing.T) { - t.Parallel() - - topic := 
p2p.NewTopic("test", &testTopicStringCreator{}, &mock.MarshalizerMock{}) - - wg := sync.WaitGroup{} - wg.Add(1) - - cnt := int32(0) - //attach event handler - topic.AddDataReceived(func(name string, data interface{}, msgInfo *p2p.MessageInfo) { - assert.Equal(t, name, "test") - - switch data.(type) { - case p2p.Creator: - atomic.AddInt32(&cnt, 1) - default: - assert.Fail(t, "The data should have been string!") - } - - wg.Done() - - }) - - marsh := mock.MarshalizerMock{} - payload, err := marsh.Marshal(&testTopicStringCreator{Data: "aaaa"}) - assert.Nil(t, err) - - obj, err := topic.CreateObject(payload) - assert.Nil(t, err) - err = topic.NewObjReceived(obj, "") - assert.Nil(t, err) - - //start a go routine as watchdog for the wg.Wait() - go func() { - time.Sleep(time.Second * 2) - wg.Done() - }() - - //wait for the go routine to finish - wg.Wait() - - assert.Equal(t, atomic.LoadInt32(&cnt), int32(1)) -} - -//------- Broadcast - -func TestTopic_BroadcastNilDataShouldErr(t *testing.T) { - t.Parallel() - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, &mock.MarshalizerMock{}) - - err := topic.Broadcast(nil) - - assert.NotNil(t, err) -} - -func TestTopic_BroadcastMarshalizerFailsShouldErr(t *testing.T) { - t.Parallel() - - marshalizer := &mock.MarshalizerMock{} - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, marshalizer) - - topic.SendData = func(data []byte) error { - return nil - } - - marshalizer.Fail = true - - err := topic.Broadcast("a string") - - assert.NotNil(t, err) -} - -func TestTopic_BroadcastNoOneToSendShouldErr(t *testing.T) { - t.Parallel() - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, &mock.MarshalizerMock{}) - - err := topic.Broadcast("a string") - - assert.NotNil(t, err) -} - -func TestTopic_BroadcastSendOkShouldWork(t *testing.T) { - t.Parallel() - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, &mock.MarshalizerMock{}) - - topic.SendData = func(data []byte) error { - if topic.Name() != "test" { - 
return errors.New("should have been test") - } - - if data == nil { - return errors.New("should have not been nil") - } - - fmt.Printf("Message: %v\n", data) - return nil - } - - err := topic.Broadcast("a string") - assert.Nil(t, err) -} - -//------- BroadcastBuff - -func TestTopic_BroadcastBuffNilDataShouldErr(t *testing.T) { - t.Parallel() - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, &mock.MarshalizerMock{}) - - err := topic.BroadcastBuff(nil) - - assert.NotNil(t, err) -} - -func TestTopic_BroadcastBuffNoOneToSendShouldErr(t *testing.T) { - t.Parallel() - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, &mock.MarshalizerMock{}) - - err := topic.BroadcastBuff([]byte("a string")) - - assert.NotNil(t, err) -} - -func TestTopic_BroadcastBuffSendOkShouldWork(t *testing.T) { - t.Parallel() - - marshalizer := &mock.MarshalizerMock{} - topic := p2p.NewTopic("test", &testTopicStringCreator{}, marshalizer) - - topic.SendData = func(data []byte) error { - if topic.Name() != "test" { - return errors.New("should have been test") - } - - if data == nil { - return errors.New("should have not been nil") - } - - fmt.Printf("Message: %v\n", string(data)) - return nil - } - - buff, err := marshalizer.Marshal(testTopicStringCreator{"AAA"}) - assert.Nil(t, err) - - err = topic.BroadcastBuff(buff) - assert.Nil(t, err) -} - -//------- SendRequest - -func TestTopic_SendRequestNilHashShouldRetErr(t *testing.T) { - t.Parallel() - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, &mock.MarshalizerMock{}) - err := topic.SendRequest(nil) - - assert.NotNil(t, err) -} - -func TestTopic_SendRequestEmptyHashShouldRetErr(t *testing.T) { - t.Parallel() - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, &mock.MarshalizerMock{}) - err := topic.SendRequest(make([]byte, 0)) - - assert.NotNil(t, err) -} - -func TestTopic_SendRequestNoHandlerShouldRetErr(t *testing.T) { - t.Parallel() - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, 
&mock.MarshalizerMock{}) - err := topic.SendRequest(make([]byte, 1)) - - assert.NotNil(t, err) -} - -func TestTopic_SendRequestShouldWork(t *testing.T) { - t.Parallel() - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, &mock.MarshalizerMock{}) - - topic.Request = func(hash []byte) error { - if bytes.Equal(hash, []byte("AAAA")) { - return nil - } - - return errors.New("should have not got here") - } - err := topic.SendRequest([]byte("AAAA")) - - assert.Nil(t, err) -} - -//------- RegisterValidator - -func TestTopic_RegisterValidatorNoHandlerShouldErr(t *testing.T) { - t.Parallel() - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, &mock.MarshalizerMock{}) - - err := topic.RegisterValidator(nil) - assert.NotNil(t, err) -} - -func TestTopic_RegisterValidatorShouldWork(t *testing.T) { - t.Parallel() - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, &mock.MarshalizerMock{}) - - topic.RegisterTopicValidator = func(v pubsub.Validator) error { - return nil - } - - err := topic.RegisterValidator(nil) - assert.Nil(t, err) -} - -//------- UnregisterValidator - -func TestTopic_UnregisterValidatorNoHandlerShouldErr(t *testing.T) { - t.Parallel() - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, &mock.MarshalizerMock{}) - - err := topic.UnregisterValidator() - assert.NotNil(t, err) -} - -func TestTopic_UnregisterValidatorShouldWork(t *testing.T) { - t.Parallel() - - topic := p2p.NewTopic("test", &testTopicStringCreator{}, &mock.MarshalizerMock{}) - - topic.UnregisterTopicValidator = func() error { - return nil - } - - err := topic.UnregisterValidator() - assert.Nil(t, err) -} - -//------- Benchmarks - -type benchmark struct { - field1 []byte - field2 []byte - field3 []byte - field4 []byte - field5 []byte - field6 []byte - field7 uint64 - field8 uint64 - field9 uint64 - field10 int64 - field11 int64 - field12 string - field13 string - field14 string -} - -func BenchmarkTopic_NewObjectCreationPlainInit(b *testing.B) { - obj1 := 
benchmark{} - - for i := 0; i < b.N; i++ { - obj1 = benchmark{} - } - - obj1.field1 = make([]byte, 0) -} - -func BenchmarkTopic_NewObjectCreationReflectionNew(b *testing.B) { - obj1 := benchmark{} - - for i := 0; i < b.N; i++ { - reflect.New(reflect.TypeOf(obj1)).Interface() - } -} diff --git a/process/block/export_test.go b/process/block/export_test.go index 4f2c0a8f18e..978bcfa0e16 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -5,8 +5,6 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/ElrondNetwork/elrond-go-sandbox/storage" ) @@ -26,22 +24,6 @@ func (bp *blockProcessor) ReceivedTransaction(txHash []byte) { bp.receivedTransaction(txHash) } -func (hi *HeaderInterceptor) ProcessHdr(hdr p2p.Creator, rawData []byte) error { - return hi.processHdr(hdr, rawData) -} - -func (gbbi *GenericBlockBodyInterceptor) ProcessBodyBlock(bodyBlock p2p.Creator, rawData []byte) error { - return gbbi.processBodyBlock(bodyBlock, rawData) -} - -func (hdrRes *HeaderResolver) ResolveHdrRequest(rd process.RequestData) ([]byte, error) { - return hdrRes.resolveHdrRequest(rd) -} - -func (gbbRes *GenericBlockBodyResolver) ResolveBlockBodyRequest(rd process.RequestData) ([]byte, error) { - return gbbRes.resolveBlockBodyRequest(rd) -} - func (bp *blockProcessor) ComputeHeaderHash(hdr *block.Header) ([]byte, error) { return bp.computeHeaderHash(hdr) } diff --git a/process/block/interceptedBlockHeader.go b/process/block/interceptedBlockHeader.go index dcba2dfcdc3..7b505ab5312 100644 --- a/process/block/interceptedBlockHeader.go +++ b/process/block/interceptedBlockHeader.go @@ -3,7 +3,6 @@ package block import ( "github.com/ElrondNetwork/elrond-go-sandbox/crypto" "github.com/ElrondNetwork/elrond-go-sandbox/data/block" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" 
"github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/ElrondNetwork/elrond-go-sandbox/sharding" ) @@ -34,16 +33,6 @@ func (inHdr *InterceptedHeader) Hash() []byte { return inHdr.hash } -// Create returns a new instance of this struct (used in topics) -func (inHdr *InterceptedHeader) Create() p2p.Creator { - return NewInterceptedHeader(inHdr.multiSigVerifier) -} - -// ID returns the ID of this object. Set to return the hash of the header -func (inHdr *InterceptedHeader) ID() string { - return string(inHdr.hash) -} - // Shard returns the shard ID for which this header is addressed func (inHdr *InterceptedHeader) Shard() uint32 { return inHdr.ShardId diff --git a/process/block/interceptedBlockHeader_test.go b/process/block/interceptedBlockHeader_test.go index c93f94fbb9f..78e6131567c 100644 --- a/process/block/interceptedBlockHeader_test.go +++ b/process/block/interceptedBlockHeader_test.go @@ -38,7 +38,7 @@ func TestInterceptedHeader_GetHeaderShouldReturnHeader(t *testing.T) { assert.True(t, hdr.GetHeader() == hdr.Header) } -func TestInterceptedHeader_GetterSetterHashID(t *testing.T) { +func TestInterceptedHeader_GetterSetterHash(t *testing.T) { t.Parallel() hash := []byte("hash") @@ -48,7 +48,6 @@ func TestInterceptedHeader_GetterSetterHashID(t *testing.T) { hdr.SetHash(hash) assert.Equal(t, hash, hdr.Hash()) - assert.Equal(t, string(hash), hdr.ID()) } func TestInterceptedHeader_ShardShouldWork(t *testing.T) { @@ -63,28 +62,6 @@ func TestInterceptedHeader_ShardShouldWork(t *testing.T) { assert.Equal(t, shard, hdr.Shard()) } -func TestInterceptedHeader_CreateShouldNotProduceNils(t *testing.T) { - t.Parallel() - - multiSig := mock.NewMultiSigner() - hdr := block.NewInterceptedHeader(multiSig) - hdrCreated := hdr.Create() - - assert.NotNil(t, hdrCreated) - assert.NotNil(t, hdrCreated.(*block.InterceptedHeader).Header) -} - -func TestInterceptedHeader_CreateShouldNotProduceSameObject(t *testing.T) { - t.Parallel() - - multiSig := mock.NewMultiSigner() - hdr 
:= block.NewInterceptedHeader(multiSig) - hdrCreated := hdr.Create() - - assert.False(t, hdrCreated == hdr) - assert.False(t, hdrCreated.(*block.InterceptedHeader).Header == hdr.Header) -} - func TestInterceptedHeader_IntegrityNilShardCoordinatorShouldErr(t *testing.T) { t.Parallel() diff --git a/process/block/interceptedPeerBlock.go b/process/block/interceptedPeerBlock.go index 9e5f8850a6c..0166f955bf4 100644 --- a/process/block/interceptedPeerBlock.go +++ b/process/block/interceptedPeerBlock.go @@ -2,7 +2,6 @@ package block import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/block" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/ElrondNetwork/elrond-go-sandbox/sharding" ) @@ -30,16 +29,6 @@ func (inPeerBlkBdy *InterceptedPeerBlockBody) Hash() []byte { return inPeerBlkBdy.hash } -// Create returns a new instance of this struct (used in topics) -func (inPeerBlkBdy *InterceptedPeerBlockBody) Create() p2p.Creator { - return NewInterceptedPeerBlockBody() -} - -// ID returns the ID of this object. 
Set to return the hash of the peer block body -func (inPeerBlkBdy *InterceptedPeerBlockBody) ID() string { - return string(inPeerBlkBdy.hash) -} - // Shard returns the shard ID for which this body is addressed func (inPeerBlkBdy *InterceptedPeerBlockBody) Shard() uint32 { return inPeerBlkBdy.ShardID diff --git a/process/block/interceptedPeerBlock_test.go b/process/block/interceptedPeerBlock_test.go index 3a9887ace4e..d9450ac8fc2 100644 --- a/process/block/interceptedPeerBlock_test.go +++ b/process/block/interceptedPeerBlock_test.go @@ -27,7 +27,7 @@ func TestInterceptedPeerBlockBody_GetUnderlingObjectShouldReturnBlock(t *testing assert.True(t, peerBlockBody.GetUnderlyingObject() == peerBlockBody.PeerBlockBody) } -func TestInterceptedPeerBlockBody_GetterSetterHashID(t *testing.T) { +func TestInterceptedPeerBlockBody_GetterSetterHash(t *testing.T) { t.Parallel() hash := []byte("hash") @@ -36,7 +36,6 @@ func TestInterceptedPeerBlockBody_GetterSetterHashID(t *testing.T) { peerBlockBody.SetHash(hash) assert.Equal(t, hash, peerBlockBody.Hash()) - assert.Equal(t, string(hash), peerBlockBody.ID()) } func TestInterceptedPeerBlockBody_ShardShouldWork(t *testing.T) { @@ -50,26 +49,6 @@ func TestInterceptedPeerBlockBody_ShardShouldWork(t *testing.T) { assert.Equal(t, shard, peerBlockBody.Shard()) } -func TestInterceptedPeerBlockBody_CreateShouldNotProduceNils(t *testing.T) { - t.Parallel() - - peerBlockBody := block.NewInterceptedPeerBlockBody() - peerBlockCreated := peerBlockBody.Create() - - assert.NotNil(t, peerBlockCreated) - assert.NotNil(t, peerBlockCreated.(*block.InterceptedPeerBlockBody).PeerBlockBody) -} - -func TestInterceptedPeerBlockBody_CreateShouldNotProduceSameObject(t *testing.T) { - t.Parallel() - - peerBlockBody := block.NewInterceptedPeerBlockBody() - peerBlockCreated := peerBlockBody.Create() - - assert.False(t, peerBlockBody == peerBlockCreated) - assert.False(t, peerBlockCreated.(*block.InterceptedPeerBlockBody).PeerBlockBody == 
peerBlockBody.PeerBlockBody) -} - func TestInterceptedPeerBlockBody_IntegrityInvalidStateBlockShouldErr(t *testing.T) { t.Parallel() diff --git a/process/block/interceptedStateBlock.go b/process/block/interceptedStateBlock.go index 91abf441fd9..4d62cc910bc 100644 --- a/process/block/interceptedStateBlock.go +++ b/process/block/interceptedStateBlock.go @@ -2,7 +2,6 @@ package block import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/block" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/ElrondNetwork/elrond-go-sandbox/sharding" ) @@ -30,16 +29,6 @@ func (inStateBlkBdy *InterceptedStateBlockBody) Hash() []byte { return inStateBlkBdy.hash } -// Create returns a new instance of this struct (used in topics) -func (inStateBlkBdy *InterceptedStateBlockBody) Create() p2p.Creator { - return NewInterceptedStateBlockBody() -} - -// ID returns the ID of this object. Set to return the hash of the state block body -func (inStateBlkBdy *InterceptedStateBlockBody) ID() string { - return string(inStateBlkBdy.hash) -} - // Shard returns the shard ID for which this body is addressed func (inStateBlkBdy *InterceptedStateBlockBody) Shard() uint32 { return inStateBlkBdy.ShardID diff --git a/process/block/interceptedStateBlock_test.go b/process/block/interceptedStateBlock_test.go index 1b88ac390e3..d71430f3af0 100644 --- a/process/block/interceptedStateBlock_test.go +++ b/process/block/interceptedStateBlock_test.go @@ -26,7 +26,7 @@ func TestInterceptedStateBlockBody_GetUnderlingObjectShouldReturnBlock(t *testin assert.True(t, stateBlockBody.GetUnderlyingObject() == stateBlockBody.StateBlockBody) } -func TestInterceptedStateBlockBody_GetterSetterHashID(t *testing.T) { +func TestInterceptedStateBlockBody_GetterSetterHash(t *testing.T) { t.Parallel() hash := []byte("hash") @@ -35,7 +35,6 @@ func TestInterceptedStateBlockBody_GetterSetterHashID(t *testing.T) { stateBlockBody.SetHash(hash) assert.Equal(t, hash, 
stateBlockBody.Hash()) - assert.Equal(t, string(hash), stateBlockBody.ID()) } func TestInterceptedStateBlockBody_ShardShouldWork(t *testing.T) { @@ -49,27 +48,6 @@ func TestInterceptedStateBlockBody_ShardShouldWork(t *testing.T) { assert.Equal(t, shard, stateBlockBody.Shard()) } -func TestInterceptedStateBlockBody_CreateShouldNotProduceNils(t *testing.T) { - t.Parallel() - - stateBlockBody := block.NewInterceptedStateBlockBody() - stateBlockCreated := stateBlockBody.Create() - - assert.NotNil(t, stateBlockCreated) - assert.NotNil(t, stateBlockCreated.(*block.InterceptedStateBlockBody).StateBlockBody) -} - -func TestInterceptedStateBlockBody_CreateShouldNotProduceSameObject(t *testing.T) { - t.Parallel() - - stateBlockBody := block.NewInterceptedStateBlockBody() - stateBlockCreated := stateBlockBody.Create() - - assert.False(t, stateBlockBody == stateBlockCreated) - assert.False(t, stateBlockCreated.(*block.InterceptedStateBlockBody).StateBlockBody == - stateBlockBody.StateBlockBody) -} - func TestInterceptedStateBlockBody_IntegrityNilShardCoordinatorShouldErr(t *testing.T) { t.Parallel() diff --git a/process/block/interceptedTxBlock.go b/process/block/interceptedTxBlock.go index 0068204a438..07215e99692 100644 --- a/process/block/interceptedTxBlock.go +++ b/process/block/interceptedTxBlock.go @@ -2,7 +2,6 @@ package block import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/block" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/ElrondNetwork/elrond-go-sandbox/sharding" ) @@ -30,16 +29,6 @@ func (inTxBlkBdy *InterceptedTxBlockBody) Hash() []byte { return inTxBlkBdy.hash } -// Create returns a new instance of this struct (used in topics) -func (inTxBlkBdy *InterceptedTxBlockBody) Create() p2p.Creator { - return NewInterceptedTxBlockBody() -} - -// ID returns the ID of this object. 
Set to return the hash of the transaction block body -func (inTxBlkBdy *InterceptedTxBlockBody) ID() string { - return string(inTxBlkBdy.hash) -} - // Shard returns the shard ID for which this body is addressed func (inTxBlkBdy *InterceptedTxBlockBody) Shard() uint32 { return inTxBlkBdy.ShardID diff --git a/process/block/interceptedTxBlock_test.go b/process/block/interceptedTxBlock_test.go index e9afa1d5b16..a19233d6b1f 100644 --- a/process/block/interceptedTxBlock_test.go +++ b/process/block/interceptedTxBlock_test.go @@ -27,7 +27,7 @@ func TestInterceptedTxBlockBody_GetUnderlingObjectShouldReturnBlock(t *testing.T assert.True(t, txBlockBody.GetUnderlyingObject() == txBlockBody.TxBlockBody) } -func TestInterceptedTxBlockBody_GetterSetterHashID(t *testing.T) { +func TestInterceptedTxBlockBody_GetterSetterHash(t *testing.T) { t.Parallel() hash := []byte("hash") @@ -36,7 +36,6 @@ func TestInterceptedTxBlockBody_GetterSetterHashID(t *testing.T) { txBlockBody.SetHash(hash) assert.Equal(t, hash, txBlockBody.Hash()) - assert.Equal(t, string(hash), txBlockBody.ID()) } func TestInterceptedTxBlockBody_ShardShouldWork(t *testing.T) { @@ -50,27 +49,6 @@ func TestInterceptedTxBlockBody_ShardShouldWork(t *testing.T) { assert.Equal(t, shard, txBlockBody.Shard()) } -func TestInterceptedTxBlockBody_CreateShouldNotProduceNils(t *testing.T) { - t.Parallel() - - txBlockBody := block.NewInterceptedTxBlockBody() - txBlockCreated := txBlockBody.Create() - - assert.NotNil(t, txBlockCreated) - assert.NotNil(t, txBlockCreated.(*block.InterceptedTxBlockBody).TxBlockBody) -} - -func TestInterceptedTxBlockBody_CreateShouldNotProduceSameObject(t *testing.T) { - t.Parallel() - - txBlockBody := block.NewInterceptedTxBlockBody() - txBlockCreated := txBlockBody.Create() - - assert.False(t, txBlockBody == txBlockCreated) - assert.False(t, txBlockCreated.(*block.InterceptedTxBlockBody).TxBlockBody == - txBlockBody.TxBlockBody) -} - func 
TestInterceptedTxBlockBody_IntegrityInvalidStateBlockShouldErr(t *testing.T) { t.Parallel() diff --git a/process/block/interceptors.go b/process/block/interceptors.go deleted file mode 100644 index e45a9676d38..00000000000 --- a/process/block/interceptors.go +++ /dev/null @@ -1,214 +0,0 @@ -package block - -import ( - "github.com/ElrondNetwork/elrond-go-sandbox/crypto" - "github.com/ElrondNetwork/elrond-go-sandbox/data" - "github.com/ElrondNetwork/elrond-go-sandbox/hashing" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/ElrondNetwork/elrond-go-sandbox/process" - "github.com/ElrondNetwork/elrond-go-sandbox/sharding" - "github.com/ElrondNetwork/elrond-go-sandbox/storage" -) - -// HeaderInterceptor represents an interceptor used for block headers -type HeaderInterceptor struct { - process.Interceptor - headers data.ShardedDataCacherNotifier - storer storage.Storer - headersNonces data.Uint64Cacher - multiSigVerifier crypto.MultiSigVerifier - hasher hashing.Hasher - shardCoordinator sharding.ShardCoordinator -} - -// GenericBlockBodyInterceptor represents an interceptor used for all types of block bodies -type GenericBlockBodyInterceptor struct { - process.Interceptor - cache storage.Cacher - hasher hashing.Hasher - storer storage.Storer - shardCoordinator sharding.ShardCoordinator -} - -//------- HeaderInterceptor - -// NewHeaderInterceptor hooks a new interceptor for block headers -// Fetched block headers will be placed in a data pool -func NewHeaderInterceptor( - interceptor process.Interceptor, - headers data.ShardedDataCacherNotifier, - headersNonces data.Uint64Cacher, - storer storage.Storer, - multiSigVerifier crypto.MultiSigVerifier, - hasher hashing.Hasher, - shardCoordinator sharding.ShardCoordinator, -) (*HeaderInterceptor, error) { - - if interceptor == nil { - return nil, process.ErrNilInterceptor - } - - if headers == nil { - return nil, process.ErrNilHeadersDataPool - } - - if headersNonces == nil { - return nil, 
process.ErrNilHeadersNoncesDataPool - } - - if storer == nil { - return nil, process.ErrNilHeadersStorage - } - - if multiSigVerifier == nil { - return nil, process.ErrNilMultiSigVerifier - } - - if hasher == nil { - return nil, process.ErrNilHasher - } - - if shardCoordinator == nil { - return nil, process.ErrNilShardCoordinator - } - - hdrIntercept := &HeaderInterceptor{ - Interceptor: interceptor, - headers: headers, - headersNonces: headersNonces, - storer: storer, - multiSigVerifier: multiSigVerifier, - hasher: hasher, - shardCoordinator: shardCoordinator, - } - - interceptor.SetCheckReceivedObjectHandler(hdrIntercept.processHdr) - - return hdrIntercept, nil -} - -func (hi *HeaderInterceptor) processHdr(hdr p2p.Creator, rawData []byte) error { - if hdr == nil { - return process.ErrNilBlockHeader - } - - if rawData == nil { - return process.ErrNilDataToProcess - } - - hdrIntercepted, ok := hdr.(process.HeaderInterceptorAdapter) - - if !ok { - return process.ErrBadInterceptorTopicImplementation - } - - hashWithSig := hi.hasher.Compute(string(rawData)) - hdrIntercepted.SetHash(hashWithSig) - - err := hdrIntercepted.IntegrityAndValidity(hi.shardCoordinator) - if err != nil { - return err - } - - err = hdrIntercepted.VerifySig() - if err != nil { - return err - } - - isHeaderInStorage, _ := hi.storer.Has(hashWithSig) - - if isHeaderInStorage { - log.Debug("intercepted block header already processed") - return nil - } - - hi.headers.AddData(hashWithSig, hdrIntercepted.GetHeader(), hdrIntercepted.Shard()) - if hi.checkHeaderForCurrentShard(hdrIntercepted) { - _, _ = hi.headersNonces.HasOrAdd(hdrIntercepted.GetHeader().Nonce, hashWithSig) - } - return nil -} - -func (hi *HeaderInterceptor) checkHeaderForCurrentShard(header process.HeaderInterceptorAdapter) bool { - //TODO add real logic here - return true -} - -//------- GenericBlockBodyInterceptor - -// NewGenericBlockBodyInterceptor hooks a new interceptor for block bodies -// Fetched data blocks will be placed 
inside the cache -func NewGenericBlockBodyInterceptor( - interceptor process.Interceptor, - cache storage.Cacher, - storer storage.Storer, - hasher hashing.Hasher, - shardCoordinator sharding.ShardCoordinator, -) (*GenericBlockBodyInterceptor, error) { - - if interceptor == nil { - return nil, process.ErrNilInterceptor - } - - if cache == nil { - return nil, process.ErrNilCacher - } - - if storer == nil { - return nil, process.ErrNilBlockBodyStorage - } - - if hasher == nil { - return nil, process.ErrNilHasher - } - - if shardCoordinator == nil { - return nil, process.ErrNilShardCoordinator - } - - bbIntercept := &GenericBlockBodyInterceptor{ - Interceptor: interceptor, - cache: cache, - storer: storer, - hasher: hasher, - shardCoordinator: shardCoordinator, - } - - interceptor.SetCheckReceivedObjectHandler(bbIntercept.processBodyBlock) - - return bbIntercept, nil -} - -func (gbbi *GenericBlockBodyInterceptor) processBodyBlock(bodyBlock p2p.Creator, rawData []byte) error { - if bodyBlock == nil { - return process.ErrNilBlockBody - } - - if rawData == nil { - return process.ErrNilDataToProcess - } - - blockBodyIntercepted, ok := bodyBlock.(process.BlockBodyInterceptorAdapter) - - if !ok { - return process.ErrBadInterceptorTopicImplementation - } - - hash := gbbi.hasher.Compute(string(rawData)) - blockBodyIntercepted.SetHash(hash) - - err := blockBodyIntercepted.IntegrityAndValidity(gbbi.shardCoordinator) - if err != nil { - return err - } - - isBlockInStorage, _ := gbbi.storer.Has(hash) - - if isBlockInStorage { - log.Debug("intercepted block body already processed") - return nil - } - - _ = gbbi.cache.Put(hash, blockBodyIntercepted.GetUnderlyingObject()) - return nil -} diff --git a/process/block/interceptors/genericBlockBodyInterceptor.go b/process/block/interceptors/genericBlockBodyInterceptor.go new file mode 100644 index 00000000000..cbd74ea199d --- /dev/null +++ b/process/block/interceptors/genericBlockBodyInterceptor.go @@ -0,0 +1,81 @@ +package interceptors 
+ +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/hashing" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/sharding" + "github.com/ElrondNetwork/elrond-go-sandbox/storage" +) + +// GenericBlockBodyInterceptor represents an interceptor used for all types of block bodies +type GenericBlockBodyInterceptor struct { + *messageChecker + marshalizer marshal.Marshalizer + cache storage.Cacher + hasher hashing.Hasher + storer storage.Storer + shardCoordinator sharding.ShardCoordinator +} + +// NewGenericBlockBodyInterceptor hooks a new interceptor for block bodies +// Fetched data blocks will be placed inside the cache +func NewGenericBlockBodyInterceptor( + marshalizer marshal.Marshalizer, + cache storage.Cacher, + storer storage.Storer, + hasher hashing.Hasher, + shardCoordinator sharding.ShardCoordinator, +) (*GenericBlockBodyInterceptor, error) { + + if marshalizer == nil { + return nil, process.ErrNilMarshalizer + } + + if cache == nil { + return nil, process.ErrNilCacher + } + + if storer == nil { + return nil, process.ErrNilBlockBodyStorage + } + + if hasher == nil { + return nil, process.ErrNilHasher + } + + if shardCoordinator == nil { + return nil, process.ErrNilShardCoordinator + } + + bbIntercept := &GenericBlockBodyInterceptor{ + messageChecker: &messageChecker{}, + marshalizer: marshalizer, + cache: cache, + storer: storer, + hasher: hasher, + shardCoordinator: shardCoordinator, + } + + return bbIntercept, nil +} + +func (gbbi *GenericBlockBodyInterceptor) processBlockBody(messageData []byte, body process.InterceptedBlockBody) error { + hash := gbbi.hasher.Compute(string(messageData)) + body.SetHash(hash) + + err := body.IntegrityAndValidity(gbbi.shardCoordinator) + if err != nil { + return err + } + + isBlockInStorage, _ := gbbi.storer.Has(hash) + + if isBlockInStorage { + log.Debug("intercepted block body already processed") + return nil + } + 
+ _ = gbbi.cache.Put(hash, body.GetUnderlyingObject()) + return nil +} diff --git a/process/block/interceptors/genericBlockBodyInterceptor_test.go b/process/block/interceptors/genericBlockBodyInterceptor_test.go new file mode 100644 index 00000000000..81592ec7504 --- /dev/null +++ b/process/block/interceptors/genericBlockBodyInterceptor_test.go @@ -0,0 +1,110 @@ +package interceptors_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/block/interceptors" + "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" + "github.com/stretchr/testify/assert" +) + +func TestNewGenericBlockBodyInterceptor_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + gbbi, err := interceptors.NewGenericBlockBodyInterceptor( + nil, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Equal(t, process.ErrNilMarshalizer, err) + assert.Nil(t, gbbi) +} + +func TestNewGenericBlockBodyInterceptor_NilPoolShouldErr(t *testing.T) { + t.Parallel() + + storer := &mock.StorerStub{} + + gbbi, err := interceptors.NewGenericBlockBodyInterceptor( + &mock.MarshalizerMock{}, + nil, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Equal(t, process.ErrNilCacher, err) + assert.Nil(t, gbbi) +} + +func TestNewGenericBlockBodyInterceptor_NilStorerShouldErr(t *testing.T) { + t.Parallel() + + cache := &mock.CacherStub{} + + gbbi, err := interceptors.NewGenericBlockBodyInterceptor( + &mock.MarshalizerMock{}, + cache, + nil, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Equal(t, process.ErrNilBlockBodyStorage, err) + assert.Nil(t, gbbi) +} + +func TestNewGenericBlockBodyInterceptor_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + gbbi, err := interceptors.NewGenericBlockBodyInterceptor( + 
&mock.MarshalizerMock{}, + cache, + storer, + nil, + mock.NewOneShardCoordinatorMock()) + + assert.Equal(t, process.ErrNilHasher, err) + assert.Nil(t, gbbi) +} + +func TestNewGenericBlockBodyInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + gbbi, err := interceptors.NewGenericBlockBodyInterceptor( + &mock.MarshalizerMock{}, + cache, + storer, + mock.HasherMock{}, + nil) + + assert.Equal(t, process.ErrNilShardCoordinator, err) + assert.Nil(t, gbbi) +} + +func TestNewGenericBlockBodyInterceptor_OkValsShouldWork(t *testing.T) { + t.Parallel() + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + gbbi, err := interceptors.NewGenericBlockBodyInterceptor( + &mock.MarshalizerMock{}, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Nil(t, err) + assert.NotNil(t, gbbi) +} diff --git a/process/block/interceptors/headerInterceptor.go b/process/block/interceptors/headerInterceptor.go new file mode 100644 index 00000000000..ea4fdb64c2c --- /dev/null +++ b/process/block/interceptors/headerInterceptor.go @@ -0,0 +1,123 @@ +package interceptors + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/crypto" + "github.com/ElrondNetwork/elrond-go-sandbox/data" + "github.com/ElrondNetwork/elrond-go-sandbox/hashing" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/block" + "github.com/ElrondNetwork/elrond-go-sandbox/sharding" + "github.com/ElrondNetwork/elrond-go-sandbox/storage" +) + +// HeaderInterceptor represents an interceptor used for block headers +type HeaderInterceptor struct { + *messageChecker + marshalizer marshal.Marshalizer + headers data.ShardedDataCacherNotifier + storer storage.Storer + headersNonces data.Uint64Cacher + multiSigVerifier crypto.MultiSigVerifier + 
hasher hashing.Hasher + shardCoordinator sharding.ShardCoordinator +} + +// NewHeaderInterceptor hooks a new interceptor for block headers +// Fetched block headers will be placed in a data pool +func NewHeaderInterceptor( + marshalizer marshal.Marshalizer, + headers data.ShardedDataCacherNotifier, + headersNonces data.Uint64Cacher, + storer storage.Storer, + multiSigVerifier crypto.MultiSigVerifier, + hasher hashing.Hasher, + shardCoordinator sharding.ShardCoordinator, +) (*HeaderInterceptor, error) { + if marshalizer == nil { + return nil, process.ErrNilMarshalizer + } + + if headers == nil { + return nil, process.ErrNilHeadersDataPool + } + + if headersNonces == nil { + return nil, process.ErrNilHeadersNoncesDataPool + } + + if storer == nil { + return nil, process.ErrNilHeadersStorage + } + + if multiSigVerifier == nil { + return nil, process.ErrNilMultiSigVerifier + } + + if hasher == nil { + return nil, process.ErrNilHasher + } + + if shardCoordinator == nil { + return nil, process.ErrNilShardCoordinator + } + + hdrIntercept := &HeaderInterceptor{ + messageChecker: &messageChecker{}, + marshalizer: marshalizer, + headers: headers, + headersNonces: headersNonces, + storer: storer, + multiSigVerifier: multiSigVerifier, + hasher: hasher, + shardCoordinator: shardCoordinator, + } + + return hdrIntercept, nil +} + +// ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received +// (for the topic this validator was registered to) +func (hi *HeaderInterceptor) ProcessReceivedMessage(message p2p.MessageP2P) error { + err := hi.checkMessage(message) + if err != nil { + return err + } + + hdrIntercepted := block.NewInterceptedHeader(hi.multiSigVerifier) + err = hi.marshalizer.Unmarshal(hdrIntercepted, message.Data()) + if err != nil { + return err + } + + hashWithSig := hi.hasher.Compute(string(message.Data())) + hdrIntercepted.SetHash(hashWithSig) + + err = 
hdrIntercepted.IntegrityAndValidity(hi.shardCoordinator) + if err != nil { + return err + } + + err = hdrIntercepted.VerifySig() + if err != nil { + return err + } + + isHeaderInStorage, _ := hi.storer.Has(hashWithSig) + + if isHeaderInStorage { + log.Debug("intercepted block header already processed") + return nil + } + + hi.headers.AddData(hashWithSig, hdrIntercepted.GetHeader(), hdrIntercepted.Shard()) + if hi.checkHeaderForCurrentShard(hdrIntercepted) { + _, _ = hi.headersNonces.HasOrAdd(hdrIntercepted.GetHeader().Nonce, hashWithSig) + } + return nil +} + +func (hi *HeaderInterceptor) checkHeaderForCurrentShard(header *block.InterceptedHeader) bool { + return hi.shardCoordinator.ShardForCurrentNode() == header.GetHeader().ShardId +} diff --git a/process/block/interceptors/headerInterceptor_test.go b/process/block/interceptors/headerInterceptor_test.go new file mode 100644 index 00000000000..b8c648f85ea --- /dev/null +++ b/process/block/interceptors/headerInterceptor_test.go @@ -0,0 +1,395 @@ +package interceptors_test + +import ( + "bytes" + "errors" + "testing" + + block2 "github.com/ElrondNetwork/elrond-go-sandbox/data/block" + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/block" + "github.com/ElrondNetwork/elrond-go-sandbox/process/block/interceptors" + "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" + "github.com/stretchr/testify/assert" +) + +//------- NewHeaderInterceptor + +func TestNewHeaderInterceptor_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + headers := &mock.ShardedDataStub{} + headersNonces := &mock.Uint64CacherStub{} + storer := &mock.StorerStub{} + + hi, err := interceptors.NewHeaderInterceptor( + nil, + headers, + headersNonces, + storer, + mock.NewMultiSigner(), + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Equal(t, process.ErrNilMarshalizer, err) + assert.Nil(t, hi) +} + +func TestNewHeaderInterceptor_NilHeadersShouldErr(t *testing.T) { 
+ t.Parallel() + + headersNonces := &mock.Uint64CacherStub{} + storer := &mock.StorerStub{} + + hi, err := interceptors.NewHeaderInterceptor( + &mock.MarshalizerMock{}, + nil, + headersNonces, + storer, + mock.NewMultiSigner(), + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Equal(t, process.ErrNilHeadersDataPool, err) + assert.Nil(t, hi) +} + +func TestNewHeaderInterceptor_NilHeadersNoncesShouldErr(t *testing.T) { + t.Parallel() + + headers := &mock.ShardedDataStub{} + storer := &mock.StorerStub{} + + hi, err := interceptors.NewHeaderInterceptor( + &mock.MarshalizerMock{}, + headers, + nil, + storer, + mock.NewMultiSigner(), + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Equal(t, process.ErrNilHeadersNoncesDataPool, err) + assert.Nil(t, hi) +} + +func TestNewHeaderInterceptor_NilStorerShouldErr(t *testing.T) { + t.Parallel() + + headers := &mock.ShardedDataStub{} + headersNonces := &mock.Uint64CacherStub{} + + hi, err := interceptors.NewHeaderInterceptor( + &mock.MarshalizerMock{}, + headers, + headersNonces, + nil, + mock.NewMultiSigner(), + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Equal(t, process.ErrNilHeadersStorage, err) + assert.Nil(t, hi) +} + +func TestNewHeaderInterceptor_NilMultiSignerShouldErr(t *testing.T) { + t.Parallel() + + headers := &mock.ShardedDataStub{} + headersNonces := &mock.Uint64CacherStub{} + storer := &mock.StorerStub{} + + hi, err := interceptors.NewHeaderInterceptor( + &mock.MarshalizerMock{}, + headers, + headersNonces, + storer, + nil, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Nil(t, hi) + assert.Equal(t, process.ErrNilMultiSigVerifier, err) +} + +func TestNewHeaderInterceptor_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + headers := &mock.ShardedDataStub{} + headersNonces := &mock.Uint64CacherStub{} + storer := &mock.StorerStub{} + + hi, err := interceptors.NewHeaderInterceptor( + &mock.MarshalizerMock{}, + headers, + 
headersNonces, + storer, + mock.NewMultiSigner(), + nil, + mock.NewOneShardCoordinatorMock()) + + assert.Equal(t, process.ErrNilHasher, err) + assert.Nil(t, hi) +} + +func TestNewHeaderInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + headers := &mock.ShardedDataStub{} + headersNonces := &mock.Uint64CacherStub{} + storer := &mock.StorerStub{} + + hi, err := interceptors.NewHeaderInterceptor( + &mock.MarshalizerMock{}, + headers, + headersNonces, + storer, + mock.NewMultiSigner(), + mock.HasherMock{}, + nil) + + assert.Equal(t, process.ErrNilShardCoordinator, err) + assert.Nil(t, hi) +} + +func TestNewHeaderInterceptor_OkValsShouldWork(t *testing.T) { + t.Parallel() + + headers := &mock.ShardedDataStub{} + headersNonces := &mock.Uint64CacherStub{} + storer := &mock.StorerStub{} + + hi, err := interceptors.NewHeaderInterceptor( + &mock.MarshalizerMock{}, + headers, + headersNonces, + storer, + mock.NewMultiSigner(), + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Nil(t, err) + assert.NotNil(t, hi) +} + +//------- ProcessReceivedMessage + +func TestHeaderInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { + t.Parallel() + + headers := &mock.ShardedDataStub{} + headersNonces := &mock.Uint64CacherStub{} + storer := &mock.StorerStub{} + + hi, _ := interceptors.NewHeaderInterceptor( + &mock.MarshalizerMock{}, + headers, + headersNonces, + storer, + mock.NewMultiSigner(), + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Equal(t, process.ErrNilMessage, hi.ProcessReceivedMessage(nil)) +} + +func TestHeaderInterceptor_ProcessReceivedMessageNilDataToProcessShouldErr(t *testing.T) { + t.Parallel() + + headers := &mock.ShardedDataStub{} + headersNonces := &mock.Uint64CacherStub{} + storer := &mock.StorerStub{} + + hi, _ := interceptors.NewHeaderInterceptor( + &mock.MarshalizerMock{}, + headers, + headersNonces, + storer, + mock.NewMultiSigner(), + mock.HasherMock{}, + 
mock.NewOneShardCoordinatorMock()) + + msg := &mock.P2PMessageMock{} + + assert.Equal(t, process.ErrNilDataToProcess, hi.ProcessReceivedMessage(msg)) +} + +func TestHeaderInterceptor_ProcessReceivedMessageMarshalizerErrorsAtUnmarshalingShouldErr(t *testing.T) { + t.Parallel() + + errMarshalizer := errors.New("marshalizer error") + + headers := &mock.ShardedDataStub{} + headersNonces := &mock.Uint64CacherStub{} + storer := &mock.StorerStub{} + + hi, _ := interceptors.NewHeaderInterceptor( + &mock.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + return errMarshalizer + }, + }, + headers, + headersNonces, + storer, + mock.NewMultiSigner(), + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + msg := &mock.P2PMessageMock{ + DataField: make([]byte, 0), + } + + assert.Equal(t, errMarshalizer, hi.ProcessReceivedMessage(msg)) +} + +func TestHeaderInterceptor_ProcessReceivedMessageSanityCheckFailedShouldErr(t *testing.T) { + t.Parallel() + + headers := &mock.ShardedDataStub{} + headersNonces := &mock.Uint64CacherStub{} + storer := &mock.StorerStub{} + marshalizer := &mock.MarshalizerMock{} + multisigner := mock.NewMultiSigner() + + hi, _ := interceptors.NewHeaderInterceptor( + marshalizer, + headers, + headersNonces, + storer, + multisigner, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + hdr := block.NewInterceptedHeader(multisigner) + buff, _ := marshalizer.Marshal(hdr) + msg := &mock.P2PMessageMock{ + DataField: buff, + } + + assert.Equal(t, process.ErrNilBlockBodyHash, hi.ProcessReceivedMessage(msg)) +} + +func TestHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + + wasCalled := 0 + + testedNonce := uint64(67) + + headers := &mock.ShardedDataStub{} + multisigner := mock.NewMultiSigner() + + headersNonces := &mock.Uint64CacherStub{} + headersNonces.HasOrAddCalled = func(u uint64, i []byte) (b bool, b2 bool) { + if u == testedNonce { + 
wasCalled++ + } + + return + } + + storer := &mock.StorerStub{} + storer.HasCalled = func(key []byte) (bool, error) { + return false, nil + } + + hi, _ := interceptors.NewHeaderInterceptor( + marshalizer, + headers, + headersNonces, + storer, + multisigner, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + hdr := block.NewInterceptedHeader(multisigner) + hdr.Nonce = testedNonce + hdr.ShardId = 0 + hdr.PrevHash = make([]byte, 0) + hdr.PubKeysBitmap = make([]byte, 0) + hdr.BlockBodyHash = make([]byte, 0) + hdr.BlockBodyType = block2.TxBlock + hdr.Signature = make([]byte, 0) + hdr.Commitment = make([]byte, 0) + hdr.SetHash([]byte("aaa")) + + buff, _ := marshalizer.Marshal(hdr) + msg := &mock.P2PMessageMock{ + DataField: buff, + } + + headers.AddDataCalled = func(key []byte, data interface{}, destShardID uint32) { + aaaHash := mock.HasherMock{}.Compute(string(buff)) + if bytes.Equal(aaaHash, key) { + wasCalled++ + } + } + + assert.Nil(t, hi.ProcessReceivedMessage(msg)) + assert.Equal(t, 2, wasCalled) +} + +func TestHeaderInterceptor_ProcessReceivedMessageIsInStorageShouldNotAdd(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + + wasCalled := 0 + + testedNonce := uint64(67) + + headers := &mock.ShardedDataStub{} + multisigner := mock.NewMultiSigner() + + headersNonces := &mock.Uint64CacherStub{} + headersNonces.HasOrAddCalled = func(u uint64, i []byte) (b bool, b2 bool) { + if u == testedNonce { + wasCalled++ + } + + return + } + + storer := &mock.StorerStub{} + storer.HasCalled = func(key []byte) (bool, error) { + return true, nil + } + + hi, _ := interceptors.NewHeaderInterceptor( + marshalizer, + headers, + headersNonces, + storer, + multisigner, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + hdr := block.NewInterceptedHeader(multisigner) + hdr.Nonce = testedNonce + hdr.ShardId = 0 + hdr.PrevHash = make([]byte, 0) + hdr.PubKeysBitmap = make([]byte, 0) + hdr.BlockBodyHash = make([]byte, 0) + hdr.BlockBodyType = 
block2.TxBlock + hdr.Signature = make([]byte, 0) + hdr.Commitment = make([]byte, 0) + hdr.SetHash([]byte("aaa")) + + buff, _ := marshalizer.Marshal(hdr) + msg := &mock.P2PMessageMock{ + DataField: buff, + } + + headers.AddDataCalled = func(key []byte, data interface{}, destShardID uint32) { + aaaHash := mock.HasherMock{}.Compute(string(buff)) + if bytes.Equal(aaaHash, key) { + wasCalled++ + } + } + + assert.Nil(t, hi.ProcessReceivedMessage(msg)) + assert.Equal(t, 0, wasCalled) +} diff --git a/process/block/interceptors/messageChecker.go b/process/block/interceptors/messageChecker.go new file mode 100644 index 00000000000..ebfe8f8d918 --- /dev/null +++ b/process/block/interceptors/messageChecker.go @@ -0,0 +1,24 @@ +package interceptors + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/logger" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process" +) + +var log = logger.NewDefaultLogger() + +type messageChecker struct { +} + +func (*messageChecker) checkMessage(message p2p.MessageP2P) error { + if message == nil { + return process.ErrNilMessage + } + + if message.Data() == nil { + return process.ErrNilDataToProcess + } + + return nil +} diff --git a/process/block/interceptors/peerBlockBodyInterceptor.go b/process/block/interceptors/peerBlockBodyInterceptor.go new file mode 100644 index 00000000000..b093e794031 --- /dev/null +++ b/process/block/interceptors/peerBlockBodyInterceptor.go @@ -0,0 +1,54 @@ +package interceptors + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/hashing" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process/block" + "github.com/ElrondNetwork/elrond-go-sandbox/sharding" + "github.com/ElrondNetwork/elrond-go-sandbox/storage" +) + +// PeerBlockBodyInterceptor represents an interceptor used for transaction block bodies +type PeerBlockBodyInterceptor struct { + 
*GenericBlockBodyInterceptor +} + +// NewPeerBlockBodyInterceptor creates a new instance of a TxBlockBodyInterceptor +func NewPeerBlockBodyInterceptor( + marshalizer marshal.Marshalizer, + cache storage.Cacher, + storer storage.Storer, + hasher hashing.Hasher, + shardCoordinator sharding.ShardCoordinator, +) (*PeerBlockBodyInterceptor, error) { + gbbi, err := NewGenericBlockBodyInterceptor( + marshalizer, + cache, + storer, + hasher, + shardCoordinator) + + if err != nil { + return nil, err + } + + return &PeerBlockBodyInterceptor{GenericBlockBodyInterceptor: gbbi}, nil +} + +// ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received +// (for the topic this validator was registered to) +func (pbbi *PeerBlockBodyInterceptor) ProcessReceivedMessage(message p2p.MessageP2P) error { + err := pbbi.checkMessage(message) + if err != nil { + return err + } + + peerBlockBody := block.NewInterceptedPeerBlockBody() + err = pbbi.marshalizer.Unmarshal(peerBlockBody, message.Data()) + if err != nil { + return err + } + + return pbbi.processBlockBody(message.Data(), peerBlockBody) +} diff --git a/process/block/interceptors/peerBlockBodyInterceptors_test.go b/process/block/interceptors/peerBlockBodyInterceptors_test.go new file mode 100644 index 00000000000..119073258db --- /dev/null +++ b/process/block/interceptors/peerBlockBodyInterceptors_test.go @@ -0,0 +1,185 @@ +package interceptors_test + +import ( + "bytes" + "errors" + "testing" + + block2 "github.com/ElrondNetwork/elrond-go-sandbox/data/block" + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/block" + "github.com/ElrondNetwork/elrond-go-sandbox/process/block/interceptors" + "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" + "github.com/stretchr/testify/assert" +) + +//------- NewPeerBlockBodyInterceptor + +func TestNewPeerBlockBodyInterceptor_WithNilParameterShouldErr(t *testing.T) { 
+ t.Parallel() + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + pbbi, err := interceptors.NewPeerBlockBodyInterceptor( + nil, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Equal(t, process.ErrNilMarshalizer, err) + assert.Nil(t, pbbi) +} + +func TestNewPeerBlockBodyInterceptor_OkValsShouldWork(t *testing.T) { + t.Parallel() + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + pbbi, err := interceptors.NewPeerBlockBodyInterceptor( + &mock.MarshalizerMock{}, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Nil(t, err) + assert.NotNil(t, pbbi) +} + +//------- ProcessReceivedMessage + +func TestPeerBlockBodyInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { + t.Parallel() + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + pbbi, _ := interceptors.NewPeerBlockBodyInterceptor( + &mock.MarshalizerMock{}, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Equal(t, process.ErrNilMessage, pbbi.ProcessReceivedMessage(nil)) +} + +func TestPeerBlockBodyInterceptor_ProcessReceivedMessageNilMessageDataShouldErr(t *testing.T) { + t.Parallel() + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + pbbi, _ := interceptors.NewPeerBlockBodyInterceptor( + &mock.MarshalizerMock{}, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + msg := &mock.P2PMessageMock{} + + assert.Equal(t, process.ErrNilDataToProcess, pbbi.ProcessReceivedMessage(msg)) +} + +func TestPeerBlockBodyInterceptor_ValidateMarshalizerErrorsAtUnmarshalingShouldErr(t *testing.T) { + t.Parallel() + + errMarshalizer := errors.New("marshalizer error") + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + pbbi, _ := interceptors.NewPeerBlockBodyInterceptor( + &mock.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + return errMarshalizer + }, + }, + cache, + 
storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + msg := &mock.P2PMessageMock{ + DataField: make([]byte, 0), + } + + assert.Equal(t, errMarshalizer, pbbi.ProcessReceivedMessage(msg)) +} + +func TestPeerBlockBodyInterceptor_ProcessReceivedMessageIntegrityFailsShouldErr(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + pbbi, _ := interceptors.NewPeerBlockBodyInterceptor( + marshalizer, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + peerChangeBlock := block.NewInterceptedPeerBlockBody() + peerChangeBlock.ShardID = uint32(0) + peerChangeBlock.RootHash = []byte("root hash") + peerChangeBlock.Changes = nil + + buff, _ := marshalizer.Marshal(peerChangeBlock) + + msg := &mock.P2PMessageMock{ + DataField: buff, + } + + assert.Equal(t, process.ErrNilPeerChanges, pbbi.ProcessReceivedMessage(msg)) +} + +func TestPeerBlockBodyInterceptor_ProcessReceivedMessageBlockShouldWork(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{ + HasCalled: func(key []byte) (b bool, e error) { + return false, nil + }, + } + + pbbi, _ := interceptors.NewPeerBlockBodyInterceptor( + marshalizer, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + peerChangeBlock := block.NewInterceptedPeerBlockBody() + peerChangeBlock.ShardID = uint32(0) + peerChangeBlock.RootHash = []byte("root hash") + peerChangeBlock.Changes = []block2.PeerChange{ + {PubKey: []byte("pub key"), ShardIdDest: uint32(0)}, + } + + buff, _ := marshalizer.Marshal(peerChangeBlock) + + msg := &mock.P2PMessageMock{ + DataField: buff, + } + + putInCacheWasCalled := false + cache.PutCalled = func(key []byte, value interface{}) (evicted bool) { + if bytes.Equal(key, mock.HasherMock{}.Compute(string(buff))) { + putInCacheWasCalled = true + } + return false + } + + assert.Nil(t, 
pbbi.ProcessReceivedMessage(msg)) + assert.True(t, putInCacheWasCalled) +} diff --git a/process/block/interceptors/stateBlockBodyInterceptor.go b/process/block/interceptors/stateBlockBodyInterceptor.go new file mode 100644 index 00000000000..73892cf7a8c --- /dev/null +++ b/process/block/interceptors/stateBlockBodyInterceptor.go @@ -0,0 +1,54 @@ +package interceptors + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/hashing" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process/block" + "github.com/ElrondNetwork/elrond-go-sandbox/sharding" + "github.com/ElrondNetwork/elrond-go-sandbox/storage" +) + +// StateBlockBodyInterceptor represents an interceptor used for state block bodies +type StateBlockBodyInterceptor struct { + *GenericBlockBodyInterceptor +} + +// NewStateBlockBodyInterceptor creates a new instance of a TxBlockBodyInterceptor +func NewStateBlockBodyInterceptor( + marshalizer marshal.Marshalizer, + cache storage.Cacher, + storer storage.Storer, + hasher hashing.Hasher, + shardCoordinator sharding.ShardCoordinator, +) (*StateBlockBodyInterceptor, error) { + gbbi, err := NewGenericBlockBodyInterceptor( + marshalizer, + cache, + storer, + hasher, + shardCoordinator) + + if err != nil { + return nil, err + } + + return &StateBlockBodyInterceptor{GenericBlockBodyInterceptor: gbbi}, nil +} + +// ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received +// (for the topic this validator was registered to) +func (sbbi *StateBlockBodyInterceptor) ProcessReceivedMessage(message p2p.MessageP2P) error { + err := sbbi.checkMessage(message) + if err != nil { + return err + } + + stateBlockBody := block.NewInterceptedStateBlockBody() + err = sbbi.marshalizer.Unmarshal(stateBlockBody, message.Data()) + if err != nil { + return err + } + + return sbbi.processBlockBody(message.Data(), 
stateBlockBody) +} diff --git a/process/block/interceptors/stateBlockBodyInterceptors_test.go b/process/block/interceptors/stateBlockBodyInterceptors_test.go new file mode 100644 index 00000000000..144708591d4 --- /dev/null +++ b/process/block/interceptors/stateBlockBodyInterceptors_test.go @@ -0,0 +1,180 @@ +package interceptors_test + +import ( + "bytes" + "errors" + "testing" + + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/block" + "github.com/ElrondNetwork/elrond-go-sandbox/process/block/interceptors" + "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" + "github.com/stretchr/testify/assert" +) + +//------- NewStateBlockBodyInterceptor + +func TestNewStateBlockBodyInterceptor_WithNilParameterShouldErr(t *testing.T) { + t.Parallel() + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + sbbi, err := interceptors.NewStateBlockBodyInterceptor( + nil, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Equal(t, process.ErrNilMarshalizer, err) + assert.Nil(t, sbbi) +} + +func TestNewStateBlockBodyInterceptor_OkValsShouldWork(t *testing.T) { + t.Parallel() + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + sbbi, err := interceptors.NewStateBlockBodyInterceptor( + &mock.MarshalizerMock{}, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Nil(t, err) + assert.NotNil(t, sbbi) +} + +//------- Validate + +func TestStateBlockBodyInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { + t.Parallel() + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + sbbi, _ := interceptors.NewStateBlockBodyInterceptor( + &mock.MarshalizerMock{}, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Equal(t, process.ErrNilMessage, sbbi.ProcessReceivedMessage(nil)) +} + +func TestStateBlockBodyInterceptor_ProcessReceivedMessageNilMessageDataShouldErr(t 
*testing.T) { + t.Parallel() + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + sbbi, _ := interceptors.NewStateBlockBodyInterceptor( + &mock.MarshalizerMock{}, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + msg := &mock.P2PMessageMock{} + + assert.Equal(t, process.ErrNilDataToProcess, sbbi.ProcessReceivedMessage(msg)) +} + +func TestStateBlockBodyInterceptor_ProcessReceivedMessageMarshalizerErrorsAtUnmarshalingShouldErr(t *testing.T) { + t.Parallel() + + errMarshalizer := errors.New("marshalizer error") + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + sbbi, _ := interceptors.NewStateBlockBodyInterceptor( + &mock.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + return errMarshalizer + }, + }, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + msg := &mock.P2PMessageMock{ + DataField: make([]byte, 0), + } + + assert.Equal(t, errMarshalizer, sbbi.ProcessReceivedMessage(msg)) +} + +func TestStateBlockBodyInterceptor_ProcessReceivedMessageIntegrityFailsShouldErr(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + sbbi, _ := interceptors.NewStateBlockBodyInterceptor( + marshalizer, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + stateBlock := block.NewInterceptedStateBlockBody() + stateBlock.ShardID = uint32(0) + stateBlock.RootHash = nil + + buff, _ := marshalizer.Marshal(stateBlock) + + msg := &mock.P2PMessageMock{ + DataField: buff, + } + + assert.Equal(t, process.ErrNilRootHash, sbbi.ProcessReceivedMessage(msg)) +} + +func TestStateBlockBodyInterceptor_ProcessReceivedMessageBlockShouldWork(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{ + HasCalled: func(key []byte) (b bool, e error) { + return false, nil + }, + } + + sbbi, _ := 
interceptors.NewStateBlockBodyInterceptor( + marshalizer, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + stateBlock := block.NewInterceptedStateBlockBody() + stateBlock.ShardID = uint32(0) + stateBlock.RootHash = []byte("root hash") + + buff, _ := marshalizer.Marshal(stateBlock) + + msg := &mock.P2PMessageMock{ + DataField: buff, + } + + putInCacheWasCalled := false + cache.PutCalled = func(key []byte, value interface{}) (evicted bool) { + if bytes.Equal(key, mock.HasherMock{}.Compute(string(buff))) { + putInCacheWasCalled = true + } + return false + } + + assert.Nil(t, sbbi.ProcessReceivedMessage(msg)) + assert.True(t, putInCacheWasCalled) +} diff --git a/process/block/interceptors/txBlockBodyInterceptor.go b/process/block/interceptors/txBlockBodyInterceptor.go new file mode 100644 index 00000000000..39adbc8cddd --- /dev/null +++ b/process/block/interceptors/txBlockBodyInterceptor.go @@ -0,0 +1,54 @@ +package interceptors + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/hashing" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process/block" + "github.com/ElrondNetwork/elrond-go-sandbox/sharding" + "github.com/ElrondNetwork/elrond-go-sandbox/storage" +) + +// TxBlockBodyInterceptor represents an interceptor used for transaction block bodies +type TxBlockBodyInterceptor struct { + *GenericBlockBodyInterceptor +} + +// NewTxBlockBodyInterceptor creates a new instance of a TxBlockBodyInterceptor +func NewTxBlockBodyInterceptor( + marshalizer marshal.Marshalizer, + cache storage.Cacher, + storer storage.Storer, + hasher hashing.Hasher, + shardCoordinator sharding.ShardCoordinator, +) (*TxBlockBodyInterceptor, error) { + gbbi, err := NewGenericBlockBodyInterceptor( + marshalizer, + cache, + storer, + hasher, + shardCoordinator) + + if err != nil { + return nil, err + } + + return 
&TxBlockBodyInterceptor{GenericBlockBodyInterceptor: gbbi}, nil +} + +// ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received +// (for the topic this validator was registered to) +func (tbbi *TxBlockBodyInterceptor) ProcessReceivedMessage(message p2p.MessageP2P) error { + err := tbbi.checkMessage(message) + if err != nil { + return err + } + + txBlockBody := block.NewInterceptedTxBlockBody() + err = tbbi.marshalizer.Unmarshal(txBlockBody, message.Data()) + if err != nil { + return err + } + + return tbbi.processBlockBody(message.Data(), txBlockBody) +} diff --git a/process/block/interceptors/txBlockBodyInterceptors_test.go b/process/block/interceptors/txBlockBodyInterceptors_test.go new file mode 100644 index 00000000000..23a6bb3f01a --- /dev/null +++ b/process/block/interceptors/txBlockBodyInterceptors_test.go @@ -0,0 +1,188 @@ +package interceptors_test + +import ( + "bytes" + "errors" + "testing" + + block2 "github.com/ElrondNetwork/elrond-go-sandbox/data/block" + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/block" + "github.com/ElrondNetwork/elrond-go-sandbox/process/block/interceptors" + "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" + "github.com/stretchr/testify/assert" +) + +//------- NewTxBlockBodyInterceptor + +func TestNewTxBlockBodyInterceptor_WithNilParameterShouldErr(t *testing.T) { + t.Parallel() + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + tbbi, err := interceptors.NewTxBlockBodyInterceptor( + nil, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Equal(t, process.ErrNilMarshalizer, err) + assert.Nil(t, tbbi) +} + +func TestNewTxBlockBodyInterceptor_OkValsShouldWork(t *testing.T) { + t.Parallel() + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + tbbi, err := interceptors.NewTxBlockBodyInterceptor( + &mock.MarshalizerMock{}, + 
cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Nil(t, err) + assert.NotNil(t, tbbi) +} + +//------- ProcessReceivedMessage + +func TestTxBlockBodyInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { + t.Parallel() + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + tbbi, _ := interceptors.NewTxBlockBodyInterceptor( + &mock.MarshalizerMock{}, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + assert.Equal(t, process.ErrNilMessage, tbbi.ProcessReceivedMessage(nil)) +} + +func TestTxBlockBodyInterceptor_ProcessReceivedMessageNilMessageDataShouldErr(t *testing.T) { + t.Parallel() + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + tbbi, _ := interceptors.NewTxBlockBodyInterceptor( + &mock.MarshalizerMock{}, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + msg := &mock.P2PMessageMock{} + + assert.Equal(t, process.ErrNilDataToProcess, tbbi.ProcessReceivedMessage(msg)) +} + +func TestTxBlockBodyInterceptor_ProcessReceivedMessageMarshalizerErrorsAtUnmarshalingShouldErr(t *testing.T) { + t.Parallel() + + errMarshalizer := errors.New("marshalizer error") + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + tbbi, _ := interceptors.NewTxBlockBodyInterceptor( + &mock.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + return errMarshalizer + }, + }, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + msg := &mock.P2PMessageMock{ + DataField: make([]byte, 0), + } + + assert.Equal(t, errMarshalizer, tbbi.ProcessReceivedMessage(msg)) +} + +func TestTxBlockBodyInterceptor_ProcessReceivedMessageIntegrityFailsShouldErr(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{} + + tbbi, _ := interceptors.NewTxBlockBodyInterceptor( + marshalizer, + cache, + storer, + mock.HasherMock{}, + 
mock.NewOneShardCoordinatorMock()) + + txBlock := block.NewInterceptedTxBlockBody() + txBlock.RootHash = []byte("root hash") + txBlock.ShardID = uint32(0) + txBlock.MiniBlocks = nil + + buff, _ := marshalizer.Marshal(txBlock) + + msg := &mock.P2PMessageMock{ + DataField: buff, + } + + assert.Equal(t, process.ErrNilMiniBlocks, tbbi.ProcessReceivedMessage(msg)) +} + +func TestTxBlockBodyInterceptor_ProcessReceivedMessageBlockShouldWork(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + + cache := &mock.CacherStub{} + storer := &mock.StorerStub{ + HasCalled: func(key []byte) (b bool, e error) { + return false, nil + }, + } + + tbbi, _ := interceptors.NewTxBlockBodyInterceptor( + marshalizer, + cache, + storer, + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock()) + + txBlock := block.NewInterceptedTxBlockBody() + txBlock.RootHash = []byte("root hash") + txBlock.ShardID = uint32(0) + txBlock.MiniBlocks = []block2.MiniBlock{ + { + ShardID: uint32(0), + TxHashes: [][]byte{[]byte("tx hash 1")}, + }, + } + + buff, _ := marshalizer.Marshal(txBlock) + + msg := &mock.P2PMessageMock{ + DataField: buff, + } + + putInCacheWasCalled := false + cache.PutCalled = func(key []byte, value interface{}) (evicted bool) { + if bytes.Equal(key, mock.HasherMock{}.Compute(string(buff))) { + putInCacheWasCalled = true + } + return false + } + + assert.Nil(t, tbbi.ProcessReceivedMessage(msg)) + assert.True(t, putInCacheWasCalled) +} diff --git a/process/block/interceptors_test.go b/process/block/interceptors_test.go deleted file mode 100644 index e3d56875c63..00000000000 --- a/process/block/interceptors_test.go +++ /dev/null @@ -1,685 +0,0 @@ -package block - -import ( - "bytes" - "testing" - - block2 "github.com/ElrondNetwork/elrond-go-sandbox/data/block" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/ElrondNetwork/elrond-go-sandbox/process" - "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" - "github.com/stretchr/testify/assert" -) - 
-//------- HeaderInterceptor - -//NewHeaderInterceptor - -func TestNewHeaderInterceptor_NilMessengerShouldErr(t *testing.T) { - t.Parallel() - - headers := &mock.ShardedDataStub{} - headersNonces := &mock.Uint64CacherStub{} - storer := &mock.StorerStub{} - multiSigner := mock.NewMultiSigner() - - hi, err := NewHeaderInterceptor( - nil, - headers, - headersNonces, - storer, - multiSigner, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - assert.Equal(t, process.ErrNilInterceptor, err) - assert.Nil(t, hi) -} - -func TestNewHeaderInterceptor_NilHeadersShouldErr(t *testing.T) { - t.Parallel() - - interceptor := &mock.InterceptorStub{} - headersNonces := &mock.Uint64CacherStub{} - storer := &mock.StorerStub{} - multiSigner := mock.NewMultiSigner() - - hi, err := NewHeaderInterceptor( - interceptor, - nil, - headersNonces, - storer, - multiSigner, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - assert.Equal(t, process.ErrNilHeadersDataPool, err) - assert.Nil(t, hi) -} - -func TestNewHeaderInterceptor_NilHeadersNoncesShouldErr(t *testing.T) { - t.Parallel() - - interceptor := &mock.InterceptorStub{} - headers := &mock.ShardedDataStub{} - storer := &mock.StorerStub{} - multiSigner := mock.NewMultiSigner() - - hi, err := NewHeaderInterceptor( - interceptor, - headers, - nil, - storer, - multiSigner, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - assert.Equal(t, process.ErrNilHeadersNoncesDataPool, err) - assert.Nil(t, hi) -} - -func TestNewHeaderInterceptor_NilStorerShouldErr(t *testing.T) { - t.Parallel() - - interceptor := &mock.InterceptorStub{} - headers := &mock.ShardedDataStub{} - headersNonces := &mock.Uint64CacherStub{} - multiSigner := mock.NewMultiSigner() - - hi, err := NewHeaderInterceptor( - interceptor, - headers, - headersNonces, - nil, - multiSigner, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - assert.Equal(t, process.ErrNilHeadersStorage, err) - assert.Nil(t, hi) -} - -func 
TestNewHeaderInterceptor_NilMultiSignerShouldErr(t *testing.T) { - t.Parallel() - - interceptor := &mock.InterceptorStub{} - headers := &mock.ShardedDataStub{} - headersNonces := &mock.Uint64CacherStub{} - storer := &mock.StorerStub{} - - hi, err := NewHeaderInterceptor( - interceptor, - headers, - headersNonces, - storer, - nil, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - assert.Nil(t, hi) - assert.Equal(t, process.ErrNilMultiSigVerifier, err) -} - -func TestNewHeaderInterceptor_NilHasherShouldErr(t *testing.T) { - t.Parallel() - - interceptor := &mock.InterceptorStub{} - headers := &mock.ShardedDataStub{} - headersNonces := &mock.Uint64CacherStub{} - storer := &mock.StorerStub{} - multiSigner := mock.NewMultiSigner() - - hi, err := NewHeaderInterceptor( - interceptor, - headers, - headersNonces, - storer, - multiSigner, - nil, - mock.NewOneShardCoordinatorMock()) - - assert.Equal(t, process.ErrNilHasher, err) - assert.Nil(t, hi) -} - -func TestNewHeaderInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { - t.Parallel() - - interceptor := &mock.InterceptorStub{} - headers := &mock.ShardedDataStub{} - headersNonces := &mock.Uint64CacherStub{} - storer := &mock.StorerStub{} - multiSigner := mock.NewMultiSigner() - - hi, err := NewHeaderInterceptor( - interceptor, - headers, - headersNonces, - storer, - multiSigner, - mock.HasherMock{}, - nil) - - assert.Equal(t, process.ErrNilShardCoordinator, err) - assert.Nil(t, hi) -} - -func TestNewHeaderInterceptor_OkValsShouldWork(t *testing.T) { - t.Parallel() - - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - - headers := &mock.ShardedDataStub{} - headersNonces := &mock.Uint64CacherStub{} - storer := &mock.StorerStub{} - multiSigner := mock.NewMultiSigner() - - hi, err := NewHeaderInterceptor( - interceptor, - headers, - headersNonces, - storer, - multiSigner, - mock.HasherMock{}, - 
mock.NewOneShardCoordinatorMock()) - - assert.Nil(t, err) - assert.NotNil(t, hi) -} - -//processHdr - -func TestHeaderInterceptor_ProcessHdrNilHdrShouldErr(t *testing.T) { - t.Parallel() - - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - - headers := &mock.ShardedDataStub{} - headersNonces := &mock.Uint64CacherStub{} - storer := &mock.StorerStub{} - multiSigner := mock.NewMultiSigner() - - hi, _ := NewHeaderInterceptor( - interceptor, - headers, - headersNonces, - storer, - multiSigner, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - assert.Equal(t, process.ErrNilBlockHeader, hi.ProcessHdr(nil, make([]byte, 0))) -} - -func TestHeaderInterceptor_ProcessHdrNilDataToProcessShouldErr(t *testing.T) { - t.Parallel() - - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - - headers := &mock.ShardedDataStub{} - headersNonces := &mock.Uint64CacherStub{} - storer := &mock.StorerStub{} - multiSigner := mock.NewMultiSigner() - - hi, _ := NewHeaderInterceptor( - interceptor, - headers, - headersNonces, - storer, - multiSigner, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - err := hi.ProcessHdr(NewInterceptedHeader(multiSigner), nil) - - assert.Equal(t, process.ErrNilDataToProcess, err) -} - -func TestHeaderInterceptor_ProcessHdrWrongTypeOfCreatorShouldErr(t *testing.T) { - t.Parallel() - - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - - headers := &mock.ShardedDataStub{} - headersNonces := &mock.Uint64CacherStub{} - storer := &mock.StorerStub{} - multiSigner := mock.NewMultiSigner() - - hi, _ := NewHeaderInterceptor( - interceptor, - headers, - headersNonces, - storer, - multiSigner, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) 
- - assert.Equal(t, process.ErrBadInterceptorTopicImplementation, - hi.ProcessHdr(&mock.StringCreator{}, make([]byte, 0))) -} - -func TestHeaderInterceptor_ProcessHdrSanityCheckFailedShouldErr(t *testing.T) { - t.Parallel() - - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - - headers := &mock.ShardedDataStub{} - headersNonces := &mock.Uint64CacherStub{} - storer := &mock.StorerStub{} - multiSigner := mock.NewMultiSigner() - - hi, _ := NewHeaderInterceptor( - interceptor, - headers, - headersNonces, - storer, - multiSigner, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - err := hi.ProcessHdr(NewInterceptedHeader(multiSigner), make([]byte, 0)) - - assert.Equal(t, process.ErrNilBlockBodyHash, err) -} - -func TestHeaderInterceptor_ProcessOkValsShouldWork(t *testing.T) { - t.Parallel() - - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - - wasCalled := 0 - - testedNonce := uint64(67) - - headers := &mock.ShardedDataStub{} - headers.AddDataCalled = func(key []byte, data interface{}, destShardID uint32) { - aaaHash := mock.HasherMock{}.Compute("aaa") - if bytes.Equal(aaaHash, key) { - wasCalled++ - } - } - - headersNonces := &mock.Uint64CacherStub{} - headersNonces.HasOrAddCalled = func(u uint64, i []byte) (b bool, b2 bool) { - if u == testedNonce { - wasCalled++ - } - - return - } - - storer := &mock.StorerStub{} - storer.HasCalled = func(key []byte) (bool, error) { - return false, nil - } - multiSigner := mock.NewMultiSigner() - - hi, _ := NewHeaderInterceptor( - interceptor, - headers, - headersNonces, - storer, - multiSigner, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - hdr := NewInterceptedHeader(multiSigner) - hdr.Nonce = testedNonce - hdr.ShardId = 0 - hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) - 
hdr.BlockBodyHash = make([]byte, 0) - hdr.BlockBodyType = block2.TxBlock - hdr.Signature = make([]byte, 0) - hdr.Commitment = make([]byte, 0) - hdr.SetHash([]byte("aaa")) - - assert.Nil(t, hi.ProcessHdr(hdr, []byte("aaa"))) - assert.Equal(t, 2, wasCalled) -} - -func TestHeaderInterceptor_ProcessIsInStorageShouldNotAdd(t *testing.T) { - t.Parallel() - - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - - wasCalled := 0 - - testedNonce := uint64(67) - - headers := &mock.ShardedDataStub{} - headers.AddDataCalled = func(key []byte, data interface{}, destShardID uint32) { - aaaHash := mock.HasherMock{}.Compute("aaa") - if bytes.Equal(aaaHash, key) { - wasCalled++ - } - } - - headersNonces := &mock.Uint64CacherStub{} - headersNonces.HasOrAddCalled = func(u uint64, i []byte) (b bool, b2 bool) { - if u == testedNonce { - wasCalled++ - } - - return - } - - storer := &mock.StorerStub{} - storer.HasCalled = func(key []byte) (bool, error) { - return true, nil - } - multiSigner := mock.NewMultiSigner() - - hi, _ := NewHeaderInterceptor( - interceptor, - headers, - headersNonces, - storer, - multiSigner, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - hdr := NewInterceptedHeader(multiSigner) - hdr.Nonce = testedNonce - hdr.ShardId = 0 - hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) - hdr.BlockBodyHash = make([]byte, 0) - hdr.BlockBodyType = block2.TxBlock - hdr.Signature = make([]byte, 0) - hdr.Commitment = make([]byte, 0) - hdr.SetHash([]byte("aaa")) - - assert.Nil(t, hi.ProcessHdr(hdr, []byte("aaa"))) - assert.Equal(t, 0, wasCalled) -} - -//------- BlockBodyInterceptor - -//NewBlockBodyInterceptor - -func TestNewBlockBodyInterceptor_NilMessengerShouldErr(t *testing.T) { - t.Parallel() - - cache := &mock.CacherStub{} - storer := &mock.StorerStub{} - - gbbi, err := NewGenericBlockBodyInterceptor( - nil, - cache, - storer, - 
mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - assert.Equal(t, process.ErrNilInterceptor, err) - assert.Nil(t, gbbi) -} - -func TestNewBlockBodyInterceptor_NilPoolShouldErr(t *testing.T) { - t.Parallel() - - interceptor := &mock.InterceptorStub{} - storer := &mock.StorerStub{} - - gbbi, err := NewGenericBlockBodyInterceptor( - interceptor, - nil, - storer, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - assert.Equal(t, process.ErrNilCacher, err) - assert.Nil(t, gbbi) -} - -func TestNewBlockBodyInterceptor_NilStorerShouldErr(t *testing.T) { - t.Parallel() - - interceptor := &mock.InterceptorStub{} - cache := &mock.CacherStub{} - - gbbi, err := NewGenericBlockBodyInterceptor( - interceptor, - cache, - nil, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - assert.Equal(t, process.ErrNilBlockBodyStorage, err) - assert.Nil(t, gbbi) -} - -func TestNewBlockBodyInterceptor_NilHasherShouldErr(t *testing.T) { - t.Parallel() - - cache := &mock.CacherStub{} - interceptor := &mock.InterceptorStub{} - storer := &mock.StorerStub{} - - gbbi, err := NewGenericBlockBodyInterceptor( - interceptor, - cache, - storer, - nil, - mock.NewOneShardCoordinatorMock()) - - assert.Equal(t, process.ErrNilHasher, err) - assert.Nil(t, gbbi) -} - -func TestNewBlockBodyInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { - t.Parallel() - - cache := &mock.CacherStub{} - interceptor := &mock.InterceptorStub{} - storer := &mock.StorerStub{} - - gbbi, err := NewGenericBlockBodyInterceptor( - interceptor, - cache, - storer, - mock.HasherMock{}, - nil) - - assert.Equal(t, process.ErrNilShardCoordinator, err) - assert.Nil(t, gbbi) -} - -func TestNewBlockBodyInterceptor_OkValsShouldWork(t *testing.T) { - t.Parallel() - - cache := &mock.CacherStub{} - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - storer := &mock.StorerStub{} - - gbbi, err := 
NewGenericBlockBodyInterceptor( - interceptor, - cache, - storer, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - assert.Nil(t, err) - assert.NotNil(t, gbbi) -} - -//processBodyBlock - -func TestBlockBodyInterceptor_ProcessNilHdrShouldErr(t *testing.T) { - t.Parallel() - - cache := &mock.CacherStub{} - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - storer := &mock.StorerStub{} - - gbbi, _ := NewGenericBlockBodyInterceptor( - interceptor, - cache, - storer, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - assert.Equal(t, process.ErrNilBlockBody, gbbi.ProcessBodyBlock(nil, make([]byte, 0))) -} - -func TestBlockBodyInterceptor_ProcessNilDataToProcessShouldErr(t *testing.T) { - t.Parallel() - - cache := &mock.CacherStub{} - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - storer := &mock.StorerStub{} - - gbbi, _ := NewGenericBlockBodyInterceptor( - interceptor, - cache, - storer, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - assert.Equal(t, process.ErrNilDataToProcess, - gbbi.ProcessBodyBlock(NewInterceptedTxBlockBody(), nil)) -} - -func TestBlockBodyInterceptor_ProcessHdrWrongTypeOfNewerShouldErr(t *testing.T) { - t.Parallel() - - cache := &mock.CacherStub{} - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - storer := &mock.StorerStub{} - - gbbi, _ := NewGenericBlockBodyInterceptor( - interceptor, - cache, - storer, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - assert.Equal(t, process.ErrBadInterceptorTopicImplementation, - gbbi.ProcessBodyBlock(&mock.StringCreator{}, make([]byte, 0))) -} - -func TestBlockBodyInterceptor_ProcessHdrSanityCheckFailedShouldErr(t *testing.T) { - t.Parallel() - - cache := 
&mock.CacherStub{} - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - storer := &mock.StorerStub{} - - gbbi, _ := NewGenericBlockBodyInterceptor( - interceptor, - cache, - storer, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - assert.Equal(t, process.ErrNilRootHash, - gbbi.ProcessBodyBlock(NewInterceptedTxBlockBody(), make([]byte, 0))) -} - -func TestBlockBodyInterceptor_ProcessOkValsShouldRetTrue(t *testing.T) { - t.Parallel() - - wasCalled := 0 - - cache := &mock.CacherStub{} - cache.PutCalled = func(key []byte, value interface{}) (evicted bool) { - if bytes.Equal(mock.HasherMock{}.Compute("aaa"), key) { - wasCalled++ - } - - return - } - storer := &mock.StorerStub{} - storer.HasCalled = func(key []byte) (bool, error) { - return false, nil - } - - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - - gbbi, _ := NewGenericBlockBodyInterceptor( - interceptor, - cache, - storer, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - miniBlock := block2.MiniBlock{} - miniBlock.TxHashes = append(miniBlock.TxHashes, []byte{65}) - - txBody := NewInterceptedTxBlockBody() - txBody.ShardID = 0 - txBody.MiniBlocks = make([]block2.MiniBlock, 0) - txBody.MiniBlocks = append(txBody.MiniBlocks, miniBlock) - txBody.RootHash = make([]byte, 0) - - assert.Nil(t, gbbi.ProcessBodyBlock(txBody, []byte("aaa"))) - assert.Equal(t, 1, wasCalled) -} - -func TestBlockBodyInterceptor_ProcessIsInStorageShouldNotAdd(t *testing.T) { - t.Parallel() - - wasCalled := 0 - - cache := &mock.CacherStub{} - cache.PutCalled = func(key []byte, value interface{}) (evicted bool) { - if bytes.Equal(mock.HasherMock{}.Compute("aaa"), key) { - wasCalled++ - } - - return - } - storer := &mock.StorerStub{} - storer.HasCalled = func(key []byte) (bool, error) { - return true, nil - } - - 
interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - - gbbi, _ := NewGenericBlockBodyInterceptor( - interceptor, - cache, - storer, - mock.HasherMock{}, - mock.NewOneShardCoordinatorMock()) - - miniBlock := block2.MiniBlock{} - miniBlock.TxHashes = append(miniBlock.TxHashes, []byte{65}) - - txBody := NewInterceptedTxBlockBody() - txBody.ShardID = 0 - txBody.MiniBlocks = make([]block2.MiniBlock, 0) - txBody.MiniBlocks = append(txBody.MiniBlocks, miniBlock) - txBody.RootHash = make([]byte, 0) - - assert.Nil(t, gbbi.ProcessBodyBlock(txBody, []byte("aaa"))) - assert.Equal(t, 0, wasCalled) -} diff --git a/process/block/process.go b/process/block/process.go index 2718691110c..6ab0af44664 100644 --- a/process/block/process.go +++ b/process/block/process.go @@ -7,7 +7,6 @@ import ( "fmt" "math/big" "sort" - "strconv" "sync" "time" @@ -18,7 +17,6 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" "github.com/ElrondNetwork/elrond-go-sandbox/display" "github.com/ElrondNetwork/elrond-go-sandbox/hashing" - "github.com/ElrondNetwork/elrond-go-sandbox/hashing/sha256" "github.com/ElrondNetwork/elrond-go-sandbox/logger" "github.com/ElrondNetwork/elrond-go-sandbox/marshal" "github.com/ElrondNetwork/elrond-go-sandbox/process" @@ -770,10 +768,6 @@ func createDisplayableHeaderAndBlockBody( func displayHeader(header *block.Header) []*display.LineData { lines := make([]*display.LineData, 0) - //TODO really remove this mock prints - aggrCommits := sha256.Sha256{}.Compute(string(sha256.Sha256{}.Compute(string(header.Commitment) + strconv.Itoa(int(header.Round))))) - aggrSigs := sha256.Sha256{}.Compute(string(sha256.Sha256{}.Compute(string(aggrCommits)))) - lines = append(lines, display.NewLineData(false, []string{ "Header", "Nonce", @@ -811,25 +805,14 @@ func displayHeader(header *block.Header) []*display.LineData { "Pub keys bitmap", toHex(header.PubKeysBitmap)})) - 
//TODO uncomment as this - //lines = append(lines, display.NewLineData(false, []string{ - // "", - // "Commitment", - // toB64(header.Commitment)})) - //lines = append(lines, display.NewLineData(true, []string{ - // "", - // "Signature", - // toB64(header.Signature)})) - - //TODO remove this lines = append(lines, display.NewLineData(false, []string{ "", "Commitment", - toB64(aggrCommits)})) + toB64(header.Commitment)})) lines = append(lines, display.NewLineData(true, []string{ "", "Signature", - toB64(aggrSigs)})) + toB64(header.Signature)})) return lines } diff --git a/process/block/resolvers/common_test.go b/process/block/resolvers/common_test.go new file mode 100644 index 00000000000..18894897caa --- /dev/null +++ b/process/block/resolvers/common_test.go @@ -0,0 +1,28 @@ +package resolvers_test + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/data" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" +) + +func createDataPool() *mock.TransientDataPoolStub { + transientPool := &mock.TransientDataPoolStub{} + transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { + return &mock.Uint64CacherStub{} + } + + return transientPool +} + +func createRequestMsg(dataType process.RequestDataType, val []byte) p2p.MessageP2P { + marshalizer := &mock.MarshalizerMock{} + + buff, _ := marshalizer.Marshal(&process.RequestData{Type: dataType, Value: val}) + + return &mock.P2PMessageMock{DataField: buff} +} diff --git a/process/block/resolvers/genericBlockBodyResolver.go b/process/block/resolvers/genericBlockBodyResolver.go new file mode 100644 index 00000000000..f59dd92b26a --- /dev/null +++ b/process/block/resolvers/genericBlockBodyResolver.go @@ -0,0 +1,103 @@ +package resolvers + +import ( + "fmt" + + 
"github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/storage" +) + +// GenericBlockBodyResolver is a wrapper over Resolver that is specialized in resolving block body requests +type GenericBlockBodyResolver struct { + process.TopicResolverSender + blockBodyPool storage.Cacher + blockStorage storage.Storer + marshalizer marshal.Marshalizer +} + +// NewGenericBlockBodyResolver creates a new block body resolver +func NewGenericBlockBodyResolver( + senderResolver process.TopicResolverSender, + blockBodyPool storage.Cacher, + blockBodyStorage storage.Storer, + marshalizer marshal.Marshalizer) (*GenericBlockBodyResolver, error) { + + if senderResolver == nil { + return nil, process.ErrNilResolverSender + } + + if blockBodyPool == nil { + return nil, process.ErrNilBlockBodyPool + } + + if blockBodyStorage == nil { + return nil, process.ErrNilBlockBodyStorage + } + + if marshalizer == nil { + return nil, process.ErrNilMarshalizer + } + + bbResolver := &GenericBlockBodyResolver{ + TopicResolverSender: senderResolver, + blockBodyPool: blockBodyPool, + blockStorage: blockBodyStorage, + marshalizer: marshalizer, + } + + return bbResolver, nil +} + +// ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received +// (for the topic this validator was registered to, usually a request topic) +func (gbbRes *GenericBlockBodyResolver) ProcessReceivedMessage(message p2p.MessageP2P) error { + rd := &process.RequestData{} + err := rd.Unmarshal(gbbRes.marshalizer, message) + if err != nil { + return err + } + + buff, err := gbbRes.resolveBlockBodyRequest(rd) + if err != nil { + return err + } + + if buff == nil { + log.Debug(fmt.Sprintf("missing data: %v", rd)) + return nil + } + + return gbbRes.Send(buff, message.Peer()) +} + +func (gbbRes *GenericBlockBodyResolver) 
resolveBlockBodyRequest(rd *process.RequestData) ([]byte, error) { + if rd.Type != process.HashType { + return nil, process.ErrResolveNotHashType + } + + if rd.Value == nil { + return nil, process.ErrNilValue + } + + blockBody, _ := gbbRes.blockBodyPool.Get(rd.Value) + if blockBody != nil { + buff, err := gbbRes.marshalizer.Marshal(blockBody) + if err != nil { + return nil, err + } + + return buff, nil + } + + return gbbRes.blockStorage.Get(rd.Value) +} + +// RequestDataFromHash requests a block body from other peers having input the block body hash +func (gbbRes *GenericBlockBodyResolver) RequestDataFromHash(hash []byte) error { + return gbbRes.SendOnRequestTopic(&process.RequestData{ + Type: process.HashType, + Value: hash, + }) +} diff --git a/process/block/resolvers/genericBlockBodyResolver_test.go b/process/block/resolvers/genericBlockBodyResolver_test.go new file mode 100644 index 00000000000..660f43ebb5a --- /dev/null +++ b/process/block/resolvers/genericBlockBodyResolver_test.go @@ -0,0 +1,304 @@ +package resolvers_test + +import ( + "bytes" + "testing" + + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/block/resolvers" + "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +//------- NewBlockBodyResolver + +func TestNewGenericBlockBodyResolver_NilSenderResolverShouldErr(t *testing.T) { + t.Parallel() + + gbbRes, err := resolvers.NewGenericBlockBodyResolver( + nil, + &mock.CacherStub{}, + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + ) + + assert.Equal(t, process.ErrNilResolverSender, err) + assert.Nil(t, gbbRes) +} + +func TestNewGenericBlockBodyResolver_NilBlockBodyPoolShouldErr(t *testing.T) { + t.Parallel() + + gbbRes, err := resolvers.NewGenericBlockBodyResolver( + &mock.TopicResolverSenderStub{}, + nil, + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + ) + + 
assert.Equal(t, process.ErrNilBlockBodyPool, err) + assert.Nil(t, gbbRes) +} + +func TestNewGenericBlockBodyResolver_NilBlockBodyStorageShouldErr(t *testing.T) { + t.Parallel() + + gbbRes, err := resolvers.NewGenericBlockBodyResolver( + &mock.TopicResolverSenderStub{}, + &mock.CacherStub{}, + nil, + &mock.MarshalizerMock{}, + ) + + assert.Equal(t, process.ErrNilBlockBodyStorage, err) + assert.Nil(t, gbbRes) +} + +func TestNewGenericBlockBodyResolver_NilBlockMArshalizerShouldErr(t *testing.T) { + t.Parallel() + + gbbRes, err := resolvers.NewGenericBlockBodyResolver( + &mock.TopicResolverSenderStub{}, + &mock.CacherStub{}, + &mock.StorerStub{}, + nil, + ) + + assert.Equal(t, process.ErrNilMarshalizer, err) + assert.Nil(t, gbbRes) +} + +func TestNewGenericBlockBodyResolver_OkValsShouldWork(t *testing.T) { + t.Parallel() + + gbbRes, err := resolvers.NewGenericBlockBodyResolver( + &mock.TopicResolverSenderStub{}, + &mock.CacherStub{}, + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + ) + + assert.Nil(t, err) + assert.NotNil(t, gbbRes) +} + +//------- ProcessReceivedMessage + +func TestNewGenericBlockBodyResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { + t.Parallel() + + gbbRes, _ := resolvers.NewGenericBlockBodyResolver( + &mock.TopicResolverSenderStub{}, + &mock.CacherStub{}, + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + ) + + err := gbbRes.ProcessReceivedMessage(createRequestMsg(process.HashType, nil)) + assert.Equal(t, process.ErrNilValue, err) +} + +func TestGenericBlockBodyResolver_ProcessReceivedMessageWrongTypeShouldErr(t *testing.T) { + t.Parallel() + + gbbRes, _ := resolvers.NewGenericBlockBodyResolver( + &mock.TopicResolverSenderStub{}, + &mock.CacherStub{}, + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + ) + + err := gbbRes.ProcessReceivedMessage(createRequestMsg(process.NonceType, make([]byte, 0))) + assert.Equal(t, process.ErrResolveNotHashType, err) +} + +func 
TestGenericBlockBodyResolver_ProcessReceivedMessageFoundInPoolShouldRetValAndSend(t *testing.T) { + t.Parallel() + + requestedBuff := []byte("aaa") + + wasResolved := false + wasSent := false + + cache := &mock.CacherStub{} + cache.GetCalled = func(key []byte) (value interface{}, ok bool) { + if bytes.Equal(key, requestedBuff) { + wasResolved = true + return make([]byte, 0), true + } + + return nil, false + } + + marshalizer := &mock.MarshalizerMock{} + + gbbRes, _ := resolvers.NewGenericBlockBodyResolver( + &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer p2p.PeerID) error { + wasSent = true + return nil + }, + }, + cache, + &mock.StorerStub{}, + marshalizer, + ) + + err := gbbRes.ProcessReceivedMessage(createRequestMsg( + process.HashType, + requestedBuff)) + + assert.Nil(t, err) + assert.True(t, wasResolved) + assert.True(t, wasSent) +} + +func TestGenericBlockBodyResolver_ProcessReceivedMessageFoundInPoolMarshalizerFailShouldErr(t *testing.T) { + t.Parallel() + + requestedBuff := []byte("aaa") + + errExpected := errors.New("expected error") + + cache := &mock.CacherStub{} + cache.GetCalled = func(key []byte) (value interface{}, ok bool) { + if bytes.Equal(key, requestedBuff) { + return make([]byte, 0), true + } + + return nil, false + } + + marshalizer := &mock.MarshalizerStub{ + MarshalCalled: func(obj interface{}) (i []byte, e error) { + return nil, errExpected + }, + UnmarshalCalled: func(obj interface{}, buff []byte) error { + m := &mock.MarshalizerMock{} + + return m.Unmarshal(obj, buff) + }, + } + + gbbRes, _ := resolvers.NewGenericBlockBodyResolver( + &mock.TopicResolverSenderStub{}, + cache, + &mock.StorerStub{}, + marshalizer, + ) + + err := gbbRes.ProcessReceivedMessage(createRequestMsg( + process.HashType, + requestedBuff)) + + assert.Equal(t, errExpected, err) + +} + +func TestGenericBlockBodyResolver_ProcessReceivedMessageNotFoundInPoolShouldRetFromStorageAndSend(t *testing.T) { + t.Parallel() + + requestedBuff := []byte("aaa") + 
+ wasResolved := false + wasSend := false + + cache := &mock.CacherStub{} + cache.GetCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false + } + + store := &mock.StorerStub{} + store.GetCalled = func(key []byte) (i []byte, e error) { + wasResolved = true + return make([]byte, 0), nil + } + + marshalizer := &mock.MarshalizerMock{} + + gbbRes, _ := resolvers.NewGenericBlockBodyResolver( + &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer p2p.PeerID) error { + wasSend = true + return nil + }, + }, + cache, + store, + marshalizer, + ) + + err := gbbRes.ProcessReceivedMessage(createRequestMsg( + process.HashType, + requestedBuff)) + + assert.Nil(t, err) + assert.True(t, wasResolved) + assert.True(t, wasSend) +} + +func TestGenericBlockBodyResolver_ProcessReceivedMessageMissingDataShouldNotSend(t *testing.T) { + t.Parallel() + + requestedBuff := []byte("aaa") + + wasSend := false + + cache := &mock.CacherStub{} + cache.GetCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false + } + + store := &mock.StorerStub{} + store.GetCalled = func(key []byte) (i []byte, e error) { + return nil, nil + } + + marshalizer := &mock.MarshalizerMock{} + + gbbRes, _ := resolvers.NewGenericBlockBodyResolver( + &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer p2p.PeerID) error { + wasSend = true + return nil + }, + }, + cache, + store, + marshalizer, + ) + + err := gbbRes.ProcessReceivedMessage(createRequestMsg( + process.HashType, + requestedBuff)) + + assert.Nil(t, err) + assert.False(t, wasSend) +} + +//------- Requests + +func TestBlockBodyResolver_RequestDataFromHashShouldWork(t *testing.T) { + t.Parallel() + + wasCalled := false + + buffRequested := []byte("aaaa") + + gbbRes, _ := resolvers.NewGenericBlockBodyResolver( + &mock.TopicResolverSenderStub{ + SendOnRequestTopicCalled: func(rd *process.RequestData) error { + wasCalled = true + return nil + }, + }, + &mock.CacherStub{}, + &mock.StorerStub{}, + 
&mock.MarshalizerMock{}, + ) + + assert.Nil(t, gbbRes.RequestDataFromHash(buffRequested)) + assert.True(t, wasCalled) +} diff --git a/process/block/resolvers.go b/process/block/resolvers/headerResolver.go similarity index 51% rename from process/block/resolvers.go rename to process/block/resolvers/headerResolver.go index 27f5c09bb96..e87ec12f79a 100644 --- a/process/block/resolvers.go +++ b/process/block/resolvers/headerResolver.go @@ -1,16 +1,22 @@ -package block +package resolvers import ( + "fmt" + "github.com/ElrondNetwork/elrond-go-sandbox/data" "github.com/ElrondNetwork/elrond-go-sandbox/data/typeConverters" + "github.com/ElrondNetwork/elrond-go-sandbox/logger" "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/ElrondNetwork/elrond-go-sandbox/storage" ) +var log = logger.NewDefaultLogger() + // HeaderResolver is a wrapper over Resolver that is specialized in resolving headers requests type HeaderResolver struct { - process.Resolver + process.TopicResolverSender hdrPool data.ShardedDataCacherNotifier hdrNonces data.Uint64Cacher hdrStorage storage.Storer @@ -18,27 +24,17 @@ type HeaderResolver struct { nonceConverter typeConverters.Uint64ByteSliceConverter } -// GenericBlockBodyResolver is a wrapper over Resolver that is specialized in resolving block body requests -type GenericBlockBodyResolver struct { - process.Resolver - blockBodyPool storage.Cacher - blockStorage storage.Storer - marshalizer marshal.Marshalizer -} - -//------- headerResolver - // NewHeaderResolver creates a new header resolver func NewHeaderResolver( - resolver process.Resolver, + senderResolver process.TopicResolverSender, transient data.TransientDataHolder, hdrStorage storage.Storer, marshalizer marshal.Marshalizer, nonceConverter typeConverters.Uint64ByteSliceConverter, ) (*HeaderResolver, error) { - if resolver == nil { - return nil, process.ErrNilResolver + if 
senderResolver == nil { + return nil, process.ErrNilResolverSender } if transient == nil { @@ -68,19 +64,40 @@ func NewHeaderResolver( } hdrResolver := &HeaderResolver{ - Resolver: resolver, - hdrPool: transient.Headers(), - hdrNonces: transient.HeadersNonces(), - hdrStorage: hdrStorage, - marshalizer: marshalizer, - nonceConverter: nonceConverter, + TopicResolverSender: senderResolver, + hdrPool: transient.Headers(), + hdrNonces: transient.HeadersNonces(), + hdrStorage: hdrStorage, + marshalizer: marshalizer, + nonceConverter: nonceConverter, } - hdrResolver.SetResolverHandler(hdrResolver.resolveHdrRequest) return hdrResolver, nil } -func (hdrRes *HeaderResolver) resolveHdrRequest(rd process.RequestData) ([]byte, error) { +// ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received +// (for the topic this validator was registered to, usually a request topic) +func (hdrRes *HeaderResolver) ProcessReceivedMessage(message p2p.MessageP2P) error { + rd := &process.RequestData{} + err := rd.Unmarshal(hdrRes.marshalizer, message) + if err != nil { + return err + } + + buff, err := hdrRes.resolveHdrRequest(rd) + if err != nil { + return err + } + + if buff == nil { + log.Debug(fmt.Sprintf("missing data: %v", rd)) + return nil + } + + return hdrRes.Send(buff, message.Peer()) +} + +func (hdrRes *HeaderResolver) resolveHdrRequest(rd *process.RequestData) ([]byte, error) { if rd.Value == nil { return nil, process.ErrNilValue } @@ -145,84 +162,18 @@ func (hdrRes *HeaderResolver) resolveHeaderFromNonce(key []byte) ([]byte, error) return buff, nil } -// RequestHeaderFromHash requests a header from other peers having input the hdr hash -func (hdrRes *HeaderResolver) RequestHeaderFromHash(hash []byte) error { - return hdrRes.RequestData(process.RequestData{ +// RequestDataFromHash requests a header from other peers having input the hdr hash +func (hdrRes *HeaderResolver) RequestDataFromHash(hash []byte) error { 
+ return hdrRes.SendOnRequestTopic(&process.RequestData{ Type: process.HashType, Value: hash, }) } -// RequestHeaderFromNonce requests a header from other peers having input the hdr nonce -func (hdrRes *HeaderResolver) RequestHeaderFromNonce(nonce uint64) error { - return hdrRes.RequestData(process.RequestData{ +// RequestDataFromNonce requests a header from other peers having input the hdr nonce +func (hdrRes *HeaderResolver) RequestDataFromNonce(nonce uint64) error { + return hdrRes.SendOnRequestTopic(&process.RequestData{ Type: process.NonceType, Value: hdrRes.nonceConverter.ToByteSlice(nonce), }) } - -//------- genericBlockBodyResolver - -// NewGenericBlockBodyResolver creates a new block body resolver -func NewGenericBlockBodyResolver( - resolver process.Resolver, - blockBodyPool storage.Cacher, - blockBodyStorage storage.Storer, - marshalizer marshal.Marshalizer) (*GenericBlockBodyResolver, error) { - - if resolver == nil { - return nil, process.ErrNilResolver - } - - if blockBodyPool == nil { - return nil, process.ErrNilBlockBodyPool - } - - if blockBodyStorage == nil { - return nil, process.ErrNilBlockBodyStorage - } - - if marshalizer == nil { - return nil, process.ErrNilMarshalizer - } - - bbResolver := &GenericBlockBodyResolver{ - Resolver: resolver, - blockBodyPool: blockBodyPool, - blockStorage: blockBodyStorage, - marshalizer: marshalizer, - } - bbResolver.SetResolverHandler(bbResolver.resolveBlockBodyRequest) - - return bbResolver, nil -} - -func (gbbRes *GenericBlockBodyResolver) resolveBlockBodyRequest(rd process.RequestData) ([]byte, error) { - if rd.Type != process.HashType { - return nil, process.ErrResolveNotHashType - } - - if rd.Value == nil { - return nil, process.ErrNilValue - } - - blockBody, _ := gbbRes.blockBodyPool.Get(rd.Value) - if blockBody != nil { - buff, err := gbbRes.marshalizer.Marshal(blockBody) - if err != nil { - return nil, err - } - - return buff, nil - } - - return gbbRes.blockStorage.Get(rd.Value) -} - -// 
RequestBlockBodyFromHash requests a block body from other peers having input the block body hash -func (gbbRes *GenericBlockBodyResolver) RequestBlockBodyFromHash(hash []byte) error { - return gbbRes.RequestData(process.RequestData{ - Type: process.HashType, - Value: hash, - }) -} diff --git a/process/block/resolvers/headerResolver_test.go b/process/block/resolvers/headerResolver_test.go new file mode 100644 index 00000000000..0e2356a1bd6 --- /dev/null +++ b/process/block/resolvers/headerResolver_test.go @@ -0,0 +1,664 @@ +package resolvers_test + +import ( + "bytes" + "errors" + "testing" + + "github.com/ElrondNetwork/elrond-go-sandbox/data" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/block/resolvers" + "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" + "github.com/stretchr/testify/assert" +) + +//------- NewHeaderResolver + +func TestNewHeaderResolver_NilSenderResolverShouldErr(t *testing.T) { + t.Parallel() + + hdrRes, err := resolvers.NewHeaderResolver( + nil, + &mock.TransientDataPoolStub{}, + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + mock.NewNonceHashConverterMock(), + ) + + assert.Equal(t, process.ErrNilResolverSender, err) + assert.Nil(t, hdrRes) +} + +func TestNewHeaderResolver_NilTransientPoolShouldErr(t *testing.T) { + t.Parallel() + + hdrRes, err := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{}, + nil, + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + mock.NewNonceHashConverterMock(), + ) + + assert.Equal(t, process.ErrNilTransientPool, err) + assert.Nil(t, hdrRes) +} + +func TestNewHeaderResolver_NilTransientHeadersPoolShouldErr(t *testing.T) { + t.Parallel() + + transientPool := createDataPool() + transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { + return nil + } + + hdrRes, err := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{}, + transientPool, + &mock.StorerStub{}, + 
&mock.MarshalizerMock{}, + mock.NewNonceHashConverterMock(), + ) + + assert.Equal(t, process.ErrNilHeadersDataPool, err) + assert.Nil(t, hdrRes) +} + +func TestNewHeaderResolver_NilTransientHeadersNoncesPoolShouldErr(t *testing.T) { + t.Parallel() + + transientPool := createDataPool() + transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { + return nil + } + + hdrRes, err := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{}, + transientPool, + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + mock.NewNonceHashConverterMock(), + ) + + assert.Equal(t, process.ErrNilHeadersNoncesDataPool, err) + assert.Nil(t, hdrRes) +} + +func TestNewHeaderResolver_NilHeadersStorageShouldErr(t *testing.T) { + t.Parallel() + + hdrRes, err := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{}, + createDataPool(), + nil, + &mock.MarshalizerMock{}, + mock.NewNonceHashConverterMock(), + ) + + assert.Equal(t, process.ErrNilHeadersStorage, err) + assert.Nil(t, hdrRes) +} + +func TestNewHeaderResolver_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + hdrRes, err := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{}, + createDataPool(), + &mock.StorerStub{}, + nil, + mock.NewNonceHashConverterMock(), + ) + + assert.Equal(t, process.ErrNilMarshalizer, err) + assert.Nil(t, hdrRes) +} + +func TestNewHeaderResolver_NilNonceConverterShouldErr(t *testing.T) { + t.Parallel() + + hdrRes, err := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{}, + createDataPool(), + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + nil, + ) + + assert.Equal(t, process.ErrNilNonceConverter, err) + assert.Nil(t, hdrRes) +} + +func TestNewHeaderResolver_OkValsShouldWork(t *testing.T) { + t.Parallel() + + hdrRes, err := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{}, + createDataPool(), + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + mock.NewNonceHashConverterMock(), + ) + + assert.NotNil(t, hdrRes) + assert.Nil(t, err) +} + +//------- 
ProcessReceivedMessage + +func TestHeaderResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { + t.Parallel() + + hdrRes, _ := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{}, + createDataPool(), + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + mock.NewNonceHashConverterMock(), + ) + + err := hdrRes.ProcessReceivedMessage(createRequestMsg(process.NonceType, nil)) + assert.Equal(t, process.ErrNilValue, err) +} + +func TestHeaderResolver_ProcessReceivedMessageRequestUnknownTypeShouldErr(t *testing.T) { + t.Parallel() + + hdrRes, _ := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{}, + createDataPool(), + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + mock.NewNonceHashConverterMock(), + ) + + err := hdrRes.ProcessReceivedMessage(createRequestMsg(254, make([]byte, 0))) + assert.Equal(t, process.ErrResolveTypeUnknown, err) + +} + +func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSend(t *testing.T) { + t.Parallel() + + requestedData := []byte("aaaa") + + searchWasCalled := false + sendWasCalled := false + + transientPool := createDataPool() + transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { + headers := &mock.ShardedDataStub{} + + headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { + if bytes.Equal(requestedData, key) { + searchWasCalled = true + return make([]byte, 0), true + } + return nil, false + } + + return headers + } + + marshalizer := &mock.MarshalizerMock{} + + hdrRes, _ := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer p2p.PeerID) error { + sendWasCalled = true + return nil + }, + }, + transientPool, + &mock.StorerStub{}, + marshalizer, + mock.NewNonceHashConverterMock(), + ) + + err := hdrRes.ProcessReceivedMessage(createRequestMsg(process.HashType, requestedData)) + assert.Nil(t, err) + assert.True(t, searchWasCalled) + assert.True(t, sendWasCalled) +} + +func 
TestHeaderResolver_ProcessReceivedMessageRequestHashTypeFoundInHdrPoolMarshalizerFailsShouldErr(t *testing.T) { + t.Parallel() + + requestedData := []byte("aaaa") + resolvedData := []byte("bbbb") + + errExpected := errors.New("MarshalizerMock generic error") + + transientPool := createDataPool() + transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { + headers := &mock.ShardedDataStub{} + + headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { + if bytes.Equal(requestedData, key) { + return resolvedData, true + } + return nil, false + } + + return headers + } + + marshalizerMock := &mock.MarshalizerMock{} + marshalizerStub := &mock.MarshalizerStub{ + MarshalCalled: func(obj interface{}) (i []byte, e error) { + return nil, errExpected + }, + UnmarshalCalled: func(obj interface{}, buff []byte) error { + return marshalizerMock.Unmarshal(obj, buff) + }, + } + + hdrRes, _ := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer p2p.PeerID) error { + return nil + }, + }, + transientPool, + &mock.StorerStub{}, + marshalizerStub, + mock.NewNonceHashConverterMock(), + ) + + err := hdrRes.ProcessReceivedMessage(createRequestMsg(process.HashType, requestedData)) + assert.Equal(t, errExpected, err) +} + +func TestHeaderResolver_ProcessReceivedMessageRequestRetFromStorageShouldRetValAndSend(t *testing.T) { + t.Parallel() + + requestedData := []byte("aaaa") + + transientPool := createDataPool() + transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { + headers := &mock.ShardedDataStub{} + + headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false + } + + return headers + } + + wasGotFromStorage := false + wasSent := false + + store := &mock.StorerStub{} + store.GetCalled = func(key []byte) (i []byte, e error) { + if bytes.Equal(key, requestedData) { + wasGotFromStorage = true + return make([]byte, 0), nil + } + + return nil, nil + } + + 
marshalizer := &mock.MarshalizerMock{} + + hdrRes, _ := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer p2p.PeerID) error { + wasSent = true + return nil + }, + }, + transientPool, + store, + marshalizer, + mock.NewNonceHashConverterMock(), + ) + + err := hdrRes.ProcessReceivedMessage(createRequestMsg(process.HashType, requestedData)) + assert.Nil(t, err) + assert.True(t, wasGotFromStorage) + assert.True(t, wasSent) +} + +func TestHeaderResolver_ProcessReceivedMessageRequestRetFromStorageCheckRetError(t *testing.T) { + t.Parallel() + + requestedData := []byte("aaaa") + + transientPool := createDataPool() + transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { + headers := &mock.ShardedDataStub{} + + headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false + } + + return headers + } + + errExpected := errors.New("expected error") + + store := &mock.StorerStub{} + store.GetCalled = func(key []byte) (i []byte, e error) { + if bytes.Equal(key, requestedData) { + return nil, errExpected + } + + return nil, nil + } + + marshalizer := &mock.MarshalizerMock{} + + hdrRes, _ := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{}, + transientPool, + store, + marshalizer, + mock.NewNonceHashConverterMock(), + ) + + err := hdrRes.ProcessReceivedMessage(createRequestMsg(process.HashType, requestedData)) + assert.Equal(t, errExpected, err) +} + +func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeInvalidSliceShouldErr(t *testing.T) { + t.Parallel() + + hdrRes, _ := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{}, + createDataPool(), + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + mock.NewNonceHashConverterMock(), + ) + + err := hdrRes.ProcessReceivedMessage(createRequestMsg(process.NonceType, []byte("aaa"))) + assert.Equal(t, process.ErrInvalidNonceByteSlice, err) +} + +func 
TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeNotFoundInHdrNoncePoolShouldRetNilAndNotSend(t *testing.T) { + t.Parallel() + + requestedNonce := uint64(67) + + transientPool := createDataPool() + transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { + headersNonces := &mock.Uint64CacherStub{} + headersNonces.GetCalled = func(u uint64) (i []byte, b bool) { + return nil, false + } + + return headersNonces + } + + nonceConverter := mock.NewNonceHashConverterMock() + + wasSent := false + + hdrRes, _ := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer p2p.PeerID) error { + wasSent = true + return nil + }, + }, + transientPool, + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + nonceConverter, + ) + + err := hdrRes.ProcessReceivedMessage(createRequestMsg( + process.NonceType, + nonceConverter.ToByteSlice(requestedNonce))) + assert.Nil(t, err) + assert.False(t, wasSent) +} + +func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoolShouldRetFromPoolAndSend(t *testing.T) { + t.Parallel() + + requestedNonce := uint64(67) + + wasResolved := false + wasSent := false + + transientPool := &mock.TransientDataPoolStub{} + transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { + headers := &mock.ShardedDataStub{} + + headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { + if bytes.Equal(key, []byte("aaaa")) { + wasResolved = true + return make([]byte, 0), true + } + + return nil, false + } + + return headers + } + transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { + headersNonces := &mock.Uint64CacherStub{} + headersNonces.GetCalled = func(u uint64) (i []byte, b bool) { + if u == requestedNonce { + return []byte("aaaa"), true + } + + return nil, false + } + + return headersNonces + } + + nonceConverter := mock.NewNonceHashConverterMock() + marshalizer := &mock.MarshalizerMock{} + + hdrRes, _ := resolvers.NewHeaderResolver( + 
&mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer p2p.PeerID) error { + wasSent = true + return nil + }, + }, + transientPool, + &mock.StorerStub{}, + marshalizer, + nonceConverter, + ) + + err := hdrRes.ProcessReceivedMessage(createRequestMsg( + process.NonceType, + nonceConverter.ToByteSlice(requestedNonce))) + + assert.Nil(t, err) + assert.True(t, wasResolved) + assert.True(t, wasSent) +} + +func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoolShouldRetFromStorageAndSend(t *testing.T) { + t.Parallel() + + requestedNonce := uint64(67) + + wasResolved := false + wasSend := false + + transientPool := &mock.TransientDataPoolStub{} + transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { + headers := &mock.ShardedDataStub{} + + headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false + } + + return headers + } + transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { + headersNonces := &mock.Uint64CacherStub{} + headersNonces.GetCalled = func(u uint64) (i []byte, b bool) { + if u == requestedNonce { + return []byte("aaaa"), true + } + + return nil, false + } + + return headersNonces + } + + nonceConverter := mock.NewNonceHashConverterMock() + marshalizer := &mock.MarshalizerMock{} + + store := &mock.StorerStub{} + store.GetCalled = func(key []byte) (i []byte, e error) { + if bytes.Equal(key, []byte("aaaa")) { + wasResolved = true + return make([]byte, 0), nil + } + + return nil, nil + } + + hdrRes, _ := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer p2p.PeerID) error { + wasSend = true + return nil + }, + }, + transientPool, + store, + marshalizer, + nonceConverter, + ) + + err := hdrRes.ProcessReceivedMessage(createRequestMsg( + process.NonceType, + nonceConverter.ToByteSlice(requestedNonce))) + + assert.Nil(t, err) + assert.True(t, wasResolved) + assert.True(t, wasSend) +} + +func 
TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoolCheckRetErr(t *testing.T) { + t.Parallel() + + requestedNonce := uint64(67) + + errExpected := errors.New("expected error") + + transientPool := &mock.TransientDataPoolStub{} + transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { + headers := &mock.ShardedDataStub{} + + headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false + } + + return headers + } + transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { + headersNonces := &mock.Uint64CacherStub{} + headersNonces.GetCalled = func(u uint64) (i []byte, b bool) { + if u == requestedNonce { + return []byte("aaaa"), true + } + + return nil, false + } + + return headersNonces + } + + nonceConverter := mock.NewNonceHashConverterMock() + marshalizer := &mock.MarshalizerMock{} + + store := &mock.StorerStub{} + store.GetCalled = func(key []byte) (i []byte, e error) { + if bytes.Equal(key, []byte("aaaa")) { + return nil, errExpected + } + + return nil, nil + } + + hdrRes, _ := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer p2p.PeerID) error { + return nil + }, + }, + transientPool, + store, + marshalizer, + nonceConverter, + ) + + err := hdrRes.ProcessReceivedMessage(createRequestMsg( + process.NonceType, + nonceConverter.ToByteSlice(requestedNonce))) + + assert.Equal(t, errExpected, err) +} + +//------- Requests + +func TestHeaderResolver_RequestDataFromHashShouldWork(t *testing.T) { + t.Parallel() + + buffRequested := []byte("aaaa") + + wasRequested := false + + nonceConverter := mock.NewNonceHashConverterMock() + + hdrRes, _ := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{ + SendOnRequestTopicCalled: func(rd *process.RequestData) error { + if bytes.Equal(rd.Value, buffRequested) { + wasRequested = true + } + + return nil + }, + }, + createDataPool(), + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + 
nonceConverter, + ) + + assert.Nil(t, hdrRes.RequestDataFromHash(buffRequested)) + assert.True(t, wasRequested) +} + +func TestHeaderResolver_RequestDataFromNonceShouldWork(t *testing.T) { + t.Parallel() + + nonceRequested := uint64(67) + wasRequested := false + + nonceConverter := mock.NewNonceHashConverterMock() + + buffToExpect := nonceConverter.ToByteSlice(nonceRequested) + + hdrRes, _ := resolvers.NewHeaderResolver( + &mock.TopicResolverSenderStub{ + SendOnRequestTopicCalled: func(rd *process.RequestData) error { + if bytes.Equal(rd.Value, buffToExpect) { + wasRequested = true + } + return nil + }, + }, + createDataPool(), + &mock.StorerStub{}, + &mock.MarshalizerMock{}, + nonceConverter, + ) + + assert.Nil(t, hdrRes.RequestDataFromNonce(nonceRequested)) + assert.True(t, wasRequested) +} diff --git a/process/block/resolvers_test.go b/process/block/resolvers_test.go deleted file mode 100644 index b17e3a09018..00000000000 --- a/process/block/resolvers_test.go +++ /dev/null @@ -1,1046 +0,0 @@ -package block_test - -import ( - "bytes" - "errors" - "testing" - - "github.com/ElrondNetwork/elrond-go-sandbox/data" - "github.com/ElrondNetwork/elrond-go-sandbox/process" - "github.com/ElrondNetwork/elrond-go-sandbox/process/block" - "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" - "github.com/stretchr/testify/assert" -) - -//------- headerResolver - -// NewHeaderResolver - -func TestNewHeaderResolver_NilResolverShouldErr(t *testing.T) { - t.Parallel() - - hdrRes, err := block.NewHeaderResolver( - nil, - &mock.TransientDataPoolStub{}, - &mock.StorerStub{}, - &mock.MarshalizerMock{}, - mock.NewNonceHashConverterMock(), - ) - - assert.Equal(t, process.ErrNilResolver, err) - assert.Nil(t, hdrRes) -} - -func TestNewHeaderResolver_NilTransientPoolShouldErr(t *testing.T) { - t.Parallel() - - hdrRes, err := block.NewHeaderResolver( - &mock.ResolverStub{}, - nil, - &mock.StorerStub{}, - &mock.MarshalizerMock{}, - mock.NewNonceHashConverterMock(), - ) - - 
assert.Equal(t, process.ErrNilTransientPool, err) - assert.Nil(t, hdrRes) -} - -func TestNewHeaderResolver_NilTransientHeadersPoolShouldErr(t *testing.T) { - t.Parallel() - - transientPool := &mock.TransientDataPoolStub{} - transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return nil - } - transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - - hdrRes, err := block.NewHeaderResolver( - &mock.ResolverStub{}, - transientPool, - &mock.StorerStub{}, - &mock.MarshalizerMock{}, - mock.NewNonceHashConverterMock(), - ) - - assert.Equal(t, process.ErrNilHeadersDataPool, err) - assert.Nil(t, hdrRes) -} - -func TestNewHeaderResolver_NilTransientHeadersNoncesPoolShouldErr(t *testing.T) { - t.Parallel() - - transientPool := &mock.TransientDataPoolStub{} - transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return nil - } - - hdrRes, err := block.NewHeaderResolver( - &mock.ResolverStub{}, - transientPool, - &mock.StorerStub{}, - &mock.MarshalizerMock{}, - mock.NewNonceHashConverterMock(), - ) - - assert.Equal(t, process.ErrNilHeadersNoncesDataPool, err) - assert.Nil(t, hdrRes) -} - -func TestNewHeaderResolver_NilHeadersStorageShouldErr(t *testing.T) { - t.Parallel() - - transientPool := &mock.TransientDataPoolStub{} - transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - - hdrRes, err := block.NewHeaderResolver( - &mock.ResolverStub{}, - transientPool, - nil, - &mock.MarshalizerMock{}, - mock.NewNonceHashConverterMock(), - ) - - assert.Equal(t, process.ErrNilHeadersStorage, err) - assert.Nil(t, hdrRes) -} - -func TestNewHeaderResolver_NilMarshalizerShouldErr(t *testing.T) { - t.Parallel() - - transientPool := 
&mock.TransientDataPoolStub{} - transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - - hdrRes, err := block.NewHeaderResolver( - &mock.ResolverStub{}, - transientPool, - &mock.StorerStub{}, - nil, - mock.NewNonceHashConverterMock(), - ) - - assert.Equal(t, process.ErrNilMarshalizer, err) - assert.Nil(t, hdrRes) -} - -func TestNewHeaderResolver_NilNonceConverterShouldErr(t *testing.T) { - t.Parallel() - - transientPool := &mock.TransientDataPoolStub{} - transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - - hdrRes, err := block.NewHeaderResolver( - &mock.ResolverStub{}, - transientPool, - &mock.StorerStub{}, - &mock.MarshalizerMock{}, - nil, - ) - - assert.Equal(t, process.ErrNilNonceConverter, err) - assert.Nil(t, hdrRes) -} - -func TestNewHeaderResolver_OkValsShouldWork(t *testing.T) { - t.Parallel() - - wasCalled := false - - topicResolver := &mock.ResolverStub{} - topicResolver.SetResolverHandlerCalled = func(i func(rd process.RequestData) ([]byte, error)) { - wasCalled = true - } - - transientPool := &mock.TransientDataPoolStub{} - transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - - hdrRes, err := block.NewHeaderResolver( - topicResolver, - transientPool, - &mock.StorerStub{}, - &mock.MarshalizerMock{}, - mock.NewNonceHashConverterMock(), - ) - - assert.NotNil(t, hdrRes) - assert.Nil(t, err) - assert.True(t, wasCalled) -} - -// resolveHdrRequest - -func TestHeaderResolver_ResolveHdrRequestNilValueShouldErr(t *testing.T) { - t.Parallel() - - topicResolver := &mock.ResolverStub{} - 
topicResolver.SetResolverHandlerCalled = func(i func(rd process.RequestData) ([]byte, error)) { - } - - transientPool := &mock.TransientDataPoolStub{} - transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - - hdrRes, _ := block.NewHeaderResolver( - topicResolver, - transientPool, - &mock.StorerStub{}, - &mock.MarshalizerMock{}, - mock.NewNonceHashConverterMock(), - ) - - buff, err := hdrRes.ResolveHdrRequest(process.RequestData{Type: process.NonceType, Value: nil}) - assert.Nil(t, buff) - assert.Equal(t, process.ErrNilValue, err) - -} - -func TestHeaderResolver_ResolveHdrRequestUnknownTypeShouldErr(t *testing.T) { - t.Parallel() - - topicResolver := &mock.ResolverStub{} - topicResolver.SetResolverHandlerCalled = func(i func(rd process.RequestData) ([]byte, error)) { - } - - transientPool := &mock.TransientDataPoolStub{} - transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - - hdrRes, _ := block.NewHeaderResolver( - topicResolver, - transientPool, - &mock.StorerStub{}, - &mock.MarshalizerMock{}, - mock.NewNonceHashConverterMock(), - ) - - buff, err := hdrRes.ResolveHdrRequest(process.RequestData{Type: 254, Value: make([]byte, 0)}) - assert.Nil(t, buff) - assert.Equal(t, process.ErrResolveTypeUnknown, err) - -} - -func TestHeaderResolver_ResolveHdrRequestHashTypeFoundInHdrPoolShouldRetValue(t *testing.T) { - t.Parallel() - - topicResolver := &mock.ResolverStub{} - topicResolver.SetResolverHandlerCalled = func(i func(rd process.RequestData) ([]byte, error)) { - } - - requestedData := []byte("aaaa") - resolvedData := []byte("bbbb") - - transientPool := &mock.TransientDataPoolStub{} - transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - 
headers := &mock.ShardedDataStub{} - - headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal(requestedData, key) { - return resolvedData, true - } - return nil, false - } - - return headers - } - transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - - marshalizer := &mock.MarshalizerMock{} - - hdrRes, _ := block.NewHeaderResolver( - topicResolver, - transientPool, - &mock.StorerStub{}, - marshalizer, - mock.NewNonceHashConverterMock(), - ) - - buff, err := hdrRes.ResolveHdrRequest(process.RequestData{Type: process.HashType, Value: requestedData}) - assert.Nil(t, err) - - recoveredResolved := make([]byte, 0) - err = marshalizer.Unmarshal(&recoveredResolved, buff) - assert.Equal(t, resolvedData, recoveredResolved) -} - -func TestHeaderResolver_ResolveHdrRequestHashTypeFoundInHdrPoolMarshalizerFailsShouldErr(t *testing.T) { - t.Parallel() - - topicResolver := &mock.ResolverStub{} - topicResolver.SetResolverHandlerCalled = func(i func(rd process.RequestData) ([]byte, error)) { - } - - requestedData := []byte("aaaa") - resolvedData := []byte("bbbb") - - transientPool := &mock.TransientDataPoolStub{} - transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - headers := &mock.ShardedDataStub{} - - headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal(requestedData, key) { - return resolvedData, true - } - return nil, false - } - - return headers - } - transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - - marshalizer := &mock.MarshalizerStub{} - marshalizer.MarshalCalled = func(obj interface{}) (i []byte, e error) { - return nil, errors.New("MarshalizerMock generic error") - } - - hdrRes, _ := block.NewHeaderResolver( - topicResolver, - transientPool, - &mock.StorerStub{}, - marshalizer, - mock.NewNonceHashConverterMock(), - ) - - buff, err := 
hdrRes.ResolveHdrRequest(process.RequestData{Type: process.HashType, Value: requestedData}) - assert.Equal(t, "MarshalizerMock generic error", err.Error()) - assert.Nil(t, buff) -} - -func TestHeaderResolver_ResolveHdrRequestRetFromStorageShouldRetVal(t *testing.T) { - t.Parallel() - - topicResolver := &mock.ResolverStub{} - topicResolver.SetResolverHandlerCalled = func(i func(rd process.RequestData) ([]byte, error)) { - } - - requestedData := []byte("aaaa") - resolvedData := []byte("bbbb") - - transientPool := &mock.TransientDataPoolStub{} - transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - headers := &mock.ShardedDataStub{} - - headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { - return nil, false - } - - return headers - } - transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - - store := &mock.StorerStub{} - store.GetCalled = func(key []byte) (i []byte, e error) { - if bytes.Equal(key, requestedData) { - return resolvedData, nil - } - - return nil, nil - } - - marshalizer := &mock.MarshalizerMock{} - - hdrRes, _ := block.NewHeaderResolver( - topicResolver, - transientPool, - store, - marshalizer, - mock.NewNonceHashConverterMock(), - ) - - buff, _ := hdrRes.ResolveHdrRequest(process.RequestData{Type: process.HashType, Value: requestedData}) - assert.Equal(t, resolvedData, buff) -} - -func TestHeaderResolver_ResolveHdrRequestRetFromStorageCheckRetError(t *testing.T) { - t.Parallel() - - topicResolver := &mock.ResolverStub{} - topicResolver.SetResolverHandlerCalled = func(i func(rd process.RequestData) ([]byte, error)) { - } - - requestedData := []byte("aaaa") - resolvedData := []byte("bbbb") - - transientPool := &mock.TransientDataPoolStub{} - transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - headers := &mock.ShardedDataStub{} - - headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { - return nil, false - } - - 
return headers - } - transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - - store := &mock.StorerStub{} - store.GetCalled = func(key []byte) (i []byte, e error) { - if bytes.Equal(key, requestedData) { - return resolvedData, errors.New("just checking output error") - } - - return nil, nil - } - - marshalizer := &mock.MarshalizerMock{} - marshalizer.Fail = true - - hdrRes, _ := block.NewHeaderResolver( - topicResolver, - transientPool, - store, - marshalizer, - mock.NewNonceHashConverterMock(), - ) - - _, err := hdrRes.ResolveHdrRequest(process.RequestData{Type: process.HashType, Value: requestedData}) - assert.Equal(t, "just checking output error", err.Error()) -} - -func TestHeaderResolver_ResolveHdrRequestNonceTypeInvalidSliceShouldErr(t *testing.T) { - t.Parallel() - - topicResolver := &mock.ResolverStub{} - topicResolver.SetResolverHandlerCalled = func(i func(rd process.RequestData) ([]byte, error)) { - } - - transientPool := &mock.TransientDataPoolStub{} - transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - - hdrRes, _ := block.NewHeaderResolver( - topicResolver, - transientPool, - &mock.StorerStub{}, - &mock.MarshalizerMock{}, - mock.NewNonceHashConverterMock(), - ) - - buff, err := hdrRes.ResolveHdrRequest(process.RequestData{Type: process.NonceType, Value: []byte("aaa")}) - assert.Nil(t, buff) - assert.Equal(t, process.ErrInvalidNonceByteSlice, err) -} - -func TestHeaderResolver_ResolveHdrRequestNonceTypeNotFoundInHdrNoncePoolShouldRetNil(t *testing.T) { - t.Parallel() - - requestedNonce := uint64(67) - - topicResolver := &mock.ResolverStub{} - topicResolver.SetResolverHandlerCalled = func(i func(rd process.RequestData) ([]byte, error)) { - } - - transientPool := &mock.TransientDataPoolStub{} - transientPool.HeadersCalled = func() 
data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { - headersNonces := &mock.Uint64CacherStub{} - headersNonces.GetCalled = func(u uint64) (i []byte, b bool) { - return nil, false - } - - return headersNonces - } - - nonceConverter := mock.NewNonceHashConverterMock() - - hdrRes, _ := block.NewHeaderResolver( - topicResolver, - transientPool, - &mock.StorerStub{}, - &mock.MarshalizerMock{}, - nonceConverter, - ) - - buff, err := hdrRes.ResolveHdrRequest(process.RequestData{ - Type: process.NonceType, - Value: nonceConverter.ToByteSlice(requestedNonce)}) - assert.Nil(t, buff) - assert.Nil(t, err) - -} - -func TestHeaderResolver_ResolveHdrRequestNonceTypeFoundInHdrNoncePoolShouldRetFromPool(t *testing.T) { - t.Parallel() - - requestedNonce := uint64(67) - resolvedData := []byte("bbbb") - - topicResolver := &mock.ResolverStub{} - topicResolver.SetResolverHandlerCalled = func(i func(rd process.RequestData) ([]byte, error)) { - } - - transientPool := &mock.TransientDataPoolStub{} - transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - headers := &mock.ShardedDataStub{} - - headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal(key, []byte("aaaa")) { - return resolvedData, true - } - - return nil, false - } - - return headers - } - transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { - headersNonces := &mock.Uint64CacherStub{} - headersNonces.GetCalled = func(u uint64) (i []byte, b bool) { - if u == requestedNonce { - return []byte("aaaa"), true - } - - return nil, false - } - - return headersNonces - } - - nonceConverter := mock.NewNonceHashConverterMock() - marshalizer := &mock.MarshalizerMock{} - - hdrRes, _ := block.NewHeaderResolver( - topicResolver, - transientPool, - &mock.StorerStub{}, - marshalizer, - nonceConverter, - ) - - buff, err := hdrRes.ResolveHdrRequest(process.RequestData{ - Type: process.NonceType, - 
Value: nonceConverter.ToByteSlice(requestedNonce)}) - assert.Nil(t, err) - - recoveredResolved := make([]byte, 0) - err = marshalizer.Unmarshal(&recoveredResolved, buff) - assert.Equal(t, resolvedData, recoveredResolved) -} - -func TestHeaderResolver_ResolveHdrRequestNonceTypeFoundInHdrNoncePoolShouldRetFromStorage(t *testing.T) { - t.Parallel() - - requestedNonce := uint64(67) - resolvedData := []byte("bbbb") - - topicResolver := &mock.ResolverStub{} - topicResolver.SetResolverHandlerCalled = func(i func(rd process.RequestData) ([]byte, error)) { - } - - transientPool := &mock.TransientDataPoolStub{} - transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - headers := &mock.ShardedDataStub{} - - headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { - return nil, false - } - - return headers - } - transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { - headersNonces := &mock.Uint64CacherStub{} - headersNonces.GetCalled = func(u uint64) (i []byte, b bool) { - if u == requestedNonce { - return []byte("aaaa"), true - } - - return nil, false - } - - return headersNonces - } - - nonceConverter := mock.NewNonceHashConverterMock() - marshalizer := &mock.MarshalizerMock{} - - store := &mock.StorerStub{} - store.GetCalled = func(key []byte) (i []byte, e error) { - if bytes.Equal(key, []byte("aaaa")) { - return resolvedData, nil - } - - return nil, nil - } - - hdrRes, _ := block.NewHeaderResolver( - topicResolver, - transientPool, - store, - marshalizer, - nonceConverter, - ) - - buff, _ := hdrRes.ResolveHdrRequest(process.RequestData{ - Type: process.NonceType, - Value: nonceConverter.ToByteSlice(requestedNonce)}) - assert.Equal(t, resolvedData, buff) -} - -func TestHeaderResolver_ResolveHdrRequestNonceTypeFoundInHdrNoncePoolCheckRetErr(t *testing.T) { - t.Parallel() - - requestedNonce := uint64(67) - resolvedData := []byte("bbbb") - - topicResolver := &mock.ResolverStub{} - topicResolver.SetResolverHandlerCalled = func(i 
func(rd process.RequestData) ([]byte, error)) { - } - - transientPool := &mock.TransientDataPoolStub{} - transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - headers := &mock.ShardedDataStub{} - - headers.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { - return nil, false - } - - return headers - } - transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { - headersNonces := &mock.Uint64CacherStub{} - headersNonces.GetCalled = func(u uint64) (i []byte, b bool) { - if u == requestedNonce { - return []byte("aaaa"), true - } - - return nil, false - } - - return headersNonces - } - - nonceConverter := mock.NewNonceHashConverterMock() - marshalizer := &mock.MarshalizerMock{} - - store := &mock.StorerStub{} - store.GetCalled = func(key []byte) (i []byte, e error) { - if bytes.Equal(key, []byte("aaaa")) { - return resolvedData, errors.New("just checking output error") - } - - return nil, nil - } - - hdrRes, _ := block.NewHeaderResolver( - topicResolver, - transientPool, - store, - marshalizer, - nonceConverter, - ) - - _, err := hdrRes.ResolveHdrRequest(process.RequestData{ - Type: process.NonceType, - Value: nonceConverter.ToByteSlice(requestedNonce)}) - assert.Equal(t, "just checking output error", err.Error()) -} - -// Requests - -func TestHeaderResolver_RequestHdrFromHashShouldWork(t *testing.T) { - t.Parallel() - - res := &mock.ResolverStub{} - res.SetResolverHandlerCalled = func(h func(rd process.RequestData) ([]byte, error)) { - } - - requested := process.RequestData{} - - res.RequestDataCalled = func(rd process.RequestData) error { - requested = rd - return nil - } - - buffRequested := []byte("aaaa") - - transientPool := &mock.TransientDataPoolStub{} - transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - - nonceConverter := mock.NewNonceHashConverterMock() - - 
hdrRes, _ := block.NewHeaderResolver( - res, - transientPool, - &mock.StorerStub{}, - &mock.MarshalizerMock{}, - nonceConverter, - ) - - assert.Nil(t, hdrRes.RequestHeaderFromHash(buffRequested)) - assert.Equal(t, process.RequestData{ - Type: process.HashType, - Value: buffRequested, - }, requested) -} - -func TestHeaderResolver_RequestHdrFromNonceShouldWork(t *testing.T) { - t.Parallel() - - res := &mock.ResolverStub{} - res.SetResolverHandlerCalled = func(h func(rd process.RequestData) ([]byte, error)) { - } - - requested := process.RequestData{} - - res.RequestDataCalled = func(rd process.RequestData) error { - requested = rd - return nil - } - - transientPool := &mock.TransientDataPoolStub{} - transientPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - transientPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - - nonceConverter := mock.NewNonceHashConverterMock() - - hdrRes, _ := block.NewHeaderResolver( - res, - transientPool, - &mock.StorerStub{}, - &mock.MarshalizerMock{}, - nonceConverter, - ) - - buffToExpect := nonceConverter.ToByteSlice(67) - - assert.Nil(t, hdrRes.RequestHeaderFromNonce(67)) - assert.Equal(t, process.RequestData{ - Type: process.NonceType, - Value: buffToExpect, - }, requested) -} - -//------- genericBlockBodyResolver - -// NewBlockBodyResolver - -func TestNewGenericBlockBodyResolver_NilResolverShouldErr(t *testing.T) { - t.Parallel() - - gbbRes, err := block.NewGenericBlockBodyResolver( - nil, - &mock.CacherStub{}, - &mock.StorerStub{}, - &mock.MarshalizerMock{}, - ) - - assert.Equal(t, process.ErrNilResolver, err) - assert.Nil(t, gbbRes) -} - -func TestNewGenericBlockBodyResolver_NilBlockBodyPoolShouldErr(t *testing.T) { - t.Parallel() - - gbbRes, err := block.NewGenericBlockBodyResolver( - &mock.ResolverStub{}, - nil, - &mock.StorerStub{}, - &mock.MarshalizerMock{}, - ) - - assert.Equal(t, process.ErrNilBlockBodyPool, err) - assert.Nil(t, 
gbbRes) -} - -func TestNewGenericBlockBodyResolver_NilBlockBodyStorageShouldErr(t *testing.T) { - t.Parallel() - - gbbRes, err := block.NewGenericBlockBodyResolver( - &mock.ResolverStub{}, - &mock.CacherStub{}, - nil, - &mock.MarshalizerMock{}, - ) - - assert.Equal(t, process.ErrNilBlockBodyStorage, err) - assert.Nil(t, gbbRes) -} - -func TestNewGenericBlockBodyResolver_NilBlockMArshalizerShouldErr(t *testing.T) { - t.Parallel() - - gbbRes, err := block.NewGenericBlockBodyResolver( - &mock.ResolverStub{}, - &mock.CacherStub{}, - &mock.StorerStub{}, - nil, - ) - - assert.Equal(t, process.ErrNilMarshalizer, err) - assert.Nil(t, gbbRes) -} - -func TestNewGenericBlockBodyResolver_OkValsShouldWork(t *testing.T) { - t.Parallel() - - wasCalled := false - - res := &mock.ResolverStub{} - res.SetResolverHandlerCalled = func(i func(rd process.RequestData) ([]byte, error)) { - wasCalled = true - } - - gbbRes, err := block.NewGenericBlockBodyResolver( - res, - &mock.CacherStub{}, - &mock.StorerStub{}, - &mock.MarshalizerMock{}, - ) - - assert.Nil(t, err) - assert.NotNil(t, gbbRes) - assert.True(t, wasCalled) -} - -// resolveBlockBodyRequest - -func TestGenericBlockBodyResolver_ResolveBlockBodyRequestWrongTypeShouldErr(t *testing.T) { - t.Parallel() - - topicResolver := &mock.ResolverStub{} - topicResolver.SetResolverHandlerCalled = func(i func(rd process.RequestData) ([]byte, error)) { - } - - gbbRes, _ := block.NewGenericBlockBodyResolver( - topicResolver, - &mock.CacherStub{}, - &mock.StorerStub{}, - &mock.MarshalizerMock{}, - ) - - buff, err := gbbRes.ResolveBlockBodyRequest(process.RequestData{Type: process.NonceType, Value: make([]byte, 0)}) - assert.Nil(t, buff) - assert.Equal(t, process.ErrResolveNotHashType, err) - -} - -func TestGenericBlockBodyResolver_ResolveBlockBodyRequestNilValueShouldErr(t *testing.T) { - t.Parallel() - - topicResolver := &mock.ResolverStub{} - topicResolver.SetResolverHandlerCalled = func(i func(rd process.RequestData) ([]byte, error)) { - } - - 
gbbRes, _ := block.NewGenericBlockBodyResolver( - topicResolver, - &mock.CacherStub{}, - &mock.StorerStub{}, - &mock.MarshalizerMock{}, - ) - - buff, err := gbbRes.ResolveBlockBodyRequest(process.RequestData{Type: process.HashType, Value: nil}) - assert.Nil(t, buff) - assert.Equal(t, process.ErrNilValue, err) - -} - -func TestGenericBlockBodyResolver_ResolveBlockBodyRequestFoundInPoolShouldRetVal(t *testing.T) { - t.Parallel() - - requestedBuff := []byte("aaa") - resolvedBuff := []byte("bbb") - - topicResolver := &mock.ResolverStub{} - topicResolver.SetResolverHandlerCalled = func(i func(rd process.RequestData) ([]byte, error)) { - } - - cache := &mock.CacherStub{} - cache.GetCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal(key, requestedBuff) { - return resolvedBuff, true - } - - return nil, false - } - - marshalizer := &mock.MarshalizerMock{} - - gbbRes, _ := block.NewGenericBlockBodyResolver( - topicResolver, - cache, - &mock.StorerStub{}, - marshalizer, - ) - - buff, err := gbbRes.ResolveBlockBodyRequest(process.RequestData{ - Type: process.HashType, - Value: requestedBuff}) - - buffExpected, _ := marshalizer.Marshal(resolvedBuff) - - assert.Nil(t, err) - assert.Equal(t, buffExpected, buff) - -} - -func TestGenericBlockBodyResolver_ResolveBlockBodyRequestFoundInPoolMarshalizerFailShouldErr(t *testing.T) { - t.Parallel() - - requestedBuff := []byte("aaa") - resolvedBuff := []byte("bbb") - - topicResolver := &mock.ResolverStub{} - topicResolver.SetResolverHandlerCalled = func(i func(rd process.RequestData) ([]byte, error)) { - } - - cache := &mock.CacherStub{} - cache.GetCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal(key, requestedBuff) { - return resolvedBuff, true - } - - return nil, false - } - - marshalizer := &mock.MarshalizerStub{} - marshalizer.MarshalCalled = func(obj interface{}) (i []byte, e error) { - return nil, errors.New("MarshalizerMock generic error") - } - - gbbRes, _ := 
block.NewGenericBlockBodyResolver( - topicResolver, - cache, - &mock.StorerStub{}, - marshalizer, - ) - - buff, err := gbbRes.ResolveBlockBodyRequest(process.RequestData{ - Type: process.HashType, - Value: requestedBuff}) - - assert.Nil(t, buff) - assert.Equal(t, "MarshalizerMock generic error", err.Error()) - -} - -func TestGenericBlockBodyResolver_ResolveBlockBodyRequestNotFoundInPoolShouldRetFromStorage(t *testing.T) { - t.Parallel() - - requestedBuff := []byte("aaa") - resolvedBuff := []byte("bbb") - - topicResolver := &mock.ResolverStub{} - topicResolver.SetResolverHandlerCalled = func(i func(rd process.RequestData) ([]byte, error)) { - } - - cache := &mock.CacherStub{} - cache.GetCalled = func(key []byte) (value interface{}, ok bool) { - return nil, false - } - - store := &mock.StorerStub{} - store.GetCalled = func(key []byte) (i []byte, e error) { - return resolvedBuff, errors.New("just checking output error") - } - - marshalizer := &mock.MarshalizerMock{} - - gbbRes, _ := block.NewGenericBlockBodyResolver( - topicResolver, - cache, - store, - marshalizer, - ) - - buff, err := gbbRes.ResolveBlockBodyRequest(process.RequestData{ - Type: process.HashType, - Value: requestedBuff}) - - assert.Equal(t, resolvedBuff, buff) - assert.Equal(t, "just checking output error", err.Error()) - -} - -// Requests - -func TestBlockBodyResolver_RequestBlockBodyFromHashShouldWork(t *testing.T) { - t.Parallel() - - wasCalled := false - - buffRequested := []byte("aaaa") - - res := &mock.ResolverStub{} - res.SetResolverHandlerCalled = func(i func(rd process.RequestData) ([]byte, error)) { - wasCalled = true - } - - requested := process.RequestData{} - - res.RequestDataCalled = func(rd process.RequestData) error { - requested = rd - return nil - } - - gbbRes, err := block.NewGenericBlockBodyResolver( - res, - &mock.CacherStub{}, - &mock.StorerStub{}, - &mock.MarshalizerMock{}, - ) - - assert.Nil(t, err) - assert.NotNil(t, gbbRes) - assert.True(t, wasCalled) - - assert.Nil(t, 
gbbRes.RequestBlockBodyFromHash(buffRequested)) - assert.Equal(t, process.RequestData{ - Type: process.HashType, - Value: buffRequested, - }, requested) -} diff --git a/process/errors.go b/process/errors.go index 37fe42dcf1d..43e46ec9300 100644 --- a/process/errors.go +++ b/process/errors.go @@ -4,15 +4,18 @@ import ( "errors" ) +// ErrNilMessage signals that a nil message has been received +var ErrNilMessage = errors.New("nil message") + +// ErrNoConnectedPeerToSendRequest signals that the connected peers list is empty and can not send request +var ErrNoConnectedPeerToSendRequest = errors.New("connected peers list is empty. Can not send request") + // ErrNilAccountsAdapter defines the error when trying to use a nil AccountsAddapter var ErrNilAccountsAdapter = errors.New("nil AccountsAdapter") // ErrNilHasher signals that an operation has been attempted to or with a nil hasher implementation var ErrNilHasher = errors.New("nil Hasher") -// ErrNilPublicKeysSelector signals that a nil public keys selector has been provided -var ErrNilPublicKeysSelector = errors.New("nil public keys selector") - // ErrNilAddressConverter signals that an operation has been attempted to or with a nil AddressConverter implementation var ErrNilAddressConverter = errors.New("nil AddressConverter") @@ -91,9 +94,6 @@ var ErrWrongNonceInBlock = errors.New("wrong nonce in block") // ErrInvalidBlockHash signals the hash of the block is not matching with the previous one var ErrInvalidBlockHash = errors.New("invalid block hash") -// ErrInvalidBlockSignature signals the signature of the block is not valid -var ErrInvalidBlockSignature = errors.New("invalid block signature") - // ErrMissingTransaction signals that one transaction is missing var ErrMissingTransaction = errors.New("missing transaction") @@ -130,15 +130,6 @@ var ErrNilRounder = errors.New("nil Rounder") // ErrNilMessenger signals that a nil Messenger object was provided var ErrNilMessenger = errors.New("nil Messenger") -// ErrNilNewer 
signals that a nil Newer object was provided -var ErrNilNewer = errors.New("nil Newer") - -// ErrRegisteringValidator signals that a registration validator occur -var ErrRegisteringValidator = errors.New("error while registering validator") - -// ErrNilInterceptor signals that a nil Interceptor has been provided -var ErrNilInterceptor = errors.New("nil Interceptor") - // ErrNilTxDataPool signals that a nil transaction pool has been provided var ErrNilTxDataPool = errors.New("nil transaction data pool") @@ -190,12 +181,6 @@ var ErrNotImplementedBlockProcessingType = errors.New("not implemented block pro // ErrNilDataToProcess signals that nil data was provided var ErrNilDataToProcess = errors.New("nil data to process") -// ErrBadInterceptorTopicImplementation signals that a bad interceptor-topic implementation occurred -var ErrBadInterceptorTopicImplementation = errors.New("bad interceptor-topic implementation") - -// ErrNilBlockBody signals that a nil block body has been provided -var ErrNilBlockBody = errors.New("nil block body") - // ErrNilTransientPool signals that an operation has been attempted to or with a nil transient pool of data var ErrNilTransientPool = errors.New("nil transient pool") @@ -205,17 +190,8 @@ var ErrNilTxStorage = errors.New("nil transaction storage") // ErrNilHeadersStorage signals that a nil header storage has been provided var ErrNilHeadersStorage = errors.New("nil headers storage") -// ErrNilTopic signals that a nil topic has been provided/fetched -var ErrNilTopic = errors.New("nil topic") - -// ErrResolveRequestAlreadyAssigned signals that ResolveRequest is not nil for a particular topic -var ErrResolveRequestAlreadyAssigned = errors.New("resolve request func has already been assigned for this topic") - -// ErrTopicNotWiredToMessenger signals that a call to a not-correctly-instantiated topic has been made -var ErrTopicNotWiredToMessenger = errors.New("topic has not been wired to a p2p.Messenger implementation") - -// ErrNilResolver 
signals that a nil resolver object has been provided -var ErrNilResolver = errors.New("nil resolver") +// ErrNilResolverSender signals that a nil resolver sender object has been provided +var ErrNilResolverSender = errors.New("nil resolver sender") // ErrNilNonceConverter signals that a nil nonce converter has been provided var ErrNilNonceConverter = errors.New("nil nonce converter") diff --git a/process/factory/containers/objectsContainer.go b/process/factory/containers/objectsContainer.go new file mode 100644 index 00000000000..c8039e57dfb --- /dev/null +++ b/process/factory/containers/objectsContainer.go @@ -0,0 +1,78 @@ +package containers + +import ( + "sync" + + "github.com/ElrondNetwork/elrond-go-sandbox/process" +) + +// ObjectsContainer is a holder organized by type +type ObjectsContainer struct { + mutex sync.RWMutex + objects map[string]interface{} +} + +// NewObjectsContainer will create a new instance of a container +func NewObjectsContainer() *ObjectsContainer { + return &ObjectsContainer{ + mutex: sync.RWMutex{}, + objects: make(map[string]interface{}), + } +} + +// Get returns the object stored at a certain key. +// Returns an error if the element does not exist +func (oc *ObjectsContainer) Get(key string) (interface{}, error) { + oc.mutex.RLock() + resolver, ok := oc.objects[key] + oc.mutex.RUnlock() + if !ok { + return nil, process.ErrInvalidContainerKey + } + return resolver, nil +} + +// Add will add an object at a given key. 
Returns +// an error if the element already exists +func (oc *ObjectsContainer) Add(key string, val interface{}) error { + if val == nil { + return process.ErrNilContainerElement + } + oc.mutex.Lock() + defer oc.mutex.Unlock() + + _, ok := oc.objects[key] + + if ok { + return process.ErrContainerKeyAlreadyExists + } + + oc.objects[key] = val + return nil +} + +// Replace will add (or replace if it already exists) an object at a given key +func (oc *ObjectsContainer) Replace(key string, val interface{}) error { + if val == nil { + return process.ErrNilContainerElement + } + oc.mutex.Lock() + oc.objects[key] = val + oc.mutex.Unlock() + return nil +} + +// Remove will remove an object at a given key +func (oc *ObjectsContainer) Remove(key string) { + oc.mutex.Lock() + delete(oc.objects, key) + oc.mutex.Unlock() +} + +// Len returns the length of the added objects +func (oc *ObjectsContainer) Len() int { + oc.mutex.RLock() + l := len(oc.objects) + oc.mutex.RUnlock() + return l +} diff --git a/process/factory/containers/objectsContainer_test.go b/process/factory/containers/objectsContainer_test.go new file mode 100644 index 00000000000..686fc250736 --- /dev/null +++ b/process/factory/containers/objectsContainer_test.go @@ -0,0 +1,156 @@ +package containers_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/factory/containers" + "github.com/stretchr/testify/assert" +) + +func TestNewObjectsContainer_ShouldWork(t *testing.T) { + t.Parallel() + + c := containers.NewObjectsContainer() + + assert.NotNil(t, c) +} + +//------- Add + +func TestObjectsContainer_AddAlreadyExistingShouldErr(t *testing.T) { + t.Parallel() + + c := containers.NewObjectsContainer() + + _ = c.Add("key", "val") + err := c.Add("key", "value") + + assert.Equal(t, process.ErrContainerKeyAlreadyExists, err) +} + +func TestObjectsContainer_AddNilShouldErr(t *testing.T) { + t.Parallel() + + c := 
containers.NewObjectsContainer() + + err := c.Add("key", nil) + + assert.Equal(t, process.ErrNilContainerElement, err) +} + +func TestObjectsContainer_AddShouldWork(t *testing.T) { + t.Parallel() + + c := containers.NewObjectsContainer() + + err := c.Add("key", "value") + + assert.Nil(t, err) +} + +//------- Get + +func TestObjectsContainer_GetNotFoundShouldErr(t *testing.T) { + t.Parallel() + + c := containers.NewObjectsContainer() + + key := "key" + keyNotFound := "key not found" + val := "value" + + _ = c.Add(key, val) + valRecovered, err := c.Get(keyNotFound) + + assert.Nil(t, valRecovered) + assert.Equal(t, process.ErrInvalidContainerKey, err) +} + +func TestObjectsContainer_GetShouldWork(t *testing.T) { + t.Parallel() + + c := containers.NewObjectsContainer() + + key := "key" + val := "value" + + _ = c.Add(key, val) + valRecovered, err := c.Get(key) + + assert.Equal(t, val, valRecovered) + assert.Nil(t, err) +} + +//------- Replace + +func TestObjectsContainer_ReplaceNilValueShouldErrAndNotModify(t *testing.T) { + t.Parallel() + + c := containers.NewObjectsContainer() + + key := "key" + val := "value" + + _ = c.Add(key, val) + err := c.Replace(key, nil) + + valRecovered, _ := c.Get(key) + + assert.Equal(t, process.ErrNilContainerElement, err) + assert.Equal(t, val, valRecovered) +} + +func TestObjectsContainer_ReplaceShouldWork(t *testing.T) { + t.Parallel() + + c := containers.NewObjectsContainer() + + key := "key" + val := "value" + val2 := "value2" + + _ = c.Add(key, val) + err := c.Replace(key, val2) + + valRecovered, _ := c.Get(key) + + assert.Equal(t, val2, valRecovered) + assert.Nil(t, err) +} + +//------- Remove + +func TestObjectsContainer_RemoveShouldWork(t *testing.T) { + t.Parallel() + + c := containers.NewObjectsContainer() + + key := "key" + val := "value" + + _ = c.Add(key, val) + c.Remove(key) + + valRecovered, err := c.Get(key) + + assert.Nil(t, valRecovered) + assert.Equal(t, process.ErrInvalidContainerKey, err) +} + +//------- Len + +func 
TestObjectsContainer_LenShouldWork(t *testing.T) { + t.Parallel() + + c := containers.NewObjectsContainer() + + _ = c.Add("key1", "val") + assert.Equal(t, 1, c.Len()) + + _ = c.Add("key2", "val") + assert.Equal(t, 2, c.Len()) + + c.Remove("key1") + assert.Equal(t, 1, c.Len()) +} diff --git a/process/factory/containers/resolversContainer.go b/process/factory/containers/resolversContainer.go new file mode 100644 index 00000000000..b0625f961a9 --- /dev/null +++ b/process/factory/containers/resolversContainer.go @@ -0,0 +1,78 @@ +package containers + +import ( + "sync" + + "github.com/ElrondNetwork/elrond-go-sandbox/process" +) + +// ResolversContainer is a resolvers holder organized by type +type ResolversContainer struct { + mutex sync.RWMutex + objects map[string]process.Resolver +} + +// NewResolversContainer will create a new instance of a container +func NewResolversContainer() *ResolversContainer { + return &ResolversContainer{ + mutex: sync.RWMutex{}, + objects: make(map[string]process.Resolver), + } +} + +// Get returns the object stored at a certain key. +// Returns an error if the element does not exist +func (rc *ResolversContainer) Get(key string) (process.Resolver, error) { + rc.mutex.RLock() + resolver, ok := rc.objects[key] + rc.mutex.RUnlock() + if !ok { + return nil, process.ErrInvalidContainerKey + } + return resolver, nil +} + +// Add will add an object at a given key. 
Returns +// an error if the element already exists +func (rc *ResolversContainer) Add(key string, resolver process.Resolver) error { + if resolver == nil { + return process.ErrNilContainerElement + } + rc.mutex.Lock() + defer rc.mutex.Unlock() + + _, ok := rc.objects[key] + + if ok { + return process.ErrContainerKeyAlreadyExists + } + + rc.objects[key] = resolver + return nil +} + +// Replace will add (or replace if it already exists) an object at a given key +func (rc *ResolversContainer) Replace(key string, resolver process.Resolver) error { + if resolver == nil { + return process.ErrNilContainerElement + } + rc.mutex.Lock() + rc.objects[key] = resolver + rc.mutex.Unlock() + return nil +} + +// Remove will remove an object at a given key +func (rc *ResolversContainer) Remove(key string) { + rc.mutex.Lock() + delete(rc.objects, key) + rc.mutex.Unlock() +} + +// Len returns the length of the added objects +func (rc *ResolversContainer) Len() int { + rc.mutex.RLock() + l := len(rc.objects) + rc.mutex.RUnlock() + return l +} diff --git a/process/factory/containers/resolversContainer_test.go b/process/factory/containers/resolversContainer_test.go new file mode 100644 index 00000000000..7123cebffa5 --- /dev/null +++ b/process/factory/containers/resolversContainer_test.go @@ -0,0 +1,157 @@ +package containers_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/factory/containers" + "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" + "github.com/stretchr/testify/assert" +) + +func TestNewResolversContainer_ShouldWork(t *testing.T) { + t.Parallel() + + c := containers.NewResolversContainer() + + assert.NotNil(t, c) +} + +//------- Add + +func TestResolversContainer_AddAlreadyExistingShouldErr(t *testing.T) { + t.Parallel() + + c := containers.NewResolversContainer() + + _ = c.Add("key", &mock.ResolverStub{}) + err := c.Add("key", &mock.ResolverStub{}) + + assert.Equal(t, 
process.ErrContainerKeyAlreadyExists, err) +} + +func TestResolversContainer_AddNilShouldErr(t *testing.T) { + t.Parallel() + + c := containers.NewResolversContainer() + + err := c.Add("key", nil) + + assert.Equal(t, process.ErrNilContainerElement, err) +} + +func TestResolversContainer_AddShouldWork(t *testing.T) { + t.Parallel() + + c := containers.NewResolversContainer() + + err := c.Add("key", &mock.ResolverStub{}) + + assert.Nil(t, err) +} + +//------- Get + +func TestResolversContainer_GetNotFoundShouldErr(t *testing.T) { + t.Parallel() + + c := containers.NewResolversContainer() + + key := "key" + keyNotFound := "key not found" + val := &mock.ResolverStub{} + + _ = c.Add(key, val) + valRecovered, err := c.Get(keyNotFound) + + assert.Nil(t, valRecovered) + assert.Equal(t, process.ErrInvalidContainerKey, err) +} + +func TestResolversContainer_GetShouldWork(t *testing.T) { + t.Parallel() + + c := containers.NewResolversContainer() + + key := "key" + val := &mock.ResolverStub{} + + _ = c.Add(key, val) + valRecovered, err := c.Get(key) + + assert.True(t, val == valRecovered) + assert.Nil(t, err) +} + +//------- Replace + +func TestResolversContainer_ReplaceNilValueShouldErrAndNotModify(t *testing.T) { + t.Parallel() + + c := containers.NewResolversContainer() + + key := "key" + val := &mock.ResolverStub{} + + _ = c.Add(key, val) + err := c.Replace(key, nil) + + valRecovered, _ := c.Get(key) + + assert.Equal(t, process.ErrNilContainerElement, err) + assert.Equal(t, val, valRecovered) +} + +func TestResolversContainer_ReplaceShouldWork(t *testing.T) { + t.Parallel() + + c := containers.NewResolversContainer() + + key := "key" + val := &mock.ResolverStub{} + val2 := &mock.ResolverStub{} + + _ = c.Add(key, val) + err := c.Replace(key, val2) + + valRecovered, _ := c.Get(key) + + assert.True(t, val2 == valRecovered) + assert.Nil(t, err) +} + +//------- Remove + +func TestResolversContainer_RemoveShouldWork(t *testing.T) { + t.Parallel() + + c := 
containers.NewResolversContainer() + + key := "key" + val := &mock.ResolverStub{} + + _ = c.Add(key, val) + c.Remove(key) + + valRecovered, err := c.Get(key) + + assert.Nil(t, valRecovered) + assert.Equal(t, process.ErrInvalidContainerKey, err) +} + +//------- Len + +func TestResolversContainer_LenShouldWork(t *testing.T) { + t.Parallel() + + c := containers.NewResolversContainer() + + _ = c.Add("key1", &mock.ResolverStub{}) + assert.Equal(t, 1, c.Len()) + + _ = c.Add("key2", &mock.ResolverStub{}) + assert.Equal(t, 2, c.Len()) + + c.Remove("key1") + assert.Equal(t, 1, c.Len()) +} diff --git a/process/factory/export_test.go b/process/factory/export_test.go deleted file mode 100644 index 58d472ac0aa..00000000000 --- a/process/factory/export_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package factory - -import ( - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" -) - -func (p *processorsCreator) SetMessenger(messenger p2p.Messenger) { - p.messenger = messenger -} diff --git a/process/factory/factory.go b/process/factory/factory.go deleted file mode 100644 index d88cdaad7fa..00000000000 --- a/process/factory/factory.go +++ /dev/null @@ -1,438 +0,0 @@ -package factory - -import ( - "github.com/ElrondNetwork/elrond-go-sandbox/crypto" - "github.com/ElrondNetwork/elrond-go-sandbox/data" - "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" - "github.com/ElrondNetwork/elrond-go-sandbox/data/state" - "github.com/ElrondNetwork/elrond-go-sandbox/data/typeConverters" - "github.com/ElrondNetwork/elrond-go-sandbox/hashing" - "github.com/ElrondNetwork/elrond-go-sandbox/marshal" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/ElrondNetwork/elrond-go-sandbox/process" - "github.com/ElrondNetwork/elrond-go-sandbox/process/block" - "github.com/ElrondNetwork/elrond-go-sandbox/process/interceptor" - "github.com/ElrondNetwork/elrond-go-sandbox/process/resolver" - "github.com/ElrondNetwork/elrond-go-sandbox/process/transaction" - 
"github.com/ElrondNetwork/elrond-go-sandbox/sharding" -) - -type topicName string - -const ( - // TransactionTopic is the topic used for sharing transactions - TransactionTopic topicName = "transactions" - // HeadersTopic is the topic used for sharing block headers - HeadersTopic topicName = "headers" - // TxBlockBodyTopic is the topic used for sharing transactions block bodies - TxBlockBodyTopic topicName = "txBlockBodies" - // PeerChBodyTopic is used for sharing peer change block bodies - PeerChBodyTopic topicName = "peerChangeBlockBodies" - // StateBodyTopic is used for sharing state block bodies - StateBodyTopic topicName = "stateBlockBodies" -) - -type processorsCreator struct { - interceptorContainer process.InterceptorContainer - resolverContainer process.ResolverContainer - - messenger p2p.Messenger - blockchain *blockchain.BlockChain - dataPool data.TransientDataHolder - shardCoordinator sharding.ShardCoordinator - addrConverter state.AddressConverter - hasher hashing.Hasher - marshalizer marshal.Marshalizer - multiSigner crypto.MultiSigner - singleSigner crypto.SingleSigner - keyGen crypto.KeyGenerator - uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter -} - -// ProcessorsCreatorConfig is the struct containing the needed params to be -// provided when initialising a new processorsCreator -type ProcessorsCreatorConfig struct { - InterceptorContainer process.InterceptorContainer - ResolverContainer process.ResolverContainer - - Messenger p2p.Messenger - Blockchain *blockchain.BlockChain - DataPool data.TransientDataHolder - ShardCoordinator sharding.ShardCoordinator - AddrConverter state.AddressConverter - Hasher hashing.Hasher - Marshalizer marshal.Marshalizer - MultiSigner crypto.MultiSigner - SingleSigner crypto.SingleSigner - KeyGen crypto.KeyGenerator - Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter -} - -// NewProcessorsCreator is responsible for creating a new processorsCreator object -func NewProcessorsCreator(config 
ProcessorsCreatorConfig) (*processorsCreator, error) { - err := validateRequiredProcessCreatorParams(config) - if err != nil { - return nil, err - } - return &processorsCreator{ - interceptorContainer: config.InterceptorContainer, - resolverContainer: config.ResolverContainer, - messenger: config.Messenger, - blockchain: config.Blockchain, - dataPool: config.DataPool, - shardCoordinator: config.ShardCoordinator, - addrConverter: config.AddrConverter, - hasher: config.Hasher, - marshalizer: config.Marshalizer, - multiSigner: config.MultiSigner, - singleSigner: config.SingleSigner, - keyGen: config.KeyGen, - uint64ByteSliceConverter: config.Uint64ByteSliceConverter, - }, nil -} - -// CreateInterceptors creates the interceptors and initializes the interceptor container -func (p *processorsCreator) CreateInterceptors() error { - err := p.createTxInterceptor() - if err != nil { - return err - } - - err = p.createHdrInterceptor() - if err != nil { - return err - } - - err = p.createTxBlockBodyInterceptor() - if err != nil { - return err - } - - err = p.createPeerChBlockBodyInterceptor() - if err != nil { - return err - } - - err = p.createStateBlockBodyInterceptor() - if err != nil { - return err - } - - return nil -} - -// CreateResolvers creates the resolvers and initializes the resolver container -func (p *processorsCreator) CreateResolvers() error { - err := p.createTxResolver() - if err != nil { - return err - } - - err = p.createHdrResolver() - if err != nil { - return err - } - - err = p.createTxBlockBodyResolver() - if err != nil { - return err - } - - err = p.createPeerChBlockBodyResolver() - if err != nil { - return err - } - - err = p.createStateBlockBodyResolver() - if err != nil { - return err - } - - return nil -} - -// InterceptorContainer is a getter for interceptorContainer property -func (p *processorsCreator) InterceptorContainer() process.InterceptorContainer { - return p.interceptorContainer -} - -// ResolverContainer is a getter for 
resolverContainer property -func (p *processorsCreator) ResolverContainer() process.ResolverContainer { - return p.resolverContainer -} - -func (p *processorsCreator) createTxInterceptor() error { - intercept, err := interceptor.NewTopicInterceptor(string(TransactionTopic), p.messenger, transaction.NewInterceptedTransaction(p.singleSigner)) - if err != nil { - return err - } - - txStorer := p.blockchain.GetStorer(blockchain.TransactionUnit) - - txInterceptor, err := transaction.NewTxInterceptor( - intercept, - p.dataPool.Transactions(), - txStorer, - p.addrConverter, - p.hasher, - p.singleSigner, - p.keyGen, - p.shardCoordinator) - - if err != nil { - return err - } - - err = p.interceptorContainer.Add(string(TransactionTopic), txInterceptor) - return err -} - -func (p *processorsCreator) createHdrInterceptor() error { - intercept, err := interceptor.NewTopicInterceptor( - string(HeadersTopic), - p.messenger, - block.NewInterceptedHeader(p.multiSigner), - ) - if err != nil { - return err - } - - headerStorer := p.blockchain.GetStorer(blockchain.BlockHeaderUnit) - - hdrInterceptor, err := block.NewHeaderInterceptor( - intercept, - p.dataPool.Headers(), - p.dataPool.HeadersNonces(), - headerStorer, - p.multiSigner, - p.hasher, - p.shardCoordinator, - ) - - if err != nil { - return err - } - - err = p.interceptorContainer.Add(string(HeadersTopic), hdrInterceptor) - return err -} - -func (p *processorsCreator) createTxBlockBodyInterceptor() error { - intercept, err := interceptor.NewTopicInterceptor(string(TxBlockBodyTopic), p.messenger, block.NewInterceptedTxBlockBody()) - if err != nil { - return err - } - - txBlockBodyStorer := p.blockchain.GetStorer(blockchain.TxBlockBodyUnit) - - txBlockBodyInterceptor, err := block.NewGenericBlockBodyInterceptor( - intercept, - p.dataPool.TxBlocks(), - txBlockBodyStorer, - p.hasher, - p.shardCoordinator, - ) - - if err != nil { - return err - } - - err = p.interceptorContainer.Add(string(TxBlockBodyTopic), txBlockBodyInterceptor) 
- return err -} - -func (p *processorsCreator) createPeerChBlockBodyInterceptor() error { - intercept, err := interceptor.NewTopicInterceptor(string(PeerChBodyTopic), p.messenger, block.NewInterceptedPeerBlockBody()) - if err != nil { - return err - } - - peerBlockBodyStorer := p.blockchain.GetStorer(blockchain.PeerBlockBodyUnit) - - peerChBodyInterceptor, err := block.NewGenericBlockBodyInterceptor( - intercept, - p.dataPool.PeerChangesBlocks(), - peerBlockBodyStorer, - p.hasher, - p.shardCoordinator, - ) - - if err != nil { - return err - } - - err = p.interceptorContainer.Add(string(PeerChBodyTopic), peerChBodyInterceptor) - return err -} - -func (p *processorsCreator) createStateBlockBodyInterceptor() error { - intercept, err := interceptor.NewTopicInterceptor(string(StateBodyTopic), p.messenger, block.NewInterceptedStateBlockBody()) - if err != nil { - return err - } - - stateBlockBodyStorer := p.blockchain.GetStorer(blockchain.StateBlockBodyUnit) - - stateBodyInterceptor, err := block.NewGenericBlockBodyInterceptor( - intercept, - p.dataPool.StateBlocks(), - stateBlockBodyStorer, - p.hasher, - p.shardCoordinator, - ) - - if err != nil { - return err - } - - err = p.interceptorContainer.Add(string(StateBodyTopic), stateBodyInterceptor) - return err -} - -func (p *processorsCreator) createTxResolver() error { - resolve, err := resolver.NewTopicResolver(string(TransactionTopic), p.messenger, p.marshalizer) - if err != nil { - return err - } - - txResolver, err := transaction.NewTxResolver( - resolve, - p.dataPool.Transactions(), - p.blockchain.GetStorer(blockchain.TransactionUnit), - p.marshalizer) - - if err != nil { - return err - } - - err = p.resolverContainer.Add(string(TransactionTopic), txResolver) - return err -} - -func (p *processorsCreator) createHdrResolver() error { - resolve, err := resolver.NewTopicResolver(string(HeadersTopic), p.messenger, p.marshalizer) - if err != nil { - return err - } - - hdrResolver, err := block.NewHeaderResolver( - 
resolve, - p.dataPool, - p.blockchain.GetStorer(blockchain.BlockHeaderUnit), - p.marshalizer, - p.uint64ByteSliceConverter) - - if err != nil { - return err - } - - err = p.resolverContainer.Add(string(HeadersTopic), hdrResolver) - return err -} - -func (p *processorsCreator) createTxBlockBodyResolver() error { - resolve, err := resolver.NewTopicResolver(string(TxBlockBodyTopic), p.messenger, p.marshalizer) - if err != nil { - return err - } - - txBlkResolver, err := block.NewGenericBlockBodyResolver( - resolve, - p.dataPool.TxBlocks(), - p.blockchain.GetStorer(blockchain.TxBlockBodyUnit), - p.marshalizer) - - if err != nil { - return err - } - - err = p.resolverContainer.Add(string(TxBlockBodyTopic), txBlkResolver) - return err -} - -func (p *processorsCreator) createPeerChBlockBodyResolver() error { - resolve, err := resolver.NewTopicResolver(string(PeerChBodyTopic), p.messenger, p.marshalizer) - if err != nil { - return err - } - - peerChBlkResolver, err := block.NewGenericBlockBodyResolver( - resolve, - p.dataPool.PeerChangesBlocks(), - p.blockchain.GetStorer(blockchain.PeerBlockBodyUnit), - p.marshalizer) - - if err != nil { - return err - } - - err = p.resolverContainer.Add(string(PeerChBodyTopic), peerChBlkResolver) - return err -} - -func (p *processorsCreator) createStateBlockBodyResolver() error { - resolve, err := resolver.NewTopicResolver(string(StateBodyTopic), p.messenger, p.marshalizer) - if err != nil { - return err - } - - stateBlkResolver, err := block.NewGenericBlockBodyResolver( - resolve, - p.dataPool.StateBlocks(), - p.blockchain.GetStorer(blockchain.StateBlockBodyUnit), - p.marshalizer) - - if err != nil { - return err - } - - err = p.resolverContainer.Add(string(StateBodyTopic), stateBlkResolver) - return err -} - -func validateRequiredProcessCreatorParams(config ProcessorsCreatorConfig) error { - if config.InterceptorContainer == nil { - return process.ErrNilInterceptorContainer - } - if config.ResolverContainer == nil { - return 
process.ErrNilResolverContainer - } - if config.Messenger == nil { - return process.ErrNilMessenger - } - if config.Blockchain == nil { - return process.ErrNilBlockChain - } - if config.DataPool == nil { - return process.ErrNilDataPoolHolder - } - if config.ShardCoordinator == nil { - return process.ErrNilShardCoordinator - } - if config.AddrConverter == nil { - return process.ErrNilAddressConverter - } - if config.Hasher == nil { - return process.ErrNilHasher - } - if config.Marshalizer == nil { - return process.ErrNilMarshalizer - } - - if config.SingleSigner == nil { - return process.ErrNilSingleSigner - } - - if config.MultiSigner == nil { - return process.ErrNilMultiSigVerifier - } - - if config.KeyGen == nil { - return process.ErrNilKeyGen - } - if config.Uint64ByteSliceConverter == nil { - return process.ErrNilUint64ByteSliceConverter - } - - return nil -} diff --git a/process/factory/factory_test.go b/process/factory/factory_test.go deleted file mode 100644 index 308caaea29d..00000000000 --- a/process/factory/factory_test.go +++ /dev/null @@ -1,302 +0,0 @@ -package factory_test - -import ( - "testing" - - "github.com/ElrondNetwork/elrond-go-sandbox/data" - "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/ElrondNetwork/elrond-go-sandbox/process" - "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" - "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" - "github.com/ElrondNetwork/elrond-go-sandbox/storage" - "github.com/libp2p/go-libp2p-pubsub" - "github.com/stretchr/testify/assert" -) - -func TestProcessorsCreator_NilInterceptorContainerShouldErr(t *testing.T) { - t.Parallel() - - pFactoryConfig := createConfig() - pFactoryConfig.InterceptorContainer = nil - pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) - - assert.Nil(t, pFactory) - assert.NotNil(t, err) - assert.Contains(t, err.Error(), "nil interceptor container") -} - -func 
TestProcessorsCreator_NilResolverContainerShouldErr(t *testing.T) { - t.Parallel() - - pFactoryConfig := createConfig() - pFactoryConfig.ResolverContainer = nil - pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) - - assert.Nil(t, pFactory) - assert.NotNil(t, err) - assert.Contains(t, err.Error(), "nil resolver container") -} - -func TestProcessorsCreator_NilMessengerShouldErr(t *testing.T) { - t.Parallel() - - pFactoryConfig := createConfig() - pFactoryConfig.Messenger = nil - pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) - - assert.Nil(t, pFactory) - assert.NotNil(t, err) - assert.Contains(t, err.Error(), "nil Messenger") -} - -func TestProcessorsCreator_NilBlockchainShouldErr(t *testing.T) { - t.Parallel() - - pFactoryConfig := createConfig() - pFactoryConfig.Blockchain = nil - pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) - - assert.Nil(t, pFactory) - assert.NotNil(t, err) - assert.Contains(t, err.Error(), "nil block chain") -} - -func TestProcessorsCreator_NilDataPoolShouldErr(t *testing.T) { - t.Parallel() - - pFactoryConfig := createConfig() - pFactoryConfig.DataPool = nil - pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) - - assert.Nil(t, pFactory) - assert.NotNil(t, err) - assert.Contains(t, err.Error(), "nil data pool") -} - -func TestProcessorsCreator_NilShardCoordinatorShouldErr(t *testing.T) { - t.Parallel() - - pFactoryConfig := createConfig() - pFactoryConfig.ShardCoordinator = nil - pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) - - assert.Nil(t, pFactory) - assert.NotNil(t, err) - assert.Contains(t, err.Error(), "nil shard coordinator") -} - -func TestProcessorsCreator_NilAddrConverterShouldErr(t *testing.T) { - t.Parallel() - - pFactoryConfig := createConfig() - pFactoryConfig.AddrConverter = nil - pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) - - assert.Nil(t, pFactory) - assert.NotNil(t, err) - assert.Contains(t, err.Error(), "nil AddressConverter") -} - 
-func TestProcessorsCreator_NilHasherShouldErr(t *testing.T) { - t.Parallel() - - pFactoryConfig := createConfig() - pFactoryConfig.Hasher = nil - pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) - - assert.Nil(t, pFactory) - assert.NotNil(t, err) - assert.Contains(t, err.Error(), "nil Hasher") -} - -func TestProcessorsCreator_NilMarshalizerShouldErr(t *testing.T) { - t.Parallel() - - pFactoryConfig := createConfig() - pFactoryConfig.Marshalizer = nil - pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) - - assert.Nil(t, pFactory) - assert.NotNil(t, err) - assert.Contains(t, err.Error(), "nil Marshalizer") -} - -func TestProcessorsCreator_NilSingleSignKeyGenShouldErr(t *testing.T) { - t.Parallel() - - pFactoryConfig := createConfig() - pFactoryConfig.KeyGen = nil - pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) - - assert.Nil(t, pFactory) - assert.NotNil(t, err) - assert.Equal(t, process.ErrNilKeyGen, err) -} - -func TestProcessorsCreator_NilUint64ByteSliceConverterShouldErr(t *testing.T) { - t.Parallel() - - pFactoryConfig := createConfig() - pFactoryConfig.Uint64ByteSliceConverter = nil - pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) - - assert.Nil(t, pFactory) - assert.NotNil(t, err) - assert.Contains(t, err.Error(), "nil byte slice converter") -} - -func TestProcessorsCreator_ShouldWork(t *testing.T) { - t.Parallel() - - pFactoryConfig := createConfig() - count := 10 - pFactoryConfig.InterceptorContainer = &mock.InterceptorContainer{ - LenCalled: func() int { - return count - }, - } - pFactoryConfig.ResolverContainer = &mock.ResolverContainer{ - LenCalled: func() int { - return count - }, - } - pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) - - assert.NotNil(t, pFactory) - assert.NotNil(t, pFactory.ResolverContainer()) - assert.NotNil(t, pFactory.InterceptorContainer()) - assert.Nil(t, err) - assert.Equal(t, pFactory.ResolverContainer().Len(), count) - assert.Equal(t, 
pFactory.InterceptorContainer().Len(), count) -} - -func TestCreateInterceptors_ReturnsSuccessfully(t *testing.T) { - t.Parallel() - - pFactoryConfig := createConfig() - pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) - assert.Nil(t, err) - - err = pFactory.CreateInterceptors() - assert.Nil(t, err) -} - -func TestCreateInterceptors_NewTopicInterceptorErrorsWillMakeCreateInterceptorsError(t *testing.T) { - t.Parallel() - - pFactoryConfig := createConfig() - pFactory, _ := factory.NewProcessorsCreator(pFactoryConfig) - - pFactory.SetMessenger(nil) - err := pFactory.CreateInterceptors() - assert.NotNil(t, err) -} - -func TestCreateResolvers_ReturnsSuccessfully(t *testing.T) { - t.Parallel() - - pFactoryConfig := createConfig() - pFactory, err := factory.NewProcessorsCreator(pFactoryConfig) - assert.Nil(t, err) - - err = pFactory.CreateResolvers() - assert.Nil(t, err) -} - -func TestCreateResolvers_NewTopicInterceptorErrorsWillMakeCreateInterceptorsError(t *testing.T) { - t.Parallel() - - pFactoryConfig := createConfig() - pFactory, _ := factory.NewProcessorsCreator(pFactoryConfig) - - pFactory.SetMessenger(nil) - err := pFactory.CreateResolvers() - assert.NotNil(t, err) -} - -func createConfig() factory.ProcessorsCreatorConfig { - - mockMessenger := createMessenger() - mockTransientDataPool := createDataPool() - mockInterceptorContainer := createInterceptorContainer() - mockResolverContainer := createResolverContainer() - - return factory.ProcessorsCreatorConfig{ - InterceptorContainer: mockInterceptorContainer, - ResolverContainer: mockResolverContainer, - Messenger: mockMessenger, - DataPool: mockTransientDataPool, - Blockchain: createBlockchain(), - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - AddrConverter: &mock.AddressConverterMock{}, - Hasher: mock.HasherMock{}, - Marshalizer: &mock.MarshalizerMock{}, - MultiSigner: mock.NewMultiSigner(), - SingleSigner: &mock.SignerMock{}, - KeyGen: &mock.SingleSignKeyGenMock{}, - 
Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, - } -} - -func createBlockchain() *blockchain.BlockChain { - blkc, _ := blockchain.NewBlockChain( - &mock.CacherStub{}, - &mock.StorerStub{}, - &mock.StorerStub{}, - &mock.StorerStub{}, - &mock.StorerStub{}, - &mock.StorerStub{}) - - return blkc -} - -func createMessenger() p2p.Messenger { - mockMessenger := mock.NewMessengerStub() - mockMessenger.GetTopicCalled = func(name string) *p2p.Topic { - topic := &p2p.Topic{} - topic.RegisterTopicValidator = func(v pubsub.Validator) error { - return nil - } - return topic - } - return mockMessenger -} - -func createDataPool() data.TransientDataHolder { - mockTransientDataPool := &mock.TransientDataPoolMock{} - mockTransientDataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - mockTransientDataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{} - } - mockTransientDataPool.HeadersNoncesCalled = func() data.Uint64Cacher { - return &mock.Uint64CacherStub{} - } - mockTransientDataPool.TxBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - mockTransientDataPool.PeerChangesBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - mockTransientDataPool.StateBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - return mockTransientDataPool -} - -func createInterceptorContainer() process.InterceptorContainer { - mockInterceptorContainer := &mock.InterceptorContainer{} - mockInterceptorContainer.AddCalled = func(key string, interceptor process.Interceptor) error { - return nil - } - return mockInterceptorContainer -} -func createResolverContainer() process.ResolverContainer { - mockResolverContainer := &mock.ResolverContainer{} - mockResolverContainer.AddCalled = func(key string, resolver process.Resolver) error { - return nil - } - return mockResolverContainer -} diff --git a/process/factory/interceptorsResolvers.go 
b/process/factory/interceptorsResolvers.go new file mode 100644 index 00000000000..961868ff540 --- /dev/null +++ b/process/factory/interceptorsResolvers.go @@ -0,0 +1,500 @@ +package factory + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/crypto" + "github.com/ElrondNetwork/elrond-go-sandbox/data" + "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" + "github.com/ElrondNetwork/elrond-go-sandbox/data/state" + "github.com/ElrondNetwork/elrond-go-sandbox/data/typeConverters" + "github.com/ElrondNetwork/elrond-go-sandbox/hashing" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/block/interceptors" + "github.com/ElrondNetwork/elrond-go-sandbox/process/block/resolvers" + "github.com/ElrondNetwork/elrond-go-sandbox/process/topicResolverSender" + "github.com/ElrondNetwork/elrond-go-sandbox/process/transaction" + "github.com/ElrondNetwork/elrond-go-sandbox/sharding" +) + +type topicName string + +const ( + // TransactionTopic is the topic used for sharing transactions + TransactionTopic topicName = "transactions" + // HeadersTopic is the topic used for sharing block headers + HeadersTopic topicName = "headers" + // TxBlockBodyTopic is the topic used for sharing transactions block bodies + TxBlockBodyTopic topicName = "txBlockBodies" + // PeerChBodyTopic is used for sharing peer change block bodies + PeerChBodyTopic topicName = "peerChangeBlockBodies" + // StateBodyTopic is used for sharing state block bodies + StateBodyTopic topicName = "stateBlockBodies" +) + +type interceptorsResolvers struct { + interceptorContainer process.Container + resolverContainer process.ResolversContainer + + messenger p2p.Messenger + blockchain *blockchain.BlockChain + dataPool data.TransientDataHolder + shardCoordinator sharding.ShardCoordinator + addrConverter state.AddressConverter + hasher hashing.Hasher + 
marshalizer marshal.Marshalizer + multiSigner crypto.MultiSigner + singleSigner crypto.SingleSigner + keyGen crypto.KeyGenerator + uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter +} + +// InterceptorsResolversConfig is the struct containing the needed params to be +// provided when initialising a new interceptorsResolvers factory +type InterceptorsResolversConfig struct { + InterceptorContainer process.Container + ResolverContainer process.ResolversContainer + + Messenger p2p.Messenger + Blockchain *blockchain.BlockChain + DataPool data.TransientDataHolder + ShardCoordinator sharding.ShardCoordinator + AddrConverter state.AddressConverter + Hasher hashing.Hasher + Marshalizer marshal.Marshalizer + MultiSigner crypto.MultiSigner + SingleSigner crypto.SingleSigner + KeyGen crypto.KeyGenerator + Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter +} + +// NewInterceptorsResolversCreator is responsible for creating a new interceptorsResolvers factory object +func NewInterceptorsResolversCreator(config InterceptorsResolversConfig) (*interceptorsResolvers, error) { + err := validateRequiredProcessCreatorParams(config) + if err != nil { + return nil, err + } + return &interceptorsResolvers{ + interceptorContainer: config.InterceptorContainer, + resolverContainer: config.ResolverContainer, + messenger: config.Messenger, + blockchain: config.Blockchain, + dataPool: config.DataPool, + shardCoordinator: config.ShardCoordinator, + addrConverter: config.AddrConverter, + hasher: config.Hasher, + marshalizer: config.Marshalizer, + multiSigner: config.MultiSigner, + singleSigner: config.SingleSigner, + keyGen: config.KeyGen, + uint64ByteSliceConverter: config.Uint64ByteSliceConverter, + }, nil +} + +// CreateInterceptors creates the interceptors and initializes the interceptor container +func (ir *interceptorsResolvers) CreateInterceptors() error { + err := ir.createTxInterceptor() + if err != nil { + return err + } + + err = ir.createHdrInterceptor() 
+ if err != nil { + return err + } + + err = ir.createTxBlockBodyInterceptor() + if err != nil { + return err + } + + err = ir.createPeerChBlockBodyInterceptor() + if err != nil { + return err + } + + err = ir.createStateBlockBodyInterceptor() + if err != nil { + return err + } + + return nil +} + +// CreateResolvers creates the resolvers and initializes the resolver container +func (ir *interceptorsResolvers) CreateResolvers() error { + err := ir.createTxResolver() + if err != nil { + return err + } + + err = ir.createHdrResolver() + if err != nil { + return err + } + + err = ir.createTxBlockBodyResolver() + if err != nil { + return err + } + + err = ir.createPeerChBlockBodyResolver() + if err != nil { + return err + } + + err = ir.createStateBlockBodyResolver() + if err != nil { + return err + } + + return nil +} + +// InterceptorContainer is a getter for interceptorContainer property +func (ir *interceptorsResolvers) InterceptorContainer() process.Container { + return ir.interceptorContainer +} + +// ResolverContainer is a getter for resolverContainer property +func (ir *interceptorsResolvers) ResolverContainer() process.ResolversContainer { + return ir.resolverContainer +} + +func (ir *interceptorsResolvers) createTxInterceptor() error { + txStorer := ir.blockchain.GetStorer(blockchain.TransactionUnit) + + txInterceptor, err := transaction.NewTxInterceptor( + ir.marshalizer, + ir.dataPool.Transactions(), + txStorer, + ir.addrConverter, + ir.hasher, + ir.singleSigner, + ir.keyGen, + ir.shardCoordinator) + + if err != nil { + return err + } + + err = ir.createTopicAndAssignHandler(string(TransactionTopic), txInterceptor, true) + if err != nil { + return err + } + + err = ir.interceptorContainer.Add(string(TransactionTopic), txInterceptor) + return err +} + +func (ir *interceptorsResolvers) createHdrInterceptor() error { + headerStorer := ir.blockchain.GetStorer(blockchain.BlockHeaderUnit) + + hdrInterceptor, err := interceptors.NewHeaderInterceptor( + 
ir.marshalizer, + ir.dataPool.Headers(), + ir.dataPool.HeadersNonces(), + headerStorer, + ir.multiSigner, + ir.hasher, + ir.shardCoordinator, + ) + + if err != nil { + return err + } + + err = ir.createTopicAndAssignHandler(string(HeadersTopic), hdrInterceptor, true) + if err != nil { + return err + } + + err = ir.interceptorContainer.Add(string(HeadersTopic), hdrInterceptor) + return err +} + +func (ir *interceptorsResolvers) createTxBlockBodyInterceptor() error { + txBlockBodyStorer := ir.blockchain.GetStorer(blockchain.TxBlockBodyUnit) + + txBlockBodyInterceptor, err := interceptors.NewTxBlockBodyInterceptor( + ir.marshalizer, + ir.dataPool.TxBlocks(), + txBlockBodyStorer, + ir.hasher, + ir.shardCoordinator, + ) + + if err != nil { + return err + } + + err = ir.createTopicAndAssignHandler(string(TxBlockBodyTopic), txBlockBodyInterceptor, true) + if err != nil { + return err + } + + err = ir.interceptorContainer.Add(string(TxBlockBodyTopic), txBlockBodyInterceptor) + return err +} + +func (ir *interceptorsResolvers) createPeerChBlockBodyInterceptor() error { + peerBlockBodyStorer := ir.blockchain.GetStorer(blockchain.PeerBlockBodyUnit) + + peerChBodyInterceptor, err := interceptors.NewPeerBlockBodyInterceptor( + ir.marshalizer, + ir.dataPool.PeerChangesBlocks(), + peerBlockBodyStorer, + ir.hasher, + ir.shardCoordinator, + ) + + if err != nil { + return err + } + + err = ir.createTopicAndAssignHandler(string(PeerChBodyTopic), peerChBodyInterceptor, true) + if err != nil { + return err + } + + err = ir.interceptorContainer.Add(string(PeerChBodyTopic), peerChBodyInterceptor) + return err +} + +func (ir *interceptorsResolvers) createStateBlockBodyInterceptor() error { + stateBlockBodyStorer := ir.blockchain.GetStorer(blockchain.StateBlockBodyUnit) + + stateBodyInterceptor, err := interceptors.NewStateBlockBodyInterceptor( + ir.marshalizer, + ir.dataPool.StateBlocks(), + stateBlockBodyStorer, + ir.hasher, + ir.shardCoordinator, + ) + + if err != nil { + return err + } 
+ + err = ir.createTopicAndAssignHandler(string(StateBodyTopic), stateBodyInterceptor, true) + if err != nil { + return err + } + + err = ir.interceptorContainer.Add(string(StateBodyTopic), stateBodyInterceptor) + return err +} + +func (ir *interceptorsResolvers) createTxResolver() error { + resolverSender, err := topicResolverSender.NewTopicResolverSender( + ir.messenger, + string(TransactionTopic), + ir.marshalizer) + if err != nil { + return err + } + + txResolver, err := transaction.NewTxResolver( + resolverSender, + ir.dataPool.Transactions(), + ir.blockchain.GetStorer(blockchain.TransactionUnit), + ir.marshalizer) + + if err != nil { + return err + } + + //add on the request topic + err = ir.createTopicAndAssignHandler( + string(TransactionTopic)+resolverSender.RequestTopicSuffix(), + txResolver, + false) + if err != nil { + return err + } + + err = ir.resolverContainer.Add(string(TransactionTopic), txResolver) + return err +} + +func (ir *interceptorsResolvers) createHdrResolver() error { + resolverSender, err := topicResolverSender.NewTopicResolverSender( + ir.messenger, + string(HeadersTopic), + ir.marshalizer) + if err != nil { + return err + } + + hdrResolver, err := resolvers.NewHeaderResolver( + resolverSender, + ir.dataPool, + ir.blockchain.GetStorer(blockchain.BlockHeaderUnit), + ir.marshalizer, + ir.uint64ByteSliceConverter) + + if err != nil { + return err + } + + //add on the request topic + err = ir.createTopicAndAssignHandler( + string(HeadersTopic)+resolverSender.RequestTopicSuffix(), + hdrResolver, + false) + if err != nil { + return err + } + + err = ir.resolverContainer.Add(string(HeadersTopic), hdrResolver) + return err +} + +func (ir *interceptorsResolvers) createTxBlockBodyResolver() error { + resolverSender, err := topicResolverSender.NewTopicResolverSender( + ir.messenger, + string(TxBlockBodyTopic), + ir.marshalizer) + if err != nil { + return err + } + + txBlkResolver, err := resolvers.NewGenericBlockBodyResolver( + resolverSender, + 
ir.dataPool.TxBlocks(), + ir.blockchain.GetStorer(blockchain.TxBlockBodyUnit), + ir.marshalizer) + + if err != nil { + return err + } + + //add on the request topic + err = ir.createTopicAndAssignHandler( + string(TxBlockBodyTopic)+resolverSender.RequestTopicSuffix(), + txBlkResolver, + false) + if err != nil { + return err + } + + err = ir.resolverContainer.Add(string(TxBlockBodyTopic), txBlkResolver) + return err +} + +func (ir *interceptorsResolvers) createPeerChBlockBodyResolver() error { + resolverSender, err := topicResolverSender.NewTopicResolverSender( + ir.messenger, + string(PeerChBodyTopic), + ir.marshalizer) + if err != nil { + return err + } + + peerChBlkResolver, err := resolvers.NewGenericBlockBodyResolver( + resolverSender, + ir.dataPool.PeerChangesBlocks(), + ir.blockchain.GetStorer(blockchain.PeerBlockBodyUnit), + ir.marshalizer) + + if err != nil { + return err + } + + //add on the request topic + err = ir.createTopicAndAssignHandler( + string(PeerChBodyTopic)+resolverSender.RequestTopicSuffix(), + peerChBlkResolver, + false) + if err != nil { + return err + } + + err = ir.resolverContainer.Add(string(PeerChBodyTopic), peerChBlkResolver) + return err +} + +func (ir *interceptorsResolvers) createStateBlockBodyResolver() error { + resolverSender, err := topicResolverSender.NewTopicResolverSender( + ir.messenger, + string(StateBodyTopic), + ir.marshalizer) + if err != nil { + return err + } + + stateBlkResolver, err := resolvers.NewGenericBlockBodyResolver( + resolverSender, + ir.dataPool.StateBlocks(), + ir.blockchain.GetStorer(blockchain.StateBlockBodyUnit), + ir.marshalizer) + + if err != nil { + return err + } + + //add on the request topic + err = ir.createTopicAndAssignHandler( + string(StateBodyTopic)+resolverSender.RequestTopicSuffix(), + stateBlkResolver, + false) + if err != nil { + return err + } + + err = ir.resolverContainer.Add(string(StateBodyTopic), stateBlkResolver) + return err +} + +func validateRequiredProcessCreatorParams(config 
InterceptorsResolversConfig) error { + if config.InterceptorContainer == nil { + return process.ErrNilInterceptorContainer + } + if config.ResolverContainer == nil { + return process.ErrNilResolverContainer + } + if config.Messenger == nil { + return process.ErrNilMessenger + } + if config.Blockchain == nil { + return process.ErrNilBlockChain + } + if config.DataPool == nil { + return process.ErrNilDataPoolHolder + } + if config.ShardCoordinator == nil { + return process.ErrNilShardCoordinator + } + if config.AddrConverter == nil { + return process.ErrNilAddressConverter + } + if config.Hasher == nil { + return process.ErrNilHasher + } + if config.Marshalizer == nil { + return process.ErrNilMarshalizer + } + if config.SingleSigner == nil { + return process.ErrNilSingleSigner + } + if config.MultiSigner == nil { + return process.ErrNilMultiSigVerifier + } + if config.KeyGen == nil { + return process.ErrNilKeyGen + } + if config.Uint64ByteSliceConverter == nil { + return process.ErrNilUint64ByteSliceConverter + } + + return nil +} + +func (ir *interceptorsResolvers) createTopicAndAssignHandler(topic string, handler p2p.MessageProcessor, createPipe bool) error { + err := ir.messenger.CreateTopic(topic, createPipe) + if err != nil { + return err + } + + return ir.messenger.RegisterMessageProcessor(topic, handler) +} diff --git a/process/factory/interceptorsResolvers_test.go b/process/factory/interceptorsResolvers_test.go new file mode 100644 index 00000000000..02ebf21f979 --- /dev/null +++ b/process/factory/interceptorsResolvers_test.go @@ -0,0 +1,352 @@ +package factory_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go-sandbox/data" + "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" + "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" + 
"github.com/ElrondNetwork/elrond-go-sandbox/storage" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +//------- NewInterceptorsResolversCreator + +func TestNewInterceptorsResolversCreator_NilInterceptorContainerShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.InterceptorContainer = nil + pFactory, err := factory.NewInterceptorsResolversCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.Equal(t, process.ErrNilInterceptorContainer, err) +} + +func TestNewInterceptorsResolversCreator_NilResolverContainerShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.ResolverContainer = nil + pFactory, err := factory.NewInterceptorsResolversCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.Equal(t, process.ErrNilResolverContainer, err) +} + +func TestNewInterceptorsResolversCreator_NilMessengerShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.Messenger = nil + pFactory, err := factory.NewInterceptorsResolversCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.Equal(t, process.ErrNilMessenger, err) +} + +func TestNewInterceptorsResolversCreator_NilBlockchainShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.Blockchain = nil + pFactory, err := factory.NewInterceptorsResolversCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.Equal(t, process.ErrNilBlockChain, err) +} + +func TestNewInterceptorsResolversCreator_NilDataPoolShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.DataPool = nil + pFactory, err := factory.NewInterceptorsResolversCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.Equal(t, process.ErrNilDataPoolHolder, err) +} + +func TestNewInterceptorsResolversCreator_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + 
pFactoryConfig.ShardCoordinator = nil + pFactory, err := factory.NewInterceptorsResolversCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewInterceptorsResolversCreator_NilAddrConverterShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.AddrConverter = nil + pFactory, err := factory.NewInterceptorsResolversCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.Equal(t, process.ErrNilAddressConverter, err) +} + +func TestNewInterceptorsResolversCreator_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.Hasher = nil + pFactory, err := factory.NewInterceptorsResolversCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.Equal(t, process.ErrNilHasher, err) +} + +func TestNewInterceptorsResolversCreator_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.Marshalizer = nil + pFactory, err := factory.NewInterceptorsResolversCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewInterceptorsResolversCreator_NilKeyGenShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.KeyGen = nil + pFactory, err := factory.NewInterceptorsResolversCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.Equal(t, process.ErrNilKeyGen, err) +} + +func TestNewInterceptorsResolversCreator_NilSingleSignerShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.SingleSigner = nil + pFactory, err := factory.NewInterceptorsResolversCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.Equal(t, process.ErrNilSingleSigner, err) +} + +func TestNewInterceptorsResolversCreator_NilMultiSignerShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.MultiSigner = nil + pFactory, 
err := factory.NewInterceptorsResolversCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.Equal(t, process.ErrNilMultiSigVerifier, err) +} + +func TestNewInterceptorsResolversCreator_NilUint64ByteSliceConverterShouldErr(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactoryConfig.Uint64ByteSliceConverter = nil + pFactory, err := factory.NewInterceptorsResolversCreator(pFactoryConfig) + + assert.Nil(t, pFactory) + assert.Equal(t, process.ErrNilUint64ByteSliceConverter, err) +} + +func TestNewInterceptorsResolversCreator_ShouldWork(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + + pFactory, err := factory.NewInterceptorsResolversCreator(pFactoryConfig) + assert.NotNil(t, pFactory) + assert.Nil(t, err) +} + +func TestNewInterceptorsResolversCreator_ShouldNotModifyContainerPointers(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + + cm1 := &mock.ObjectsContainerStub{} + cm2 := &mock.ResolversContainerStub{} + + pFactoryConfig.InterceptorContainer = cm1 + pFactoryConfig.ResolverContainer = cm2 + + pFactory, _ := factory.NewInterceptorsResolversCreator(pFactoryConfig) + + assert.True(t, cm1 == pFactory.InterceptorContainer()) + assert.True(t, cm2 == pFactory.ResolverContainer()) +} + +//------- CreateInterceptors + +func TestInterceptorsResolversCreator_CreateInterceptorsReturnsSuccessfully(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactory, err := factory.NewInterceptorsResolversCreator(pFactoryConfig) + assert.Nil(t, err) + + err = pFactory.CreateInterceptors() + assert.Nil(t, err) +} + +func TestInterceptorsResolversCreator_CreateInterceptorsNewTopicInterceptorErrorsWillMakeCreateInterceptorsError(t *testing.T) { + t.Parallel() + + errExpected := errors.New("expected error") + + pFactoryConfig := createConfig() + pFactoryConfig.Messenger = &mock.MessengerStub{ + HasTopicCalled: func(name string) bool { + return true + }, + HasTopicValidatorCalled: func(name string) 
bool { + return false + }, + RegisterMessageProcessorCalled: func(topic string, handler p2p.MessageProcessor) error { + return errExpected + }, + CreateTopicCalled: func(name string, createPipeForTopic bool) error { + return nil + }, + } + pFactory, _ := factory.NewInterceptorsResolversCreator(pFactoryConfig) + + err := pFactory.CreateInterceptors() + assert.Equal(t, errExpected, err) +} + +func TestInterceptorsResolversCreator_CreateResolversReturnsSuccessfully(t *testing.T) { + t.Parallel() + + pFactoryConfig := createConfig() + pFactory, err := factory.NewInterceptorsResolversCreator(pFactoryConfig) + assert.Nil(t, err) + + err = pFactory.CreateResolvers() + assert.Nil(t, err) +} + +//------- CreateResolvers + +func TestInterceptorsResolversCreator_CreateResolversNewTopicInterceptorErrorsWillMakeCreateInterceptorsError(t *testing.T) { + t.Parallel() + + errExpected := errors.New("expected error") + + pFactoryConfig := createConfig() + pFactoryConfig.Messenger = &mock.MessengerStub{ + HasTopicCalled: func(name string) bool { + return true + }, + HasTopicValidatorCalled: func(name string) bool { + return false + }, + RegisterMessageProcessorCalled: func(topic string, handler p2p.MessageProcessor) error { + return errExpected + }, + CreateTopicCalled: func(name string, createPipeForTopic bool) error { + return nil + }, + } + + pFactory, _ := factory.NewInterceptorsResolversCreator(pFactoryConfig) + + err := pFactory.CreateResolvers() + assert.Equal(t, errExpected, err) +} + +func createConfig() factory.InterceptorsResolversConfig { + + mockMessenger := createMessenger() + mockTransientDataPool := createDataPool() + mockInterceptorContainer := &mock.ObjectsContainerStub{ + AddCalled: func(key string, val interface{}) error { + return nil + }, + } + mockResolverContainer := &mock.ResolversContainerStub{ + AddCalled: func(key string, val process.Resolver) error { + return nil + }, + } + + return factory.InterceptorsResolversConfig{ + InterceptorContainer: 
mockInterceptorContainer, + ResolverContainer: mockResolverContainer, + Messenger: mockMessenger, + DataPool: mockTransientDataPool, + Blockchain: createBlockchain(), + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + AddrConverter: &mock.AddressConverterMock{}, + Hasher: mock.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + MultiSigner: mock.NewMultiSigner(), + SingleSigner: &mock.SignerMock{}, + KeyGen: &mock.SingleSignKeyGenMock{}, + Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, + } +} + +func createBlockchain() *blockchain.BlockChain { + blkc, _ := blockchain.NewBlockChain( + &mock.CacherStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}, + &mock.StorerStub{}) + + return blkc +} + +func createMessenger() p2p.Messenger { + mockMessenger := &mock.MessengerStub{ + HasTopicCalled: func(name string) bool { + return true + }, + HasTopicValidatorCalled: func(name string) bool { + return false + }, + RegisterMessageProcessorCalled: func(topic string, handler p2p.MessageProcessor) error { + return nil + }, + CreateTopicCalled: func(name string, createPipeForTopic bool) error { + return nil + }, + } + + return mockMessenger +} + +func createDataPool() data.TransientDataHolder { + mockTransientDataPool := &mock.TransientDataPoolMock{} + mockTransientDataPool.TransactionsCalled = func() data.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + mockTransientDataPool.HeadersCalled = func() data.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + mockTransientDataPool.HeadersNoncesCalled = func() data.Uint64Cacher { + return &mock.Uint64CacherStub{} + } + mockTransientDataPool.TxBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + mockTransientDataPool.PeerChangesBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + mockTransientDataPool.StateBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + return 
mockTransientDataPool +} diff --git a/process/interceptor/container.go b/process/interceptor/container.go deleted file mode 100644 index 3eed5b3bf5d..00000000000 --- a/process/interceptor/container.go +++ /dev/null @@ -1,78 +0,0 @@ -package interceptor - -import ( - "sync" - - "github.com/ElrondNetwork/elrond-go-sandbox/process" -) - -// Container is a holder for interceptors organized by type -type Container struct { - mutex sync.RWMutex - interceptors map[string]process.Interceptor -} - -// NewContainer will create a new instance of an inteceptor container -func NewContainer() *Container { - return &Container{ - mutex: sync.RWMutex{}, - interceptors: make(map[string]process.Interceptor), - } -} - -// Get returns the interceptor stored at a certain key. -// Returns an error if the element does not exist -func (i *Container) Get(key string) (process.Interceptor, error) { - i.mutex.RLock() - interceptor, ok := i.interceptors[key] - i.mutex.RUnlock() - if !ok { - return nil, process.ErrInvalidContainerKey - } - return interceptor, nil -} - -// Add will add an interceptor at a given key. 
Returns -// an error if the element already exists -func (i *Container) Add(key string, interceptor process.Interceptor) error { - if interceptor == nil { - return process.ErrNilContainerElement - } - i.mutex.Lock() - defer i.mutex.Unlock() - - _, ok := i.interceptors[key] - - if ok { - return process.ErrContainerKeyAlreadyExists - } - - i.interceptors[key] = interceptor - return nil -} - -// Replace will add (or replace if it already exists) an interceptor at a given key -func (i *Container) Replace(key string, interceptor process.Interceptor) error { - if interceptor == nil { - return process.ErrNilContainerElement - } - i.mutex.Lock() - i.interceptors[key] = interceptor - i.mutex.Unlock() - return nil -} - -// Remove will remove an interceptor at a given key -func (i *Container) Remove(key string) { - i.mutex.Lock() - delete(i.interceptors, key) - i.mutex.Unlock() -} - -// Len returns the length of the added interceptors -func (i *Container) Len() int { - i.mutex.RLock() - l := len(i.interceptors) - i.mutex.RUnlock() - return l -} diff --git a/process/interceptor/export_test.go b/process/interceptor/export_test.go deleted file mode 100644 index 0f66014a2d9..00000000000 --- a/process/interceptor/export_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package interceptor - -import ( - "context" - - "github.com/libp2p/go-libp2p-pubsub" -) - -func (ti *topicInterceptor) Validator(ctx context.Context, message *pubsub.Message) bool { - return ti.validator(ctx, message) -} diff --git a/process/interceptor/topicInterceptor.go b/process/interceptor/topicInterceptor.go deleted file mode 100644 index 4b5189abbe5..00000000000 --- a/process/interceptor/topicInterceptor.go +++ /dev/null @@ -1,122 +0,0 @@ -package interceptor - -import ( - "context" - - "github.com/ElrondNetwork/elrond-go-sandbox/logger" - "github.com/ElrondNetwork/elrond-go-sandbox/marshal" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/ElrondNetwork/elrond-go-sandbox/process" - 
"github.com/libp2p/go-libp2p-pubsub" -) - -var log = logger.NewDefaultLogger() - -// TopicInterceptor is a struct coupled with a p2p.Topic that calls CheckReceivedObject whenever Messenger needs to validate -// the data -type topicInterceptor struct { - messenger p2p.Messenger - marshalizer marshal.Marshalizer - templateObject p2p.Creator - name string - - checkReceivedObject func(newer p2p.Creator, rawData []byte) error -} - -// NewTopicInterceptor returns a new data interceptor that runs coupled with p2p.Topics -func NewTopicInterceptor( - name string, - messenger p2p.Messenger, - templateObject p2p.Creator, -) (*topicInterceptor, error) { - - if messenger == nil { - return nil, process.ErrNilMessenger - } - - marshalizer := messenger.Marshalizer() - - if marshalizer == nil { - return nil, process.ErrNilMarshalizer - } - - if templateObject == nil { - return nil, process.ErrNilNewer - } - - topic, err := getOrCreateTopic(name, templateObject, messenger) - if err != nil { - return nil, err - } - - intercept := topicInterceptor{ - messenger: messenger, - templateObject: templateObject, - name: name, - marshalizer: marshalizer, - } - - err = topic.RegisterValidator(intercept.validator) - if err != nil { - return nil, process.ErrRegisteringValidator - } - - return &intercept, nil -} - -func getOrCreateTopic(name string, templateObject p2p.Creator, messenger p2p.Messenger) (*p2p.Topic, error) { - existingTopic := messenger.GetTopic(name) - - if existingTopic != nil { - return existingTopic, nil - } - - topic := p2p.NewTopic(name, templateObject, messenger.Marshalizer()) - return topic, messenger.AddTopic(topic) -} - -func (ti *topicInterceptor) validator(ctx context.Context, message *pubsub.Message) bool { - obj := ti.templateObject.Create() - - err := ti.marshalizer.Unmarshal(obj, message.GetData()) - - if err != nil { - log.Debug(err.Error()) - return false - } - - if ti.checkReceivedObject == nil { - log.Error("nil checkReceivedObject handler") - return false - } - 
- err = ti.checkReceivedObject(obj, message.GetData()) - if err != nil { - log.Debug(err.Error()) - return false - } - - return true -} - -// Name returns the name of the interceptor -func (ti *topicInterceptor) Name() string { - return ti.name -} - -// SetCheckReceivedObjectHandler sets the handler that gets called each time new data arrives in a form of -// a newer object -func (ti *topicInterceptor) SetCheckReceivedObjectHandler(handler func(newer p2p.Creator, rawData []byte) error) { - ti.checkReceivedObject = handler -} - -// CheckReceivedObjectHandler returns the handler that gets called each time new data arrives in a form of -// a newer object -func (ti *topicInterceptor) CheckReceivedObjectHandler() func(newer p2p.Creator, rawData []byte) error { - return ti.checkReceivedObject -} - -// Marshalizer returns the marshalizer used to unmarshal the received data -func (ti *topicInterceptor) Marshalizer() marshal.Marshalizer { - return ti.marshalizer -} diff --git a/process/interceptor/topicInterceptor_test.go b/process/interceptor/topicInterceptor_test.go deleted file mode 100644 index 98a4342ad0d..00000000000 --- a/process/interceptor/topicInterceptor_test.go +++ /dev/null @@ -1,294 +0,0 @@ -package interceptor_test - -import ( - "testing" - - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/ElrondNetwork/elrond-go-sandbox/process" - "github.com/ElrondNetwork/elrond-go-sandbox/process/interceptor" - "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" - "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p-pubsub/pb" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" -) - -//------- NewTopicInterceptor - -func TestNewTopicInterceptor_NilMessengerShouldErr(t *testing.T) { - ti, err := interceptor.NewTopicInterceptor("", nil, &mock.StringCreator{}) - assert.Equal(t, process.ErrNilMessenger, err) - assert.Nil(t, ti) -} - -func TestNewTopicInterceptor_NilTemplateObjectShouldErr(t *testing.T) { - ti, err := 
interceptor.NewTopicInterceptor("", mock.NewMessengerStub(), nil) - assert.Equal(t, process.ErrNilNewer, err) - assert.Nil(t, ti) -} - -func TestNewTopicInterceptor_ErrMessengerAddTopicShouldErr(t *testing.T) { - mes := mock.NewMessengerStub() - mes.GetTopicCalled = func(name string) *p2p.Topic { - return nil - } - - mes.AddTopicCalled = func(t *p2p.Topic) error { - return errors.New("failure") - } - - ti, err := interceptor.NewTopicInterceptor("", mes, &mock.StringCreator{}) - assert.NotNil(t, err) - assert.Nil(t, ti) -} - -func TestNewTopicInterceptor_ErrMessengerRegistrationValidatorShouldErr(t *testing.T) { - mes := mock.NewMessengerStub() - mes.GetTopicCalled = func(name string) *p2p.Topic { - return nil - } - - mes.AddTopicCalled = func(t *p2p.Topic) error { - return nil - } - - ti, err := interceptor.NewTopicInterceptor("", mes, &mock.StringCreator{}) - assert.Equal(t, process.ErrRegisteringValidator, err) - assert.Nil(t, ti) -} - -func TestNewTopicInterceptor_NilMessengerMarshalizerShouldErr(t *testing.T) { - mes := &mock.MessengerStub{} - - mes.AddTopicCalled = func(t *p2p.Topic) error { - t.RegisterTopicValidator = func(v pubsub.Validator) error { - return nil - } - - return nil - } - - mes.GetTopicCalled = func(name string) *p2p.Topic { - return nil - } - - ti, err := interceptor.NewTopicInterceptor("", mes, &mock.StringCreator{}) - assert.Nil(t, ti, err) - assert.Equal(t, process.ErrNilMarshalizer, err) -} - -func TestNewTopicInterceptor_OkValsShouldWork(t *testing.T) { - mes := mock.NewMessengerStub() - - wasCalled := false - mes.AddTopicCalled = func(t *p2p.Topic) error { - t.RegisterTopicValidator = func(v pubsub.Validator) error { - wasCalled = true - - return nil - } - - return nil - } - - mes.GetTopicCalled = func(name string) *p2p.Topic { - return nil - } - - ti, err := interceptor.NewTopicInterceptor("", mes, &mock.StringCreator{}) - assert.Nil(t, err) - assert.True(t, wasCalled) - assert.NotNil(t, ti) -} - -func 
TestNewTopicInterceptor_CompareMarshlizersShouldEqual(t *testing.T) { - mes := mock.NewMessengerStub() - - mes.AddTopicCalled = func(t *p2p.Topic) error { - t.RegisterTopicValidator = func(v pubsub.Validator) error { - return nil - } - - return nil - } - - mes.GetTopicCalled = func(name string) *p2p.Topic { - return nil - } - - ti, _ := interceptor.NewTopicInterceptor("", mes, &mock.StringCreator{}) - assert.True(t, ti.Marshalizer() == mes.Marshalizer()) -} - -func TestNewTopicInterceptor_WithExistingTopicShouldWork(t *testing.T) { - mes := mock.NewMessengerStub() - - wasCalled := false - - topicName := "test" - - topic := p2p.NewTopic(topicName, &mock.StringCreator{}, mes.Marshalizer()) - topic.RegisterTopicValidator = func(v pubsub.Validator) error { - wasCalled = true - - return nil - } - - mes.AddTopicCalled = func(topic *p2p.Topic) error { - assert.Fail(t, "should have not reached this point") - - return nil - } - - mes.GetTopicCalled = func(name string) *p2p.Topic { - if name == topicName { - return topic - } - - return nil - } - - ti, err := interceptor.NewTopicInterceptor(topicName, mes, &mock.StringCreator{}) - assert.Nil(t, err) - assert.True(t, wasCalled) - assert.NotNil(t, ti) -} - -//------- Validation - -func TestTopicInterceptor_ValidationMalfunctionMarshalizerReturnFalse(t *testing.T) { - mes := mock.NewMessengerStub() - - var topic *p2p.Topic - - mes.AddTopicCalled = func(t *p2p.Topic) error { - topic = t - topic.RegisterTopicValidator = func(v pubsub.Validator) error { - return nil - } - - return nil - } - - mes.GetTopicCalled = func(name string) *p2p.Topic { - return nil - } - - ti, _ := interceptor.NewTopicInterceptor("", mes, &mock.StringCreator{}) - - //we have the validator func, let's test with a broken marshalizer - mes.Marshalizer().(*mock.MarshalizerMock).Fail = true - - blankMessage := pubsub.Message{} - - assert.False(t, ti.Validator(nil, &blankMessage)) -} - -func TestTopicInterceptor_ValidationNilCheckReceivedObjectReturnFalse(t 
*testing.T) { - mes := mock.NewMessengerStub() - - var topic *p2p.Topic - - mes.AddTopicCalled = func(t *p2p.Topic) error { - topic = t - topic.RegisterTopicValidator = func(v pubsub.Validator) error { - return nil - } - - return nil - } - - mes.GetTopicCalled = func(name string) *p2p.Topic { - return nil - } - - ti, _ := interceptor.NewTopicInterceptor("", mes, &mock.StringCreator{}) - - //we have the validator func, let's test with a message - objToMarshalizeUnmarshalize := &mock.StringCreator{Data: "test data"} - - message := &pubsub.Message{Message: &pubsub_pb.Message{}} - data, err := mes.Marshalizer().Marshal(objToMarshalizeUnmarshalize) - assert.Nil(t, err) - message.Data = data - - assert.False(t, ti.Validator(nil, message)) -} - -func TestTopicInterceptor_ValidationCheckReceivedObjectFalseReturnFalse(t *testing.T) { - mes := mock.NewMessengerStub() - - var topic *p2p.Topic - - mes.AddTopicCalled = func(t *p2p.Topic) error { - topic = t - topic.RegisterTopicValidator = func(v pubsub.Validator) error { - return nil - } - - return nil - } - - mes.GetTopicCalled = func(name string) *p2p.Topic { - return nil - } - - intercept, _ := interceptor.NewTopicInterceptor("", mes, &mock.StringCreator{}) - - wasCalled := false - - intercept.SetCheckReceivedObjectHandler(func(newer p2p.Creator, rawData []byte) error { - wasCalled = true - return errors.New("err1") - }) - - //we have the validator func, let's test with a message - objToMarshalizeUnmarshalize := &mock.StringCreator{Data: "test data"} - - message := &pubsub.Message{Message: &pubsub_pb.Message{}} - data, err := mes.Marshalizer().Marshal(objToMarshalizeUnmarshalize) - assert.Nil(t, err) - message.Data = data - - assert.False(t, intercept.Validator(nil, message)) - assert.True(t, wasCalled) -} - -func TestTopicInterceptor_ValidationCheckReceivedObjectTrueReturnTrue(t *testing.T) { - mes := mock.NewMessengerStub() - - var topic *p2p.Topic - - mes.AddTopicCalled = func(t *p2p.Topic) error { - topic = t - 
topic.RegisterTopicValidator = func(v pubsub.Validator) error { - return nil - } - - return nil - } - - mes.GetTopicCalled = func(name string) *p2p.Topic { - return nil - } - - intercept, _ := interceptor.NewTopicInterceptor("", mes, &mock.StringCreator{}) - - wasCalled := false - - intercept.SetCheckReceivedObjectHandler(func(newer p2p.Creator, rawData []byte) error { - wasCalled = true - return nil - }) - - //we have the validator func, let's test with a message - objToMarshalizeUnmarshalize := &mock.StringCreator{Data: "test data"} - - message := &pubsub.Message{Message: &pubsub_pb.Message{}} - data, err := mes.Marshalizer().Marshal(objToMarshalizeUnmarshalize) - assert.Nil(t, err) - message.Data = data - - assert.True(t, intercept.Validator(nil, message)) - assert.True(t, wasCalled) -} diff --git a/process/interface.go b/process/interface.go index d9cd625fcd2..9d7834124da 100644 --- a/process/interface.go +++ b/process/interface.go @@ -4,12 +4,10 @@ import ( "math/big" "time" - "github.com/ElrondNetwork/elrond-go-sandbox/crypto" "github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" "github.com/ElrondNetwork/elrond-go-sandbox/data/state" "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" - "github.com/ElrondNetwork/elrond-go-sandbox/marshal" "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/sharding" ) @@ -62,59 +60,40 @@ type HashAccesser interface { Hash() []byte } -// TransactionInterceptorAdapter is the interface used in interception of transactions -type TransactionInterceptorAdapter interface { - Checker - SigVerifier - HashAccesser - p2p.Creator - RcvShard() uint32 - SndShard() uint32 - IsAddressedToOtherShards() bool - SetAddressConverter(converter state.AddressConverter) - AddressConverter() state.AddressConverter - GetTransaction() *transaction.Transaction - SingleSignKeyGen() crypto.KeyGenerator - SetSingleSignKeyGen(generator 
crypto.KeyGenerator) - SetTxBuffWithoutSig(txBuffWithoutSig []byte) - TxBuffWithoutSig() []byte -} - -// BlockBodyInterceptorAdapter defines what a block body object should do -type BlockBodyInterceptorAdapter interface { +// InterceptedBlockBody interface provides functionality over intercepted blocks +type InterceptedBlockBody interface { Checker HashAccesser - p2p.Creator - Shard() uint32 GetUnderlyingObject() interface{} } -// HeaderInterceptorAdapter is the interface used in interception of headers -type HeaderInterceptorAdapter interface { - BlockBodyInterceptorAdapter - SigVerifier - GetHeader() *block.Header +// IntRandomizer interface provides functionality over generating integer numbers +type IntRandomizer interface { + Intn(n int) int +} + +// Resolver defines what a data resolver should do +type Resolver interface { + RequestDataFromHash(hash []byte) error + ProcessReceivedMessage(message p2p.MessageP2P) error } -// Interceptor defines what a data interceptor should do -type Interceptor interface { - Name() string - SetCheckReceivedObjectHandler(func(newer p2p.Creator, rawData []byte) error) - CheckReceivedObjectHandler() func(newer p2p.Creator, rawData []byte) error - Marshalizer() marshal.Marshalizer +// HeaderResolver defines what a block header resolver should do +type HeaderResolver interface { + Resolver + RequestDataFromNonce(nonce uint64) error } -// Resolver is an interface that defines the behaviour of a struct that is able -// to send data requests to other entities and to resolve requests that came from those other entities -type Resolver interface { - RequestData(rd RequestData) error - SetResolverHandler(func(rd RequestData) ([]byte, error)) - ResolverHandler() func(rd RequestData) ([]byte, error) +// TopicResolverSender defines what sending operations are allowed for a topic resolver +type TopicResolverSender interface { + SendOnRequestTopic(rd *RequestData) error + Send(buff []byte, peer p2p.PeerID) error + RequestTopicSuffix() string } 
-// Bootstraper is an interface that defines the behaviour of a struct that is able -// to syncronize the node -type Bootstraper interface { +// Bootstrapper is an interface that defines the behaviour of a struct that is able +// to synchronize the node +type Bootstrapper interface { CreateAndCommitEmptyBlock(uint32) (*block.TxBlockBody, *block.Header) AddSyncStateListener(func(bool)) ShouldSync() bool @@ -128,31 +107,34 @@ type ForkDetector interface { CheckFork() bool } -// InterceptorContainer is an interface that defines the beahaviour for a container -// holding a list of interceptors organized by type -type InterceptorContainer interface { - Get(key string) (Interceptor, error) - Add(key string, interceptor Interceptor) error - Replace(key string, interceptor Interceptor) error +// Container defines a holder data type with basic functionality +type Container interface { + Get(key string) (interface{}, error) + Add(key string, val interface{}) error + Replace(key string, val interface{}) error Remove(key string) Len() int } -// ResolverContainer is an interface that defines the beahaviour for a container -// holding a list of resolvers organized by type -type ResolverContainer interface { +// ResolversContainer defines a resolvers holder data type with basic functionality +type ResolversContainer interface { Get(key string) (Resolver, error) - Add(key string, resolver Resolver) error - Replace(key string, interceptor Resolver) error + Add(key string, val Resolver) error + Replace(key string, val Resolver) error Remove(key string) Len() int } -// ProcessorFactory is an interface that defines the behaviour for a factory that +// InterceptorsResolversFactory is an interface that defines the behaviour for a factory that // can create the needed interceptors and resolvers for the application -type ProcessorFactory interface { +type InterceptorsResolversFactory interface { CreateInterceptors() error CreateResolvers() error - InterceptorContainer() 
InterceptorContainer - ResolverContainer() ResolverContainer + InterceptorContainer() Container + ResolverContainer() ResolversContainer +} + +type WireMessageHandler interface { + ConnectedPeers() []p2p.PeerID + SendToConnectedPeer(topic string, buff []byte, peerID p2p.PeerID) error } diff --git a/process/mock/headerResolverMock.go b/process/mock/headerResolverMock.go new file mode 100644 index 00000000000..d2a1198ef7d --- /dev/null +++ b/process/mock/headerResolverMock.go @@ -0,0 +1,23 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" +) + +type HeaderResolverMock struct { + RequestDataFromHashCalled func(hash []byte) error + ProcessReceivedMessageCalled func(message p2p.MessageP2P) error + RequestDataFromNonceCalled func(nonce uint64) error +} + +func (hrm *HeaderResolverMock) RequestDataFromHash(hash []byte) error { + return hrm.RequestDataFromHashCalled(hash) +} + +func (hrm *HeaderResolverMock) ProcessReceivedMessage(message p2p.MessageP2P) error { + return hrm.ProcessReceivedMessageCalled(message) +} + +func (hrm *HeaderResolverMock) RequestDataFromNonce(nonce uint64) error { + return hrm.RequestDataFromNonceCalled(nonce) +} diff --git a/process/mock/intRandomizerMock.go b/process/mock/intRandomizerMock.go new file mode 100644 index 00000000000..14d32bf7ef8 --- /dev/null +++ b/process/mock/intRandomizerMock.go @@ -0,0 +1,9 @@ +package mock + +type IntRandomizerMock struct { + IntnCalled func(n int) int +} + +func (irm *IntRandomizerMock) Intn(n int) int { + return irm.IntnCalled(n) +} diff --git a/process/mock/interceptorConteinerMock.go b/process/mock/interceptorConteinerMock.go deleted file mode 100644 index ffaae271de1..00000000000 --- a/process/mock/interceptorConteinerMock.go +++ /dev/null @@ -1,42 +0,0 @@ -package mock - -import ( - "github.com/ElrondNetwork/elrond-go-sandbox/process" -) - -// InterceptorContainer is a struct that defines the beahaviour for a container -// holding a list of interceptors organized by type 
-type InterceptorContainer struct { - GetCalled func(key string) (process.Interceptor, error) - AddCalled func(key string, interceptor process.Interceptor) error - ReplaceCalled func(key string, interceptor process.Interceptor) error - RemoveCalled func(key string) - LenCalled func() int -} - -// Get returns the interceptor stored at a certain key. -// Returns an error if the element does not exist -func (i *InterceptorContainer) Get(key string) (process.Interceptor, error) { - return i.GetCalled(key) -} - -// Add will add an interceptor at a given key. Returns -// an error if the element already exists -func (i *InterceptorContainer) Add(key string, interceptor process.Interceptor) error { - return i.AddCalled(key, interceptor) -} - -// Replace will add (or replace if it already exists) an interceptor at a given key -func (i *InterceptorContainer) Replace(key string, interceptor process.Interceptor) error { - return i.ReplaceCalled(key, interceptor) -} - -// Remove will remove an interceptor at a given key -func (i *InterceptorContainer) Remove(key string) { - i.RemoveCalled(key) -} - -// Len returns the length of the added interceptors -func (i *InterceptorContainer) Len() int { - return i.LenCalled() -} diff --git a/process/mock/interceptorStub.go b/process/mock/interceptorStub.go deleted file mode 100644 index 54a25f9d482..00000000000 --- a/process/mock/interceptorStub.go +++ /dev/null @@ -1,29 +0,0 @@ -package mock - -import ( - "github.com/ElrondNetwork/elrond-go-sandbox/marshal" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" -) - -type InterceptorStub struct { - NameCalled func() string - SetCheckReceivedObjectHandlerCalled func(func(newer p2p.Creator, rawData []byte) error) - CheckReceivedObjectHandlerCalled func() func(newer p2p.Creator, rawData []byte) error - MarshalizerCalled func() marshal.Marshalizer -} - -func (is *InterceptorStub) Marshalizer() marshal.Marshalizer { - return is.MarshalizerCalled() -} - -func (is *InterceptorStub) Name() string { 
- return is.NameCalled() -} - -func (is *InterceptorStub) SetCheckReceivedObjectHandler(handler func(newer p2p.Creator, rawData []byte) error) { - is.SetCheckReceivedObjectHandlerCalled(handler) -} - -func (is *InterceptorStub) CheckReceivedObjectHandler() func(newer p2p.Creator, rawData []byte) error { - return is.CheckReceivedObjectHandlerCalled() -} diff --git a/process/mock/messengerStub.go b/process/mock/messengerStub.go index 873a6929efc..b454237751c 100644 --- a/process/mock/messengerStub.go +++ b/process/mock/messengerStub.go @@ -1,87 +1,103 @@ package mock import ( - "context" - "time" - - "github.com/ElrondNetwork/elrond-go-sandbox/hashing" - "github.com/ElrondNetwork/elrond-go-sandbox/marshal" "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/libp2p/go-libp2p-net" - "github.com/libp2p/go-libp2p-peer" - "github.com/multiformats/go-multiaddr" ) type MessengerStub struct { - marshalizer marshal.Marshalizer - HasherObj hashing.Hasher - AddTopicCalled func(t *p2p.Topic) error - GetTopicCalled func(name string) *p2p.Topic + CloseCalled func() error + IDCalled func() p2p.PeerID + PeersCalled func() []p2p.PeerID + AddressesCalled func() []string + ConnectToPeerCalled func(address string) error + KadDhtDiscoverNewPeersCalled func() error + TrimConnectionsCalled func() + IsConnectedCalled func(peerID p2p.PeerID) bool + ConnectedPeersCalled func() []p2p.PeerID + CreateTopicCalled func(name string, createPipeForTopic bool) error + HasTopicCalled func(name string) bool + HasTopicValidatorCalled func(name string) bool + BroadcastOnPipeCalled func(pipe string, topic string, buff []byte) + BroadcastCalled func(topic string, buff []byte) + RegisterMessageProcessorCalled func(topic string, handler p2p.MessageProcessor) error + UnregisterMessageProcessorCalled func(topic string) error + SendToConnectedPeerCalled func(topic string, buff []byte, peerID p2p.PeerID) error + OutgoingPipeLoadBalancerCalled func() p2p.PipeLoadBalancer + BootstrapCalled func() error 
+} + +func (ms *MessengerStub) RegisterMessageProcessor(topic string, handler p2p.MessageProcessor) error { + return ms.RegisterMessageProcessorCalled(topic, handler) +} + +func (ms *MessengerStub) UnregisterMessageProcessor(topic string) error { + return ms.UnregisterMessageProcessorCalled(topic) } -func NewMessengerStub() *MessengerStub { - return &MessengerStub{ - marshalizer: &MarshalizerMock{}, - HasherObj: HasherMock{}, - } +func (ms *MessengerStub) Broadcast(topic string, buff []byte) { + ms.BroadcastCalled(topic, buff) +} + +func (ms *MessengerStub) OutgoingPipeLoadBalancer() p2p.PipeLoadBalancer { + return ms.OutgoingPipeLoadBalancerCalled() } func (ms *MessengerStub) Close() error { - panic("implement me") + return ms.CloseCalled() } -func (ms *MessengerStub) ID() peer.ID { - panic("implement me") +func (ms *MessengerStub) ID() p2p.PeerID { + return ms.IDCalled() } -func (ms *MessengerStub) Peers() []peer.ID { - panic("implement me") +func (ms *MessengerStub) Peers() []p2p.PeerID { + return ms.PeersCalled() } -func (ms *MessengerStub) Conns() []net.Conn { - panic("implement me") +func (ms *MessengerStub) Addresses() []string { + return ms.AddressesCalled() } -func (ms *MessengerStub) Marshalizer() marshal.Marshalizer { - return ms.marshalizer +func (ms *MessengerStub) ConnectToPeer(address string) error { + return ms.ConnectToPeerCalled(address) } -func (ms *MessengerStub) Hasher() hashing.Hasher { - return ms.HasherObj +func (ms *MessengerStub) KadDhtDiscoverNewPeers() error { + return ms.KadDhtDiscoverNewPeersCalled() } -func (ms *MessengerStub) RouteTable() *p2p.RoutingTable { - panic("implement me") +func (ms *MessengerStub) TrimConnections() { + ms.TrimConnectionsCalled() } -func (ms *MessengerStub) Addresses() []string { - panic("implement me") +func (ms *MessengerStub) IsConnected(peerID p2p.PeerID) bool { + return ms.IsConnectedCalled(peerID) } -func (ms *MessengerStub) ConnectToAddresses(ctx context.Context, addresses []string) { - 
panic("implement me") +func (ms *MessengerStub) ConnectedPeers() []p2p.PeerID { + return ms.ConnectedPeersCalled() } -func (ms *MessengerStub) Bootstrap(ctx context.Context) { - panic("implement me") +func (ms *MessengerStub) CreateTopic(name string, createPipeForTopic bool) error { + return ms.CreateTopicCalled(name, createPipeForTopic) } -func (ms *MessengerStub) PrintConnected() { - panic("implement me") +func (ms *MessengerStub) HasTopic(name string) bool { + return ms.HasTopicCalled(name) } -func (ms *MessengerStub) AddAddress(p peer.ID, addr multiaddr.Multiaddr, ttl time.Duration) { - panic("implement me") +func (ms *MessengerStub) HasTopicValidator(name string) bool { + return ms.HasTopicValidatorCalled(name) } -func (ms *MessengerStub) Connectedness(pid peer.ID) net.Connectedness { - panic("implement me") +func (ms *MessengerStub) BroadcastOnPipe(pipe string, topic string, buff []byte) { + ms.BroadcastOnPipeCalled(pipe, topic, buff) } -func (ms *MessengerStub) GetTopic(topicName string) *p2p.Topic { - return ms.GetTopicCalled(topicName) +func (ms *MessengerStub) SendToConnectedPeer(topic string, buff []byte, peerID p2p.PeerID) error { + return ms.SendToConnectedPeerCalled(topic, buff, peerID) } -func (ms *MessengerStub) AddTopic(t *p2p.Topic) error { - return ms.AddTopicCalled(t) +func (ms *MessengerStub) Bootstrap() error { + return ms.BootstrapCalled() } diff --git a/process/mock/objectsContainerStub.go b/process/mock/objectsContainerStub.go new file mode 100644 index 00000000000..7b6d13cc3ce --- /dev/null +++ b/process/mock/objectsContainerStub.go @@ -0,0 +1,29 @@ +package mock + +type ObjectsContainerStub struct { + GetCalled func(key string) (interface{}, error) + AddCalled func(key string, val interface{}) error + ReplaceCalled func(key string, val interface{}) error + RemoveCalled func(key string) + LenCalled func() int +} + +func (ocs *ObjectsContainerStub) Get(key string) (interface{}, error) { + return ocs.GetCalled(key) +} + +func (ocs 
*ObjectsContainerStub) Add(key string, val interface{}) error { + return ocs.AddCalled(key, val) +} + +func (ocs *ObjectsContainerStub) Replace(key string, val interface{}) error { + return ocs.ReplaceCalled(key, val) +} + +func (ocs *ObjectsContainerStub) Remove(key string) { + ocs.RemoveCalled(key) +} + +func (ocs *ObjectsContainerStub) Len() int { + return ocs.LenCalled() +} diff --git a/process/mock/p2pMessageMock.go b/process/mock/p2pMessageMock.go new file mode 100644 index 00000000000..1d15966c7bf --- /dev/null +++ b/process/mock/p2pMessageMock.go @@ -0,0 +1,43 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" +) + +type P2PMessageMock struct { + FromField []byte + DataField []byte + SeqNoField []byte + TopicIDsField []string + SignatureField []byte + KeyField []byte + PeerField p2p.PeerID +} + +func (msg *P2PMessageMock) From() []byte { + return msg.FromField +} + +func (msg *P2PMessageMock) Data() []byte { + return msg.DataField +} + +func (msg *P2PMessageMock) SeqNo() []byte { + return msg.SeqNo() +} + +func (msg *P2PMessageMock) TopicIDs() []string { + return msg.TopicIDsField +} + +func (msg *P2PMessageMock) Signature() []byte { + return msg.SignatureField +} + +func (msg *P2PMessageMock) Key() []byte { + return msg.KeyField +} + +func (msg *P2PMessageMock) Peer() p2p.PeerID { + return msg.PeerField +} diff --git a/process/mock/resolverContainerMock.go b/process/mock/resolverContainerMock.go deleted file mode 100644 index 9a76585277c..00000000000 --- a/process/mock/resolverContainerMock.go +++ /dev/null @@ -1,42 +0,0 @@ -package mock - -import ( - "github.com/ElrondNetwork/elrond-go-sandbox/process" -) - -// ResolverContainer is a struct that defines the beahaviour for a container -// holding a list of resolvers organized by type -type ResolverContainer struct { - GetCalled func(key string) (process.Resolver, error) - AddCalled func(key string, resolver process.Resolver) error - ReplaceCalled func(key string, resolver 
process.Resolver) error - RemoveCalled func(key string) - LenCalled func() int -} - -// Get returns the resolver stored at a certain key. -// Returns an error if the element does not exist -func (i *ResolverContainer) Get(key string) (process.Resolver, error) { - return i.GetCalled(key) -} - -// Add will add a resolver at a given key. Returns -// an error if the element already exists -func (i *ResolverContainer) Add(key string, resolver process.Resolver) error { - return i.AddCalled(key, resolver) -} - -// Replace will add (or replace if it already exists) a resolver at a given key -func (i *ResolverContainer) Replace(key string, resolver process.Resolver) error { - return i.ReplaceCalled(key, resolver) -} - -// Remove will remove a resolver at a given key -func (i *ResolverContainer) Remove(key string) { - i.RemoveCalled(key) -} - -// Len returns the length of the added resolvers -func (i *ResolverContainer) Len() int { - return i.LenCalled() -} diff --git a/process/mock/resolverStub.go b/process/mock/resolverStub.go index b21eeb638b8..5dbf24ecf0a 100644 --- a/process/mock/resolverStub.go +++ b/process/mock/resolverStub.go @@ -1,23 +1,18 @@ package mock import ( - "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" ) type ResolverStub struct { - RequestDataCalled func(rd process.RequestData) error - SetResolverHandlerCalled func(func(rd process.RequestData) ([]byte, error)) - ResolverHandlerCalled func() func(rd process.RequestData) ([]byte, error) + RequestDataFromHashCalled func(hash []byte) error + ProcessReceivedMessageCalled func(message p2p.MessageP2P) error } -func (rs *ResolverStub) RequestData(rd process.RequestData) error { - return rs.RequestDataCalled(rd) +func (rs *ResolverStub) RequestDataFromHash(hash []byte) error { + return rs.RequestDataFromHashCalled(hash) } -func (rs *ResolverStub) SetResolverHandler(handler func(rd process.RequestData) ([]byte, error)) { - 
rs.SetResolverHandlerCalled(handler) -} - -func (rs *ResolverStub) ResolverHandler() func(rd process.RequestData) ([]byte, error) { - return rs.ResolverHandlerCalled() +func (rs *ResolverStub) ProcessReceivedMessage(message p2p.MessageP2P) error { + return rs.ProcessReceivedMessageCalled(message) } diff --git a/process/mock/resolversContainerStub.go b/process/mock/resolversContainerStub.go new file mode 100644 index 00000000000..1497285f650 --- /dev/null +++ b/process/mock/resolversContainerStub.go @@ -0,0 +1,33 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/process" +) + +type ResolversContainerStub struct { + GetCalled func(key string) (process.Resolver, error) + AddCalled func(key string, val process.Resolver) error + ReplaceCalled func(key string, val process.Resolver) error + RemoveCalled func(key string) + LenCalled func() int +} + +func (rcs *ResolversContainerStub) Get(key string) (process.Resolver, error) { + return rcs.GetCalled(key) +} + +func (rcs *ResolversContainerStub) Add(key string, val process.Resolver) error { + return rcs.AddCalled(key, val) +} + +func (rcs *ResolversContainerStub) Replace(key string, val process.Resolver) error { + return rcs.ReplaceCalled(key, val) +} + +func (rcs *ResolversContainerStub) Remove(key string) { + rcs.RemoveCalled(key) +} + +func (rcs *ResolversContainerStub) Len() int { + return rcs.LenCalled() +} diff --git a/process/mock/stringCreator.go b/process/mock/stringCreator.go deleted file mode 100644 index 45bbf59ecca..00000000000 --- a/process/mock/stringCreator.go +++ /dev/null @@ -1,17 +0,0 @@ -package mock - -import ( - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" -) - -type StringCreator struct { - Data string -} - -func (sn *StringCreator) ID() string { - return sn.Data -} - -func (sn *StringCreator) Create() p2p.Creator { - return &StringCreator{} -} diff --git a/process/mock/topicResolverSenderStub.go b/process/mock/topicResolverSenderStub.go new file mode 100644 index 
00000000000..6e01187b2fe --- /dev/null +++ b/process/mock/topicResolverSenderStub.go @@ -0,0 +1,23 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process" +) + +type TopicResolverSenderStub struct { + SendOnRequestTopicCalled func(rd *process.RequestData) error + SendCalled func(buff []byte, peer p2p.PeerID) error +} + +func (trss *TopicResolverSenderStub) RequestTopicSuffix() string { + return "_REQUEST" +} + +func (trss *TopicResolverSenderStub) SendOnRequestTopic(rd *process.RequestData) error { + return trss.SendOnRequestTopicCalled(rd) +} + +func (trss *TopicResolverSenderStub) Send(buff []byte, peer p2p.PeerID) error { + return trss.SendCalled(buff, peer) +} diff --git a/process/mock/transactionInterceptorMock.go b/process/mock/transactionInterceptorMock.go index 900106f2be3..316078bffe6 100644 --- a/process/mock/transactionInterceptorMock.go +++ b/process/mock/transactionInterceptorMock.go @@ -3,7 +3,6 @@ package mock import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/state" "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" ) type TransactionInterceptorMock struct { @@ -25,9 +24,9 @@ func (tim *TransactionInterceptorMock) VerifySig() bool { return tim.IsVerified } -func (tim *TransactionInterceptorMock) Create() p2p.Creator { - return &TransactionInterceptorMock{} -} +//func (tim *TransactionInterceptorMock) Create() p2p.Creator { +// return &TransactionInterceptorMock{} +//} func (tim *TransactionInterceptorMock) ID() string { panic("implement me") diff --git a/process/mock/wireMessageHandlerStub.go b/process/mock/wireMessageHandlerStub.go new file mode 100644 index 00000000000..68d0a2f988b --- /dev/null +++ b/process/mock/wireMessageHandlerStub.go @@ -0,0 +1,18 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" +) + +type WireMessageHandlerStub struct { + ConnectedPeersCalled 
func() []p2p.PeerID + SendToConnectedPeerCalled func(topic string, buff []byte, peerID p2p.PeerID) error +} + +func (wmhs *WireMessageHandlerStub) ConnectedPeers() []p2p.PeerID { + return wmhs.ConnectedPeersCalled() +} + +func (wmhs *WireMessageHandlerStub) SendToConnectedPeer(topic string, buff []byte, peerID p2p.PeerID) error { + return wmhs.SendToConnectedPeerCalled(topic, buff, peerID) +} diff --git a/process/requestData.go b/process/requestData.go index 6f21a2edd20..07ad87bff88 100644 --- a/process/requestData.go +++ b/process/requestData.go @@ -2,6 +2,9 @@ package process import ( "fmt" + + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" ) // RequestDataType represents the data type for the requested data @@ -31,3 +34,26 @@ type RequestData struct { Type RequestDataType Value []byte } + +// Unmarshal sets the fields according to p2p.MessageP2P.Data() contents +// Errors if something went wrong +func (rd *RequestData) Unmarshal(marshalizer marshal.Marshalizer, message p2p.MessageP2P) error { + if marshalizer == nil { + return ErrNilMarshalizer + } + + if message == nil { + return ErrNilMessage + } + + if message.Data() == nil { + return ErrNilDataToProcess + } + + err := marshalizer.Unmarshal(rd, message.Data()) + if err != nil { + return err + } + + return nil +} diff --git a/process/resolver/container.go b/process/resolver/container.go deleted file mode 100644 index a9261673229..00000000000 --- a/process/resolver/container.go +++ /dev/null @@ -1,78 +0,0 @@ -package resolver - -import ( - "sync" - - "github.com/ElrondNetwork/elrond-go-sandbox/process" -) - -// Container is a holder for resolvers organized by type -type Container struct { - mutex sync.RWMutex - resolvers map[string]process.Resolver -} - -// NewContainer will create a new instance of a resolver container -func NewContainer() *Container { - return &Container{ - mutex: sync.RWMutex{}, - resolvers: make(map[string]process.Resolver), - } -} - 
-// Get returns the resolver stored at a certain key. -// Returns an error if the element does not exist -func (i *Container) Get(key string) (process.Resolver, error) { - i.mutex.RLock() - resolver, ok := i.resolvers[key] - i.mutex.RUnlock() - if !ok { - return nil, process.ErrInvalidContainerKey - } - return resolver, nil -} - -// Add will add a resolver at a given key. Returns -// an error if the element already exists -func (i *Container) Add(key string, resolver process.Resolver) error { - if resolver == nil { - return process.ErrNilContainerElement - } - i.mutex.Lock() - defer i.mutex.Unlock() - - _, ok := i.resolvers[key] - - if ok { - return process.ErrContainerKeyAlreadyExists - } - - i.resolvers[key] = resolver - return nil -} - -// Replace will add (or replace if it already exists) a resolver at a given key -func (i *Container) Replace(key string, resolver process.Resolver) error { - if resolver == nil { - return process.ErrNilContainerElement - } - i.mutex.Lock() - i.resolvers[key] = resolver - i.mutex.Unlock() - return nil -} - -// Remove will remove a resolver at a given key -func (i *Container) Remove(key string) { - i.mutex.Lock() - delete(i.resolvers, key) - i.mutex.Unlock() -} - -// Len returns the length of the added resolvers -func (i *Container) Len() int { - i.mutex.RLock() - l := len(i.resolvers) - i.mutex.RUnlock() - return l -} diff --git a/process/resolver/topicResolver.go b/process/resolver/topicResolver.go deleted file mode 100644 index 8c3ffb8d6fc..00000000000 --- a/process/resolver/topicResolver.go +++ /dev/null @@ -1,101 +0,0 @@ -package resolver - -import ( - "github.com/ElrondNetwork/elrond-go-sandbox/logger" - "github.com/ElrondNetwork/elrond-go-sandbox/marshal" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/ElrondNetwork/elrond-go-sandbox/process" -) - -var log = logger.NewDefaultLogger() - -// topicResolver is a struct coupled with a p2p.Topic that can process requests -type topicResolver struct { - messenger 
p2p.Messenger - name string - topic *p2p.Topic - marshalizer marshal.Marshalizer - - resolveRequest func(rd process.RequestData) ([]byte, error) -} - -// NewTopicResolver returns a new topic resolver instance -func NewTopicResolver( - name string, - messenger p2p.Messenger, - marshalizer marshal.Marshalizer, -) (*topicResolver, error) { - - if messenger == nil { - return nil, process.ErrNilMessenger - } - - if marshalizer == nil { - return nil, process.ErrNilMarshalizer - } - - topic := messenger.GetTopic(name) - if topic == nil { - return nil, process.ErrNilTopic - } - - if topic.ResolveRequest != nil { - return nil, process.ErrResolveRequestAlreadyAssigned - } - - resolver := &topicResolver{ - name: name, - messenger: messenger, - topic: topic, - marshalizer: marshalizer, - } - - topic.ResolveRequest = func(objData []byte) []byte { - rd := process.RequestData{} - - err := marshalizer.Unmarshal(&rd, objData) - if err != nil { - return nil - } - - if resolver.resolveRequest != nil { - buff, err := resolver.resolveRequest(rd) - if err != nil { - log.Debug(err.Error()) - } - - return buff - } - - return nil - } - - return resolver, nil -} - -// RequestData is used to request data over channels (topics) from other peers -// This method only sends the request, the received data should be handled by interceptors -func (tr *topicResolver) RequestData(rd process.RequestData) error { - buff, err := tr.marshalizer.Marshal(&rd) - if err != nil { - return err - } - - if tr.topic.Request == nil { - return process.ErrTopicNotWiredToMessenger - } - - return tr.topic.Request(buff) -} - -// SetResolverHandler sets the handler that will be called when a new request comes from other peers to -// current node -func (tr *topicResolver) SetResolverHandler(handler func(rd process.RequestData) ([]byte, error)) { - tr.resolveRequest = handler -} - -// ResolverHandler gets the handler that will be called when a new request comes from other peers to -// current node -func (tr 
*topicResolver) ResolverHandler() func(rd process.RequestData) ([]byte, error) { - return tr.resolveRequest -} diff --git a/process/resolver/topicResolver_test.go b/process/resolver/topicResolver_test.go deleted file mode 100644 index f38431cea69..00000000000 --- a/process/resolver/topicResolver_test.go +++ /dev/null @@ -1,279 +0,0 @@ -package resolver_test - -import ( - "testing" - - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/ElrondNetwork/elrond-go-sandbox/process" - "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" - "github.com/ElrondNetwork/elrond-go-sandbox/process/resolver" - "github.com/stretchr/testify/assert" -) - -//-------NewTopicResolver - -func TestNewTopicResolver_NilMessengerShouldErr(t *testing.T) { - t.Parallel() - - tr, err := resolver.NewTopicResolver("test", nil, &mock.MarshalizerMock{}) - - assert.Equal(t, process.ErrNilMessenger, err) - assert.Nil(t, tr) -} - -func TestNewTopicResolver_NilMarshalizerShouldErr(t *testing.T) { - t.Parallel() - - tr, err := resolver.NewTopicResolver("test", &mock.MessengerStub{}, nil) - - assert.Equal(t, process.ErrNilMarshalizer, err) - assert.Nil(t, tr) -} - -func TestNewTopicResolver_NilTopicShouldErr(t *testing.T) { - t.Parallel() - - mes := &mock.MessengerStub{} - - mes.GetTopicCalled = func(name string) *p2p.Topic { - return nil - } - - tr, err := resolver.NewTopicResolver("test", mes, &mock.MarshalizerMock{}) - - assert.Equal(t, process.ErrNilTopic, err) - assert.Nil(t, tr) -} - -func TestNewTopicResolver_TopicWithResolveRequestAssignedShouldErr(t *testing.T) { - t.Parallel() - - mes := &mock.MessengerStub{} - - topic := p2p.NewTopic("test", &mock.StringCreator{}, &mock.MarshalizerMock{}) - topic.ResolveRequest = func(hash []byte) []byte { - return nil - } - - mes.GetTopicCalled = func(name string) *p2p.Topic { - return topic - } - - tr, err := resolver.NewTopicResolver("test", mes, &mock.MarshalizerMock{}) - - assert.Equal(t, process.ErrResolveRequestAlreadyAssigned, err) - 
assert.Nil(t, tr) -} - -func TestNewTopicResolver_OkValsShouldWork(t *testing.T) { - t.Parallel() - - mes := &mock.MessengerStub{} - - topic := p2p.NewTopic("test", &mock.StringCreator{}, &mock.MarshalizerMock{}) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - return topic - } - - res, err := resolver.NewTopicResolver("test", mes, &mock.MarshalizerMock{}) - - assert.Nil(t, err) - assert.NotNil(t, res) -} - -//------- ResolveRequest - -func TestTopicResolver_ResolveRequestMarshalizerFailsShouldReturnNil(t *testing.T) { - t.Parallel() - - mes := &mock.MessengerStub{} - - resMarshalizer := &mock.MarshalizerMock{} - resMarshalizer.Fail = true - - topic := p2p.NewTopic("test", &mock.StringCreator{}, &mock.MarshalizerMock{}) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - return topic - } - - _, _ = resolver.NewTopicResolver("test", mes, resMarshalizer) - - assert.Nil(t, topic.ResolveRequest([]byte("a"))) -} - -func TestTopicResolver_ResolveRequestNilShouldReturnNil(t *testing.T) { - t.Parallel() - - mes := &mock.MessengerStub{} - - resMarshalizer := &mock.MarshalizerMock{} - - topic := p2p.NewTopic("test", &mock.StringCreator{}, &mock.MarshalizerMock{}) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - return topic - } - - _, _ = resolver.NewTopicResolver("test", mes, resMarshalizer) - - assert.Nil(t, topic.ResolveRequest(nil)) -} - -func TestTopicResolver_ResolveRequestNilFuncShouldReturnNil(t *testing.T) { - t.Parallel() - - mes := &mock.MessengerStub{} - - resMarshalizer := &mock.MarshalizerMock{} - - topic := p2p.NewTopic("test", &mock.StringCreator{}, &mock.MarshalizerMock{}) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - return topic - } - - _, _ = resolver.NewTopicResolver("test", mes, resMarshalizer) - - rd := process.RequestData{ - Type: process.HashType, - Value: []byte("aaa"), - } - buffRd, err := resMarshalizer.Marshal(&rd) - assert.Nil(t, err) - - assert.Nil(t, topic.ResolveRequest(buffRd)) -} - -func 
TestTopicResolver_ResolveRequestShouldWork(t *testing.T) { - t.Parallel() - - mes := &mock.MessengerStub{} - - resMarshalizer := &mock.MarshalizerMock{} - - topic := p2p.NewTopic("test", &mock.StringCreator{}, &mock.MarshalizerMock{}) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - return topic - } - - res, _ := resolver.NewTopicResolver("test", mes, resMarshalizer) - - res.SetResolverHandler(func(rd process.RequestData) ([]byte, error) { - return []byte("aaa"), nil - }) - - rd := process.RequestData{ - Type: process.HashType, - Value: []byte("aaa"), - } - buffRd, err := resMarshalizer.Marshal(&rd) - assert.Nil(t, err) - - assert.Equal(t, []byte("aaa"), topic.ResolveRequest(buffRd)) -} - -//------- RequestData - -func TestTopicResolver_RequestDataMarshalizerFailsShouldErr(t *testing.T) { - t.Parallel() - - mes := &mock.MessengerStub{} - - resMarshalizer := &mock.MarshalizerMock{} - resMarshalizer.Fail = true - - topic := p2p.NewTopic("test", &mock.StringCreator{}, &mock.MarshalizerMock{}) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - return topic - } - - res, _ := resolver.NewTopicResolver("test", mes, resMarshalizer) - - assert.Equal(t, "MarshalizerMock generic error", res.RequestData( - process.RequestData{ - Type: process.HashType, - Value: []byte("aaa"), - }).Error()) -} - -func TestTopicResolver_RequestDataTopicNotWiredShouldErr(t *testing.T) { - t.Parallel() - - mes := &mock.MessengerStub{} - - resMarshalizer := &mock.MarshalizerMock{} - - topic := p2p.NewTopic("test", &mock.StringCreator{}, &mock.MarshalizerMock{}) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - return topic - } - - res, _ := resolver.NewTopicResolver("test", mes, resMarshalizer) - - assert.Equal(t, process.ErrTopicNotWiredToMessenger, res.RequestData( - process.RequestData{ - Type: process.HashType, - Value: []byte("aaa"), - })) -} - -func TestTopicResolver_RequestDataShouldWork(t *testing.T) { - t.Parallel() - - mes := &mock.MessengerStub{} - - 
resMarshalizer := &mock.MarshalizerMock{} - - topic := p2p.NewTopic("test", &mock.StringCreator{}, &mock.MarshalizerMock{}) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - return topic - } - - res, _ := resolver.NewTopicResolver("test", mes, resMarshalizer) - - wasCalled := false - - topic.Request = func(hash []byte) error { - wasCalled = true - return nil - } - - assert.Nil(t, res.RequestData( - process.RequestData{ - Type: process.HashType, - Value: []byte("aaa"), - })) - assert.True(t, wasCalled) -} - -func TestTopicResolver_ResolverHandler(t *testing.T) { - t.Parallel() - - mes := &mock.MessengerStub{} - - resMarshalizer := &mock.MarshalizerMock{} - - topic := p2p.NewTopic("test", &mock.StringCreator{}, &mock.MarshalizerMock{}) - - mes.GetTopicCalled = func(name string) *p2p.Topic { - return topic - } - - res, _ := resolver.NewTopicResolver("test", mes, resMarshalizer) - - //first test for nil - assert.Nil(t, res.ResolverHandler()) - - res.SetResolverHandler(func(rd process.RequestData) (bytes []byte, e error) { - return nil, nil - }) - - //second, test is not nil - assert.NotNil(t, res.ResolverHandler()) -} diff --git a/process/sync/block.go b/process/sync/block.go index f4e4e796319..01c4cf3c70d 100644 --- a/process/sync/block.go +++ b/process/sync/block.go @@ -15,6 +15,7 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/logger" "github.com/ElrondNetwork/elrond-go-sandbox/marshal" "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" "github.com/ElrondNetwork/elrond-go-sandbox/storage" ) @@ -43,12 +44,13 @@ type Bootstrap struct { txBodyHash []byte chRcvTxBdy chan bool - RequestHeaderHandler func(nonce uint64) - RequestTxBodyHandler func(hash []byte) - chStopSync chan bool waitTime time.Duration + resolvers process.ResolversContainer + hdrRes process.HeaderResolver + txBlockBodyRes process.Resolver + isNodeSynchronized bool syncStateListeners []func(bool) mutSyncStateListeners 
sync.RWMutex @@ -64,8 +66,18 @@ func NewBootstrap( hasher hashing.Hasher, marshalizer marshal.Marshalizer, forkDetector process.ForkDetector, + resolversContainer process.ResolversContainer, ) (*Bootstrap, error) { - err := checkBootstrapNilParameters(transientDataHolder, blkc, rounder, blkExecutor, hasher, marshalizer, forkDetector) + + err := checkBootstrapNilParameters( + transientDataHolder, + blkc, + rounder, + blkExecutor, + hasher, + marshalizer, + forkDetector, + resolversContainer) if err != nil { return nil, err @@ -84,6 +96,20 @@ func NewBootstrap( forkDetector: forkDetector, } + hdrResolver, err := resolversContainer.Get(string(factory.HeadersTopic)) + if err != nil { + return nil, err + } + + txBlockBodyResolver, err := resolversContainer.Get(string(factory.TxBlockBodyTopic)) + if err != nil { + return nil, err + } + + //placed in struct fields for performance reasons + boot.hdrRes = hdrResolver.(process.HeaderResolver) + boot.txBlockBodyRes = txBlockBodyResolver.(process.Resolver) + boot.chRcvHdr = make(chan bool) boot.chRcvTxBdy = make(chan bool) @@ -110,6 +136,7 @@ func checkBootstrapNilParameters( hasher hashing.Hasher, marshalizer marshal.Marshalizer, forkDetector process.ForkDetector, + resolvers process.ResolversContainer, ) error { if transientDataHolder == nil { return process.ErrNilTransientDataHolder @@ -151,9 +178,14 @@ func checkBootstrapNilParameters( return process.ErrNilForkDetector } + if resolvers == nil { + return process.ErrNilResolverContainer + } + return nil } +// AddSyncStateListener adds a syncStateListener that get notified each time the sync status of the node changes func (boot *Bootstrap) AddSyncStateListener(syncStateListener func(bool)) { boot.mutSyncStateListeners.Lock() boot.syncStateListeners = append(boot.syncStateListeners, syncStateListener) @@ -397,9 +429,11 @@ func (boot *Bootstrap) getHeaderFromPoolHavingNonce(nonce uint64) *block.Header // requestHeader method requests a block header from network when it is not 
found in the pool func (boot *Bootstrap) requestHeader(nonce uint64) { - if boot.RequestHeaderHandler != nil { - boot.setRequestedHeaderNonce(&nonce) - boot.RequestHeaderHandler(nonce) + err := boot.hdrRes.RequestDataFromNonce(nonce) + + log.Info(fmt.Sprintf("requested header with nonce %d from network\n", nonce)) + if err != nil { + log.Error("RequestHeaderFromNonce error: ", err.Error()) } } @@ -455,9 +489,12 @@ func (boot *Bootstrap) getTxBody(hash []byte) interface{} { // requestBody method requests a block body from network when it is not found in the pool func (boot *Bootstrap) requestTxBody(hash []byte) { - if boot.RequestTxBodyHandler != nil { - boot.setRequestedTxBodyHash(hash) - boot.RequestTxBodyHandler(hash) + err := boot.txBlockBodyRes.RequestDataFromHash(hash) + + log.Info(fmt.Sprintf("requested tx body with hash %s from network\n", toB64(hash))) + if err != nil { + log.Error("RequestBlockBodyFromHash error: ", err.Error()) + return } } diff --git a/process/sync/block_test.go b/process/sync/block_test.go index acae23617d4..8c88c134dcf 100644 --- a/process/sync/block_test.go +++ b/process/sync/block_test.go @@ -4,16 +4,16 @@ import ( "bytes" "fmt" "reflect" + goSync "sync" "testing" "time" - goSync "sync" - "github.com/ElrondNetwork/elrond-go-sandbox/consensus/round" "github.com/ElrondNetwork/elrond-go-sandbox/data" "github.com/ElrondNetwork/elrond-go-sandbox/data/block" "github.com/ElrondNetwork/elrond-go-sandbox/data/blockchain" "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/factory" "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" "github.com/ElrondNetwork/elrond-go-sandbox/process/sync" "github.com/ElrondNetwork/elrond-go-sandbox/storage" @@ -31,6 +31,33 @@ type removedFlags struct { flagHdrRemovedFromForkDetector bool } +func createMockResolversContainer() *mock.ResolversContainerStub { + return &mock.ResolversContainerStub{ + GetCalled: func(key string) (resolver 
process.Resolver, e error) { + if key == string(factory.HeadersTopic) { + return &mock.HeaderResolverMock{ + RequestDataFromNonceCalled: func(nonce uint64) error { + return nil + }, + RequestDataFromHashCalled: func(hash []byte) error { + return nil + }, + }, nil + } + + if key == string(factory.TxBlockBodyTopic) { + return &mock.ResolverStub{ + RequestDataFromHashCalled: func(hash []byte) error { + return nil + }, + }, nil + } + + return nil, nil + }, + } +} + //------- NewBootstrap func TestNewBootstrap_NilTransientDataHolderShouldErr(t *testing.T) { @@ -43,7 +70,16 @@ func TestNewBootstrap_NilTransientDataHolderShouldErr(t *testing.T) { marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} - bs, err := sync.NewBootstrap(nil, blkc, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, err := sync.NewBootstrap( + nil, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + &mock.ResolversContainerStub{}) assert.Nil(t, bs) assert.Equal(t, process.ErrNilTransientDataHolder, err) @@ -69,7 +105,17 @@ func TestNewBootstrap_TransientDataHolderRetNilOnHeadersShouldErr(t *testing.T) marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} - bs, err := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, err := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + &mock.ResolversContainerStub{}, + ) assert.Nil(t, bs) assert.Equal(t, process.ErrNilHeadersDataPool, err) @@ -95,7 +141,17 @@ func TestNewBootstrap_TransientDataHolderRetNilOnHeadersNoncesShouldErr(t *testi marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} - bs, err := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, err := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + 
&mock.ResolversContainerStub{}, + ) assert.Nil(t, bs) assert.Equal(t, process.ErrNilHeadersNoncesDataPool, err) @@ -121,7 +177,17 @@ func TestNewBootstrap_TransientDataHolderRetNilOnTxBlockBodyShouldErr(t *testing marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} - bs, err := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, err := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + &mock.ResolversContainerStub{}, + ) assert.Nil(t, bs) assert.Equal(t, process.ErrNilTxBlockBody, err) @@ -146,13 +212,23 @@ func TestNewBootstrap_NilBlockchainShouldErr(t *testing.T) { marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} - bs, err := sync.NewBootstrap(transient, nil, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, err := sync.NewBootstrap( + transient, + nil, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + &mock.ResolversContainerStub{}, + ) assert.Nil(t, bs) assert.Equal(t, process.ErrNilBlockChain, err) } -func TestNewBootstrap_NilRoundShouldErr(t *testing.T) { +func TestNewBootstrap_NilRounderShouldErr(t *testing.T) { t.Parallel() transient := &mock.TransientDataPoolMock{} @@ -171,7 +247,17 @@ func TestNewBootstrap_NilRoundShouldErr(t *testing.T) { marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} - bs, err := sync.NewBootstrap(transient, blkc, nil, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, err := sync.NewBootstrap( + transient, + blkc, + nil, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + &mock.ResolversContainerStub{}, + ) assert.Nil(t, bs) assert.Equal(t, process.ErrNilRounder, err) @@ -196,7 +282,17 @@ func TestNewBootstrap_NilBlockProcessorShouldErr(t *testing.T) { marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} - bs, err := sync.NewBootstrap(transient, blkc, 
rnd, nil, waitTime, hasher, marshalizer, forkDetector) + bs, err := sync.NewBootstrap( + transient, + blkc, + rnd, + nil, + waitTime, + hasher, + marshalizer, + forkDetector, + &mock.ResolversContainerStub{}, + ) assert.Nil(t, bs) assert.Equal(t, process.ErrNilBlockExecutor, err) @@ -219,8 +315,19 @@ func TestNewBootstrap_NilHasherShouldErr(t *testing.T) { rnd := &mock.RounderMock{} blkExec := &mock.BlockProcessorMock{} marshalizer := &mock.MarshalizerMock{} + forkDetector := &mock.ForkDetectorMock{} - bs, err := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, nil, marshalizer, nil) + bs, err := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + nil, + marshalizer, + forkDetector, + &mock.ResolversContainerStub{}, + ) assert.Nil(t, bs) assert.Equal(t, process.ErrNilHasher, err) @@ -245,7 +352,17 @@ func TestNewBootstrap_NilMarshalizerShouldErr(t *testing.T) { hasher := &mock.HasherMock{} forkDetector := &mock.ForkDetectorMock{} - bs, err := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, nil, forkDetector) + bs, err := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + nil, + forkDetector, + &mock.ResolversContainerStub{}, + ) assert.Nil(t, bs) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -270,12 +387,164 @@ func TestNewBootstrap_NilForkDetectorShouldErr(t *testing.T) { hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - bs, err := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, marshalizer, nil) + bs, err := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + nil, + &mock.ResolversContainerStub{}, + ) assert.Nil(t, bs) assert.Equal(t, process.ErrNilForkDetector, err) } +func TestNewBootstrap_NilResolversContainerShouldErr(t *testing.T) { + t.Parallel() + + transient := &mock.TransientDataPoolMock{} + transient.HeadersCalled = func() data.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} 
+ } + transient.HeadersNoncesCalled = func() data.Uint64Cacher { + return &mock.Uint64CacherStub{} + } + transient.TxBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + blkc := &blockchain.BlockChain{} + rnd := &mock.RounderMock{} + blkExec := &mock.BlockProcessorMock{} + forkDetector := &mock.ForkDetectorMock{} + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + + bs, err := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + nil, + ) + + assert.Nil(t, bs) + assert.Equal(t, process.ErrNilResolverContainer, err) +} + +func TestNewBootstrap_NilHeaderResolverShouldErr(t *testing.T) { + t.Parallel() + + transient := &mock.TransientDataPoolMock{} + transient.HeadersCalled = func() data.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + transient.HeadersNoncesCalled = func() data.Uint64Cacher { + return &mock.Uint64CacherStub{} + } + transient.TxBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + + errExpected := errors.New("expected error") + + resContainer := &mock.ResolversContainerStub{ + GetCalled: func(key string) (resolver process.Resolver, e error) { + if key == string(factory.HeadersTopic) { + return nil, errExpected + } + + if key == string(factory.TxBlockBodyTopic) { + return &mock.ResolverStub{}, nil + } + + return nil, nil + }, + } + + blkc := &blockchain.BlockChain{} + rnd := &mock.RounderMock{} + blkExec := &mock.BlockProcessorMock{} + forkDetector := &mock.ForkDetectorMock{} + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + + bs, err := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + resContainer, + ) + + assert.Nil(t, bs) + assert.Equal(t, errExpected, err) +} + +func TestNewBootstrap_NilTxBlockBodyResolverShouldErr(t *testing.T) { + t.Parallel() + + transient := &mock.TransientDataPoolMock{} + transient.HeadersCalled = func() 
data.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + transient.HeadersNoncesCalled = func() data.Uint64Cacher { + return &mock.Uint64CacherStub{} + } + transient.TxBlocksCalled = func() storage.Cacher { + return &mock.CacherStub{} + } + + errExpected := errors.New("expected error") + + resContainer := &mock.ResolversContainerStub{ + GetCalled: func(key string) (resolver process.Resolver, e error) { + if key == string(factory.HeadersTopic) { + return &mock.HeaderResolverMock{}, errExpected + } + + if key == string(factory.TxBlockBodyTopic) { + return nil, errExpected + } + + return nil, nil + }, + } + + blkc := &blockchain.BlockChain{} + rnd := &mock.RounderMock{} + blkExec := &mock.BlockProcessorMock{} + forkDetector := &mock.ForkDetectorMock{} + hasher := &mock.HasherMock{} + marshalizer := &mock.MarshalizerMock{} + + bs, err := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + resContainer, + ) + + assert.Nil(t, bs) + assert.Equal(t, errExpected, err) +} + func TestNewBootstrap_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -310,6 +579,7 @@ func TestNewBootstrap_OkValsShouldWork(t *testing.T) { return cs } + blkc := &blockchain.BlockChain{} rnd := &mock.RounderMock{} blkExec := &mock.BlockProcessorMock{} @@ -317,7 +587,17 @@ func TestNewBootstrap_OkValsShouldWork(t *testing.T) { marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} - bs, err := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, err := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + createMockResolversContainer(), + ) assert.NotNil(t, bs) assert.Nil(t, err) @@ -374,10 +654,9 @@ func TestBootstrap_ShouldReturnMissingHeader(t *testing.T) { waitTime, hasher, marshalizer, - forkDetector) - - bs.RequestHeaderHandler = func(nonce uint64) {} - bs.RequestTxBodyHandler = func(hash 
[]byte) {} + forkDetector, + createMockResolversContainer(), + ) r := bs.SyncBlock() @@ -454,7 +733,9 @@ func TestBootstrap_ShouldReturnMissingBody(t *testing.T) { waitTime, hasher, marshalizer, - forkDetector) + forkDetector, + createMockResolversContainer(), + ) bs.RequestHeader(2) @@ -527,7 +808,9 @@ func TestBootstrap_ShouldNotNeedToSync(t *testing.T) { waitTime, hasher, marshalizer, - forkDetector) + forkDetector, + createMockResolversContainer(), + ) bs.StartSync() time.Sleep(200 * time.Millisecond) @@ -623,7 +906,9 @@ func TestBootstrap_SyncShouldSyncOneBlock(t *testing.T) { waitTime, hasher, marshalizer, - forkDetector) + forkDetector, + createMockResolversContainer(), + ) bs.StartSync() @@ -717,7 +1002,9 @@ func TestBootstrap_ShouldReturnNilErr(t *testing.T) { waitTime, hasher, marshalizer, - forkDetector) + forkDetector, + createMockResolversContainer(), + ) r := bs.SyncBlock() @@ -760,7 +1047,9 @@ func TestBootstrap_ShouldSyncShouldReturnFalseWhenCurrentBlockIsNilAndRoundIndex waitTime, hasher, marshalizer, - forkDetector) + forkDetector, + createMockResolversContainer(), + ) assert.False(t, bs.ShouldSync()) } @@ -805,7 +1094,9 @@ func TestBootstrap_ShouldReturnTrueWhenCurrentBlockIsNilAndRoundIndexIsGreaterTh waitTime, hasher, marshalizer, - forkDetector) + forkDetector, + createMockResolversContainer(), + ) assert.True(t, bs.ShouldSync()) } @@ -854,7 +1145,9 @@ func TestBootstrap_ShouldReturnFalseWhenNodeIsSynced(t *testing.T) { waitTime, hasher, marshalizer, - forkDetector) + forkDetector, + createMockResolversContainer(), + ) assert.False(t, bs.ShouldSync()) } @@ -902,7 +1195,9 @@ func TestBootstrap_ShouldReturnTrueWhenNodeIsNotSynced(t *testing.T) { waitTime, hasher, marshalizer, - forkDetector) + forkDetector, + createMockResolversContainer(), + ) assert.False(t, bs.ShouldSync()) } @@ -951,7 +1246,9 @@ func TestBootstrap_GetHeaderFromPoolShouldReturnNil(t *testing.T) { waitTime, hasher, marshalizer, - forkDetector) + forkDetector, + 
createMockResolversContainer(), + ) assert.Nil(t, bs.GetHeaderFromPool(0)) } @@ -1011,7 +1308,9 @@ func TestBootstrap_GetHeaderFromPoolShouldReturnHeader(t *testing.T) { waitTime, hasher, marshalizer, - forkDetector) + forkDetector, + createMockResolversContainer(), + ) assert.True(t, hdr == bs.GetHeaderFromPool(0)) } @@ -1059,7 +1358,9 @@ func TestGetBlockFromPoolShouldReturnBlock(t *testing.T) { waitTime, hasher, marshalizer, - forkDetector) + forkDetector, + createMockResolversContainer(), + ) assert.True(t, blk == bs.GetTxBody([]byte("aaa"))) @@ -1134,7 +1435,9 @@ func TestBootstrap_ReceivedHeadersFoundInPoolShouldAddToForkDetector(t *testing. waitTime, hasher, marshalizer, - forkDetector) + forkDetector, + createMockResolversContainer(), + ) bs.ReceivedHeaders(addedHash) @@ -1226,7 +1529,9 @@ func TestBootstrap_ReceivedHeadersNotFoundInPoolButFoundInStorageShouldAddToFork waitTime, hasher, marshalizer, - forkDetector) + forkDetector, + createMockResolversContainer(), + ) bs.ReceivedHeaders(addedHash) @@ -1265,7 +1570,17 @@ func TestBootstrap_ForkChoiceNilBlockchainHeaderShouldErr(t *testing.T) { marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} - bs, _ := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, _ := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + createMockResolversContainer(), + ) err := bs.ForkChoice(&block.Header{}) assert.Equal(t, sync.ErrNilCurrentHeader, err) @@ -1301,7 +1616,17 @@ func TestBootstrap_ForkChoiceNilParamHeaderShouldErr(t *testing.T) { marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} - bs, _ := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, _ := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + createMockResolversContainer(), + ) 
blkc.CurrentBlockHeader = &block.Header{} @@ -1384,7 +1709,17 @@ func TestBootstrap_ForkChoiceIsNotEmptyShouldRemove(t *testing.T) { marshalizer := &mock.MarshalizerMock{} forkDetector := createForkDetector(newHdrNonce, remFlags) - bs, _ := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, _ := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + createMockResolversContainer(), + ) blkc.CurrentBlockHeader = &block.Header{ PubKeysBitmap: []byte{1}, @@ -1512,7 +1847,18 @@ func TestBootstrap_ForkChoiceIsEmptyCallRollBackOkValsShouldWork(t *testing.T) { }, } forkDetector := createForkDetector(currentHdrNonce, remFlags) - bs, _ := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + + bs, _ := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + createMockResolversContainer(), + ) //this is the block we want to revert blkc.CurrentBlockHeader = &block.Header{ @@ -1619,7 +1965,17 @@ func TestBootstrap_ForkChoiceIsEmptyCallRollBackToGenesisShouldWork(t *testing.T }, } forkDetector := createForkDetector(currentHdrNonce, remFlags) - bs, _ := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, _ := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + createMockResolversContainer(), + ) //this is the block we want to revert blkc.CurrentBlockHeader = &block.Header{ @@ -1684,7 +2040,17 @@ func TestBootstrap_GetTxBodyHavingHashReturnsFromCacherShouldWork(t *testing.T) marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} - bs, _ := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, _ := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + 
forkDetector, + createMockResolversContainer(), + ) txBlockRecovered := bs.GetTxBody(requestedHash) assert.True(t, txBlockRecovered == txBlock) @@ -1736,7 +2102,17 @@ func TestBootstrap_GetTxBodyHavingHashNotFoundInCacherOrStorageShouldRetNil(t *t marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} - bs, _ := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, _ := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + createMockResolversContainer(), + ) txBlockRecovered := bs.GetTxBody(requestedHash) assert.Nil(t, txBlockRecovered) @@ -1797,7 +2173,17 @@ func TestBootstrap_GetTxBodyHavingHashFoundInStorageShouldWork(t *testing.T) { blkExec := &mock.BlockProcessorMock{} forkDetector := &mock.ForkDetectorMock{} - bs, _ := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, _ := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + createMockResolversContainer(), + ) txBlockRecovered := bs.GetTxBody(requestedHash) assert.Equal(t, txBlock, txBlockRecovered) @@ -1865,7 +2251,17 @@ func TestBootstrap_GetTxBodyHavingHashMarshalizerFailShouldRemoveAndRetNil(t *te blkExec := &mock.BlockProcessorMock{} forkDetector := &mock.ForkDetectorMock{} - bs, _ := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, _ := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + createMockResolversContainer(), + ) txBlockRecovered := bs.GetTxBody(requestedHash) assert.Nil(t, txBlockRecovered) @@ -1922,7 +2318,17 @@ func TestBootstrap_CreateEmptyBlockShouldReturnNilWhenMarshalErr(t *testing.T) { marshalizer.Fail = true - bs, _ := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, _ := 
sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + createMockResolversContainer(), + ) blk, hdr := bs.CreateAndCommitEmptyBlock(0) @@ -1985,7 +2391,17 @@ func TestBootstrap_CreateEmptyBlockShouldReturnNilWhenCommitBlockErr(t *testing. return err } - bs, _ := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, _ := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + createMockResolversContainer(), + ) blk, hdr := bs.CreateAndCommitEmptyBlock(0) @@ -2045,7 +2461,17 @@ func TestBootstrap_CreateEmptyBlockShouldWork(t *testing.T) { return nil } - bs, _ := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, _ := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + createMockResolversContainer(), + ) blk, hdr := bs.CreateAndCommitEmptyBlock(0) @@ -2105,7 +2531,17 @@ func TestBootstrap_AddSyncStateListenerShouldAppendAnotherListener(t *testing.T) return nil } - bs, _ := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, _ := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + createMockResolversContainer(), + ) f1 := func(bool) {} f2 := func(bool) {} @@ -2170,7 +2606,17 @@ func TestBootstrap_NotifySyncStateListenersShouldNotify(t *testing.T) { return nil } - bs, _ := sync.NewBootstrap(transient, blkc, rnd, blkExec, waitTime, hasher, marshalizer, forkDetector) + bs, _ := sync.NewBootstrap( + transient, + blkc, + rnd, + blkExec, + waitTime, + hasher, + marshalizer, + forkDetector, + createMockResolversContainer(), + ) calls := 0 var wg goSync.WaitGroup diff --git a/process/topicResolverSender/export_test.go b/process/topicResolverSender/export_test.go new file mode 100644 index 
00000000000..7f82bfa9500 --- /dev/null +++ b/process/topicResolverSender/export_test.go @@ -0,0 +1,10 @@ +package topicResolverSender + +import ( + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process" +) + +func SelectRandomPeers(connectedPeers []p2p.PeerID, peersToSend int, randomizer process.IntRandomizer) []p2p.PeerID { + return selectRandomPeers(connectedPeers, peersToSend, randomizer) +} diff --git a/process/topicResolverSender/topicResolverSender.go b/process/topicResolverSender/topicResolverSender.go new file mode 100644 index 00000000000..a03895046d6 --- /dev/null +++ b/process/topicResolverSender/topicResolverSender.go @@ -0,0 +1,109 @@ +package topicResolverSender + +import ( + "math/rand" + "time" + + "github.com/ElrondNetwork/elrond-go-sandbox/logger" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process" +) + +// requestTopicSuffix represents the topic name suffix +const requestTopicSuffix = "_REQUEST" + +// PeersToSendRequest number of peers to send the message +const PeersToSendRequest = 2 + +var log = logger.NewDefaultLogger() + +type topicResolverSender struct { + messenger process.WireMessageHandler + marshalizer marshal.Marshalizer + topicName string + r *rand.Rand +} + +// NewTopicResolverSender returns a new topic resolver instance +func NewTopicResolverSender( + messenger process.WireMessageHandler, + topicName string, + marshalizer marshal.Marshalizer, +) (*topicResolverSender, error) { + if messenger == nil { + return nil, process.ErrNilMessenger + } + + if marshalizer == nil { + return nil, process.ErrNilMarshalizer + } + + resolver := &topicResolverSender{ + messenger: messenger, + topicName: topicName, + marshalizer: marshalizer, + r: rand.New(rand.NewSource(time.Now().UnixNano())), + } + + return resolver, nil +} + +// SendOnRequestTopic is used to send request data over channels 
(topics) to other peers +// This method only sends the request, the received data should be handled by interceptors +func (trs *topicResolverSender) SendOnRequestTopic(rd *process.RequestData) error { + buff, err := trs.marshalizer.Marshal(rd) + if err != nil { + return err + } + + peersToSend := selectRandomPeers(trs.messenger.ConnectedPeers(), PeersToSendRequest, trs.r) + if len(peersToSend) == 0 { + return process.ErrNoConnectedPeerToSendRequest + } + + for _, peer := range peersToSend { + err = trs.messenger.SendToConnectedPeer(trs.topicName+requestTopicSuffix, buff, peer) + if err != nil { + log.Debug(err.Error()) + } + } + + return nil +} + +// Send is used to send an array buffer to a connected peer +// It is used when replying to a request +func (trs *topicResolverSender) Send(buff []byte, peer p2p.PeerID) error { + return trs.messenger.SendToConnectedPeer(trs.topicName, buff, peer) +} + +// RequestTopicSuffix returns the suffix that will be added to create a new channel for requests +func (trs *topicResolverSender) RequestTopicSuffix() string { + return requestTopicSuffix +} + +func selectRandomPeers(connectedPeers []p2p.PeerID, peersToSend int, randomizer process.IntRandomizer) []p2p.PeerID { + selectedPeers := make([]p2p.PeerID, 0) + + if len(connectedPeers) == 0 { + return selectedPeers + } + + if len(connectedPeers) <= peersToSend { + return connectedPeers + } + + uniqueIndexes := make(map[int]struct{}) + //generating peersToSend number of unique indexes + for len(uniqueIndexes) < peersToSend { + newIndex := randomizer.Intn(len(connectedPeers)) + uniqueIndexes[newIndex] = struct{}{} + } + + for index := range uniqueIndexes { + selectedPeers = append(selectedPeers, connectedPeers[index]) + } + + return selectedPeers +} diff --git a/process/topicResolverSender/topicResolverSender_test.go b/process/topicResolverSender/topicResolverSender_test.go new file mode 100644 index 00000000000..7a5cab2031c --- /dev/null +++ 
b/process/topicResolverSender/topicResolverSender_test.go @@ -0,0 +1,243 @@ +package topicResolverSender_test + +import ( + "bytes" + "testing" + + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" + "github.com/ElrondNetwork/elrond-go-sandbox/process" + "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" + "github.com/ElrondNetwork/elrond-go-sandbox/process/topicResolverSender" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +//------- NewTopicResolverSender + +func TestNewTopicResolverSender_NilMessengerShouldErr(t *testing.T) { + t.Parallel() + + trs, err := topicResolverSender.NewTopicResolverSender(nil, "topic", &mock.MarshalizerMock{}) + + assert.Nil(t, trs) + assert.Equal(t, process.ErrNilMessenger, err) +} + +func TestNewTopicResolverSender_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + trs, err := topicResolverSender.NewTopicResolverSender(&mock.WireMessageHandlerStub{}, "topic", nil) + + assert.Nil(t, trs) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewTopicResolverSender_OkValsShouldWork(t *testing.T) { + t.Parallel() + + trs, err := topicResolverSender.NewTopicResolverSender( + &mock.WireMessageHandlerStub{}, + "topic", + &mock.MarshalizerMock{}) + + assert.NotNil(t, trs) + assert.Nil(t, err) +} + +//------- SendOnRequestTopic + +func TestTopicResolverSender_SendOnRequestTopicMarshalizerFailsShouldErr(t *testing.T) { + t.Parallel() + + errExpected := errors.New("expected error") + + trs, _ := topicResolverSender.NewTopicResolverSender( + &mock.WireMessageHandlerStub{}, + "topic", + &mock.MarshalizerStub{ + MarshalCalled: func(obj interface{}) (bytes []byte, e error) { + return nil, errExpected + }, + }) + + err := trs.SendOnRequestTopic(&process.RequestData{}) + + assert.Equal(t, errExpected, err) +} + +func TestTopicResolverSender_SendOnRequestTopicNoOneToSendShouldErr(t *testing.T) { + t.Parallel() + + trs, _ := topicResolverSender.NewTopicResolverSender( + &mock.WireMessageHandlerStub{ + 
ConnectedPeersCalled: func() []p2p.PeerID { + return make([]p2p.PeerID, 0) + }, + }, + "topic", + &mock.MarshalizerMock{}, + ) + + err := trs.SendOnRequestTopic(&process.RequestData{}) + + assert.Equal(t, process.ErrNoConnectedPeerToSendRequest, err) +} + +func TestTopicResolverSender_SendOnRequestTopicShouldWork(t *testing.T) { + t.Parallel() + + pID1 := p2p.PeerID("peer1") + sentToPid1 := false + + trs, _ := topicResolverSender.NewTopicResolverSender( + &mock.WireMessageHandlerStub{ + ConnectedPeersCalled: func() []p2p.PeerID { + return []p2p.PeerID{pID1} + }, + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID p2p.PeerID) error { + if bytes.Equal(peerID.Bytes(), pID1.Bytes()) { + sentToPid1 = true + } + + return nil + }, + }, + "topic", + &mock.MarshalizerMock{}, + ) + + err := trs.SendOnRequestTopic(&process.RequestData{}) + + assert.Nil(t, err) + assert.True(t, sentToPid1) +} + +//------- Send + +func TestTopicResolverSender_SendShouldWork(t *testing.T) { + t.Parallel() + + pID1 := p2p.PeerID("peer1") + sentToPid1 := false + buffToSend := []byte("buff") + + trs, _ := topicResolverSender.NewTopicResolverSender( + &mock.WireMessageHandlerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID p2p.PeerID) error { + if bytes.Equal(peerID.Bytes(), pID1.Bytes()) && + bytes.Equal(buff, buffToSend) { + sentToPid1 = true + } + + return nil + }, + }, + "topic", + &mock.MarshalizerMock{}, + ) + + err := trs.Send(buffToSend, pID1) + + assert.Nil(t, err) + assert.True(t, sentToPid1) +} + +// ------- SelectRandomPeers + +func TestSelectRandomPeers_ConnectedPeersLen0ShoudRetEmpty(t *testing.T) { + t.Parallel() + + selectedPeers := topicResolverSender.SelectRandomPeers(make([]p2p.PeerID, 0), 0, nil) + + assert.Equal(t, 0, len(selectedPeers)) +} + +func TestSelectRandomPeers_ConnectedPeersLenSmallerThanRequiredShoudRetListTest1(t *testing.T) { + t.Parallel() + + connectedPeers := []p2p.PeerID{p2p.PeerID("peer 1"), p2p.PeerID("peer 2")} + + 
selectedPeers := topicResolverSender.SelectRandomPeers(connectedPeers, 3, nil) + + assert.Equal(t, connectedPeers, selectedPeers) +} + +func TestSelectRandomPeers_ConnectedPeersLenSmallerThanRequiredShoudRetListTest2(t *testing.T) { + t.Parallel() + + connectedPeers := []p2p.PeerID{p2p.PeerID("peer 1"), p2p.PeerID("peer 2")} + + selectedPeers := topicResolverSender.SelectRandomPeers(connectedPeers, 2, nil) + + assert.Equal(t, connectedPeers, selectedPeers) +} + +func TestSelectRandomPeers_ConnectedPeersTestRandomizerRepeat0ThreeTimes(t *testing.T) { + t.Parallel() + + connectedPeers := []p2p.PeerID{p2p.PeerID("peer 1"), p2p.PeerID("peer 2"), p2p.PeerID("peer 3")} + + valuesGenerated := []int{0, 0, 0, 1} + idxGenerated := 0 + + mr := &mock.IntRandomizerMock{ + IntnCalled: func(n int) int { + val := valuesGenerated[idxGenerated] + idxGenerated++ + return val + }, + } + + selectedPeers := topicResolverSender.SelectRandomPeers(connectedPeers, 2, mr) + + //since iterating a map does not guarantee the order, we have to search in any combination possible + foundPeer0 := false + foundPeer1 := false + + for i := 0; i < len(selectedPeers); i++ { + if selectedPeers[i] == connectedPeers[0] { + foundPeer0 = true + } + if selectedPeers[i] == connectedPeers[1] { + foundPeer1 = true + } + } + + assert.True(t, foundPeer0 && foundPeer1) + assert.Equal(t, 2, len(selectedPeers)) +} + +func TestSelectRandomPeers_ConnectedPeersTestRandomizerRepeat2TwoTimes(t *testing.T) { + t.Parallel() + + connectedPeers := []p2p.PeerID{p2p.PeerID("peer 1"), p2p.PeerID("peer 2"), p2p.PeerID("peer 3")} + + valuesGenerated := []int{2, 2, 0} + idxGenerated := 0 + + mr := &mock.IntRandomizerMock{ + IntnCalled: func(n int) int { + val := valuesGenerated[idxGenerated] + idxGenerated++ + return val + }, + } + + selectedPeers := topicResolverSender.SelectRandomPeers(connectedPeers, 2, mr) + + //since iterating a map does not guarantee the order, we have to search in any combination possible + foundPeer0 := 
false + foundPeer2 := false + + for i := 0; i < len(selectedPeers); i++ { + if selectedPeers[i] == connectedPeers[0] { + foundPeer0 = true + } + if selectedPeers[i] == connectedPeers[2] { + foundPeer2 = true + } + } + + assert.True(t, foundPeer0 && foundPeer2) + assert.Equal(t, 2, len(selectedPeers)) +} diff --git a/process/transaction/export_test.go b/process/transaction/export_test.go index ccb98c5aaba..11172a09df7 100644 --- a/process/transaction/export_test.go +++ b/process/transaction/export_test.go @@ -5,8 +5,6 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/data/state" "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" - "github.com/ElrondNetwork/elrond-go-sandbox/process" ) func (txProc *txProcessor) GetAddresses(tx *transaction.Transaction) (adrSrc, adrDest state.AddressContainer, err error) { @@ -32,11 +30,3 @@ func (txProc *txProcessor) MoveBalances(acntSrc, acntDest state.JournalizedAccou func (txProc *txProcessor) IncreaseNonceAcntSrc(acntSrc state.JournalizedAccountWrapper) error { return txProc.increaseNonceAcntSrc(acntSrc) } - -func (txi *TxInterceptor) ProcessTx(tx p2p.Creator, rawData []byte) error { - return txi.processTx(tx, rawData) -} - -func (txRes *TxResolver) ResolveTxRequest(rd process.RequestData) ([]byte, error) { - return txRes.resolveTxRequest(rd) -} diff --git a/process/transaction/interceptedTransaction.go b/process/transaction/interceptedTransaction.go index 3ec241be0b2..814caefaca6 100644 --- a/process/transaction/interceptedTransaction.go +++ b/process/transaction/interceptedTransaction.go @@ -6,7 +6,6 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/crypto" "github.com/ElrondNetwork/elrond-go-sandbox/data/state" "github.com/ElrondNetwork/elrond-go-sandbox/data/transaction" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/ElrondNetwork/elrond-go-sandbox/sharding" ) @@ -33,16 +32,6 @@ func 
NewInterceptedTransaction(signer crypto.SingleSigner) *InterceptedTransacti } } -// Create returns a new instance of this struct (used in topics) -func (inTx *InterceptedTransaction) Create() p2p.Creator { - return NewInterceptedTransaction(inTx.singleSigner) -} - -// ID returns the ID of this object. Set to return the hash of the transaction -func (inTx *InterceptedTransaction) ID() string { - return string(inTx.hash) -} - // IntegrityAndValidity returns a non nil error if transaction failed some checking tests func (inTx *InterceptedTransaction) IntegrityAndValidity(coordinator sharding.ShardCoordinator) error { if coordinator == nil { diff --git a/process/transaction/interceptedTransaction_test.go b/process/transaction/interceptedTransaction_test.go index 65a8b0404c5..12971dbbb84 100644 --- a/process/transaction/interceptedTransaction_test.go +++ b/process/transaction/interceptedTransaction_test.go @@ -373,7 +373,6 @@ func TestTransactionInterceptor_GetterSetterHash(t *testing.T) { tx := transaction.NewInterceptedTransaction(signer) tx.SetHash(hash) - assert.Equal(t, string(hash), tx.ID()) assert.Equal(t, string(hash), string(tx.Hash())) } diff --git a/process/transaction/interceptor.go b/process/transaction/interceptor.go index 10dbcf6e641..9d5763addef 100644 --- a/process/transaction/interceptor.go +++ b/process/transaction/interceptor.go @@ -5,6 +5,7 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/data" "github.com/ElrondNetwork/elrond-go-sandbox/data/state" "github.com/ElrondNetwork/elrond-go-sandbox/hashing" + "github.com/ElrondNetwork/elrond-go-sandbox/marshal" "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/ElrondNetwork/elrond-go-sandbox/sharding" @@ -13,7 +14,7 @@ import ( // TxInterceptor is used for intercepting transaction and storing them into a datapool type TxInterceptor struct { - process.Interceptor + marshalizer marshal.Marshalizer txPool data.ShardedDataCacherNotifier 
txStorer storage.Storer addrConverter state.AddressConverter @@ -25,7 +26,7 @@ type TxInterceptor struct { // NewTxInterceptor hooks a new interceptor for transactions func NewTxInterceptor( - interceptor process.Interceptor, + marshalizer marshal.Marshalizer, txPool data.ShardedDataCacherNotifier, txStorer storage.Storer, addrConverter state.AddressConverter, @@ -35,8 +36,8 @@ func NewTxInterceptor( shardCoordinator sharding.ShardCoordinator, ) (*TxInterceptor, error) { - if interceptor == nil { - return nil, process.ErrNilInterceptor + if marshalizer == nil { + return nil, process.ErrNilMarshalizer } if txPool == nil { @@ -72,7 +73,7 @@ func NewTxInterceptor( } txIntercept := &TxInterceptor{ - Interceptor: interceptor, + marshalizer: marshalizer, txPool: txPool, txStorer: txStorer, hasher: hasher, @@ -82,40 +83,35 @@ func NewTxInterceptor( shardCoordinator: shardCoordinator, } - interceptor.SetCheckReceivedObjectHandler(txIntercept.processTx) - return txIntercept, nil } -func (txi *TxInterceptor) processTx(tx p2p.Creator, rawData []byte) error { - if tx == nil { - return process.ErrNilTransaction +// ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received +// (for the topic this validator was registered to) +func (txi *TxInterceptor) ProcessReceivedMessage(message p2p.MessageP2P) error { + if message == nil { + return process.ErrNilMessage } - if rawData == nil { + if message.Data() == nil { return process.ErrNilDataToProcess } - txIntercepted, ok := tx.(process.TransactionInterceptorAdapter) - - if !ok { - return process.ErrBadInterceptorTopicImplementation + txIntercepted := NewInterceptedTransaction(txi.singleSigner) + err := txi.marshalizer.Unmarshal(txIntercepted, message.Data()) + if err != nil { + return err } txIntercepted.SetAddressConverter(txi.addrConverter) txIntercepted.SetSingleSignKeyGen(txi.keyGen) - hashWithSig := txi.hasher.Compute(string(rawData)) + hashWithSig := 
txi.hasher.Compute(string(message.Data())) txIntercepted.SetHash(hashWithSig) copiedTx := *txIntercepted.GetTransaction() copiedTx.Signature = nil - marshalizer := txi.Marshalizer() - if marshalizer == nil { - return process.ErrNilMarshalizer - } - - buffCopiedTx, err := marshalizer.Marshal(&copiedTx) + buffCopiedTx, err := txi.marshalizer.Marshal(&copiedTx) if err != nil { return err } diff --git a/process/transaction/interceptor_test.go b/process/transaction/interceptor_test.go index 800aaeb3984..0915e32cfd5 100644 --- a/process/transaction/interceptor_test.go +++ b/process/transaction/interceptor_test.go @@ -7,18 +7,16 @@ import ( "github.com/ElrondNetwork/elrond-go-sandbox/crypto" "github.com/ElrondNetwork/elrond-go-sandbox/data/state" - "github.com/ElrondNetwork/elrond-go-sandbox/marshal" - "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" "github.com/ElrondNetwork/elrond-go-sandbox/process/transaction" - "github.com/stretchr/testify/assert" "github.com/pkg/errors" + "github.com/stretchr/testify/assert" ) //------- NewTxInterceptor -func TestNewTxInterceptor_NilInterceptorShouldErr(t *testing.T) { +func TestNewTxInterceptor_NilMarshalizerShouldErr(t *testing.T) { t.Parallel() txPool := &mock.ShardedDataStub{} @@ -38,14 +36,13 @@ func TestNewTxInterceptor_NilInterceptorShouldErr(t *testing.T) { keyGen, oneSharder) - assert.Equal(t, process.ErrNilInterceptor, err) + assert.Equal(t, process.ErrNilMarshalizer, err) assert.Nil(t, txi) } func TestNewTxInterceptor_NilTransactionPoolShouldErr(t *testing.T) { t.Parallel() - interceptor := &mock.InterceptorStub{} addrConv := &mock.AddressConverterMock{} keyGen := &mock.SingleSignKeyGenMock{} oneSharder := mock.NewOneShardCoordinatorMock() @@ -53,7 +50,7 @@ func TestNewTxInterceptor_NilTransactionPoolShouldErr(t *testing.T) { signer := &mock.SignerMock{} txi, err := transaction.NewTxInterceptor( - interceptor, + 
&mock.MarshalizerMock{}, nil, storer, addrConv, @@ -70,14 +67,13 @@ func TestNewTxInterceptor_NilStorerShouldErr(t *testing.T) { t.Parallel() txPool := &mock.ShardedDataStub{} - interceptor := &mock.InterceptorStub{} addrConv := &mock.AddressConverterMock{} keyGen := &mock.SingleSignKeyGenMock{} oneSharder := mock.NewOneShardCoordinatorMock() signer := &mock.SignerMock{} txi, err := transaction.NewTxInterceptor( - interceptor, + &mock.MarshalizerMock{}, txPool, nil, addrConv, @@ -93,7 +89,6 @@ func TestNewTxInterceptor_NilStorerShouldErr(t *testing.T) { func TestNewTxInterceptor_NilAddressConverterShouldErr(t *testing.T) { t.Parallel() - interceptor := &mock.InterceptorStub{} txPool := &mock.ShardedDataStub{} keyGen := &mock.SingleSignKeyGenMock{} oneSharder := mock.NewOneShardCoordinatorMock() @@ -101,7 +96,7 @@ func TestNewTxInterceptor_NilAddressConverterShouldErr(t *testing.T) { signer := &mock.SignerMock{} txi, err := transaction.NewTxInterceptor( - interceptor, + &mock.MarshalizerMock{}, txPool, storer, nil, @@ -117,7 +112,6 @@ func TestNewTxInterceptor_NilAddressConverterShouldErr(t *testing.T) { func TestNewTxInterceptor_NilHasherShouldErr(t *testing.T) { t.Parallel() - interceptor := &mock.InterceptorStub{} txPool := &mock.ShardedDataStub{} addrConv := &mock.AddressConverterMock{} keyGen := &mock.SingleSignKeyGenMock{} @@ -126,7 +120,7 @@ func TestNewTxInterceptor_NilHasherShouldErr(t *testing.T) { signer := &mock.SignerMock{} txi, err := transaction.NewTxInterceptor( - interceptor, + &mock.MarshalizerMock{}, txPool, storer, addrConv, @@ -142,7 +136,6 @@ func TestNewTxInterceptor_NilHasherShouldErr(t *testing.T) { func TestNewTxInterceptor_NilSignerShouldErr(t *testing.T) { t.Parallel() - interceptor := &mock.InterceptorStub{} txPool := &mock.ShardedDataStub{} addrConv := &mock.AddressConverterMock{} keyGen := &mock.SingleSignKeyGenMock{} @@ -150,7 +143,7 @@ func TestNewTxInterceptor_NilSignerShouldErr(t *testing.T) { storer := &mock.StorerStub{} txi, err 
:= transaction.NewTxInterceptor( - interceptor, + &mock.MarshalizerMock{}, txPool, storer, addrConv, @@ -166,7 +159,6 @@ func TestNewTxInterceptor_NilSignerShouldErr(t *testing.T) { func TestNewTxInterceptor_NilKeyGenShouldErr(t *testing.T) { t.Parallel() - interceptor := &mock.InterceptorStub{} txPool := &mock.ShardedDataStub{} addrConv := &mock.AddressConverterMock{} oneSharder := mock.NewOneShardCoordinatorMock() @@ -174,7 +166,7 @@ func TestNewTxInterceptor_NilKeyGenShouldErr(t *testing.T) { signer := &mock.SignerMock{} txi, err := transaction.NewTxInterceptor( - interceptor, + &mock.MarshalizerMock{}, txPool, storer, addrConv, @@ -190,7 +182,6 @@ func TestNewTxInterceptor_NilKeyGenShouldErr(t *testing.T) { func TestNewTxInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { t.Parallel() - interceptor := &mock.InterceptorStub{} txPool := &mock.ShardedDataStub{} addrConv := &mock.AddressConverterMock{} keyGen := &mock.SingleSignKeyGenMock{} @@ -198,7 +189,7 @@ func TestNewTxInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { signer := &mock.SignerMock{} txi, err := transaction.NewTxInterceptor( - interceptor, + &mock.MarshalizerMock{}, txPool, storer, addrConv, @@ -214,10 +205,6 @@ func TestNewTxInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { func TestNewTxInterceptor_OkValsShouldWork(t *testing.T) { t.Parallel() - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - txPool := &mock.ShardedDataStub{} addrConv := &mock.AddressConverterMock{} keyGen := &mock.SingleSignKeyGenMock{} @@ -226,7 +213,7 @@ func TestNewTxInterceptor_OkValsShouldWork(t *testing.T) { signer := &mock.SignerMock{} txi, err := transaction.NewTxInterceptor( - interceptor, + &mock.MarshalizerMock{}, txPool, storer, addrConv, @@ -239,15 +226,11 @@ func TestNewTxInterceptor_OkValsShouldWork(t *testing.T) { assert.NotNil(t, txi) } -//------- processTx +//------- 
ProcessReceivedMessage -func TestTransactionInterceptor_ProcessTxNilTxShouldErr(t *testing.T) { +func TestTransactionInterceptor_ProcessReceivedMessageNilMesssageShouldErr(t *testing.T) { t.Parallel() - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - txPool := &mock.ShardedDataStub{} addrConv := &mock.AddressConverterMock{} keyGen := &mock.SingleSignKeyGenMock{} @@ -256,7 +239,7 @@ func TestTransactionInterceptor_ProcessTxNilTxShouldErr(t *testing.T) { signer := &mock.SignerMock{} txi, _ := transaction.NewTxInterceptor( - interceptor, + &mock.MarshalizerMock{}, txPool, storer, addrConv, @@ -265,16 +248,12 @@ func TestTransactionInterceptor_ProcessTxNilTxShouldErr(t *testing.T) { keyGen, oneSharder) - assert.Equal(t, process.ErrNilTransaction, txi.ProcessTx(nil, make([]byte, 0))) + assert.Equal(t, process.ErrNilMessage, txi.ProcessReceivedMessage(nil)) } -func TestTransactionInterceptor_ProcessTxWrongTypeOfNewerShouldErr(t *testing.T) { +func TestTransactionInterceptor_ProcessReceivedMessageMilMessageDataShouldErr(t *testing.T) { t.Parallel() - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - txPool := &mock.ShardedDataStub{} addrConv := &mock.AddressConverterMock{} keyGen := &mock.SingleSignKeyGenMock{} @@ -283,7 +262,7 @@ func TestTransactionInterceptor_ProcessTxWrongTypeOfNewerShouldErr(t *testing.T) signer := &mock.SignerMock{} txi, _ := transaction.NewTxInterceptor( - interceptor, + &mock.MarshalizerMock{}, txPool, storer, addrConv, @@ -292,20 +271,15 @@ func TestTransactionInterceptor_ProcessTxWrongTypeOfNewerShouldErr(t *testing.T) keyGen, oneSharder) - sn := mock.StringCreator{} + msg := &mock.P2PMessageMock{} - assert.Equal(t, process.ErrBadInterceptorTopicImplementation, txi.ProcessTx(&sn, make([]byte, 0))) + assert.Equal(t, 
process.ErrNilDataToProcess, txi.ProcessReceivedMessage(msg)) } -func TestTransactionInterceptor_ProcessTxNilMarshalizerShouldErr(t *testing.T) { +func TestTransactionInterceptor_ProcessReceivedMessageMarshalizerFailsAtUnmarshalingShouldErr(t *testing.T) { t.Parallel() - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - interceptor.MarshalizerCalled = func() marshal.Marshalizer { - return nil - } + errMarshalizer := errors.New("marshalizer error") txPool := &mock.ShardedDataStub{} addrConv := &mock.AddressConverterMock{} @@ -315,7 +289,11 @@ func TestTransactionInterceptor_ProcessTxNilMarshalizerShouldErr(t *testing.T) { signer := &mock.SignerMock{} txi, _ := transaction.NewTxInterceptor( - interceptor, + &mock.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + return errMarshalizer + }, + }, txPool, storer, addrConv, @@ -324,24 +302,17 @@ func TestTransactionInterceptor_ProcessTxNilMarshalizerShouldErr(t *testing.T) { keyGen, oneSharder) - txNewer := transaction.NewInterceptedTransaction(signer) - txNewer.Signature = make([]byte, 0) - txNewer.Challenge = make([]byte, 0) - txNewer.RcvAddr = make([]byte, 0) - txNewer.SndAddr = make([]byte, 0) + msg := &mock.P2PMessageMock{ + DataField: make([]byte, 0), + } - assert.Equal(t, process.ErrNilMarshalizer, txi.ProcessTx(txNewer, make([]byte, 0))) + assert.Equal(t, errMarshalizer, txi.ProcessReceivedMessage(msg)) } -func TestTransactionInterceptor_ProcessTxIntegrityFailedShouldErr(t *testing.T) { +func TestTransactionInterceptor_ProcessReceivedMessageMarshalizerFailsAtMarshalingShouldErr(t *testing.T) { t.Parallel() - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - interceptor.MarshalizerCalled = func() marshal.Marshalizer { - return &mock.MarshalizerMock{} - } + errMarshalizer := 
errors.New("marshalizer error") txPool := &mock.ShardedDataStub{} addrConv := &mock.AddressConverterMock{} @@ -351,7 +322,14 @@ func TestTransactionInterceptor_ProcessTxIntegrityFailedShouldErr(t *testing.T) signer := &mock.SignerMock{} txi, _ := transaction.NewTxInterceptor( - interceptor, + &mock.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + return nil + }, + MarshalCalled: func(obj interface{}) (bytes []byte, e error) { + return nil, errMarshalizer + }, + }, txPool, storer, addrConv, @@ -360,24 +338,17 @@ func TestTransactionInterceptor_ProcessTxIntegrityFailedShouldErr(t *testing.T) keyGen, oneSharder) - txNewer := transaction.NewInterceptedTransaction(signer) - txNewer.Signature = nil - txNewer.Challenge = make([]byte, 0) - txNewer.RcvAddr = make([]byte, 0) - txNewer.SndAddr = make([]byte, 0) + msg := &mock.P2PMessageMock{ + DataField: make([]byte, 0), + } - assert.Equal(t, process.ErrNilSignature, txi.ProcessTx(txNewer, make([]byte, 0))) + assert.Equal(t, errMarshalizer, txi.ProcessReceivedMessage(msg)) } -func TestTransactionInterceptor_ProcessNilDataToProcessShouldErr(t *testing.T) { +func TestTransactionInterceptor_ProcessReceivedMessageIntegrityFailedShouldErr(t *testing.T) { t.Parallel() - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - interceptor.MarshalizerCalled = func() marshal.Marshalizer { - return &mock.MarshalizerMock{} - } + marshalizer := &mock.MarshalizerMock{} txPool := &mock.ShardedDataStub{} addrConv := &mock.AddressConverterMock{} @@ -387,7 +358,7 @@ func TestTransactionInterceptor_ProcessNilDataToProcessShouldErr(t *testing.T) { signer := &mock.SignerMock{} txi, _ := transaction.NewTxInterceptor( - interceptor, + marshalizer, txPool, storer, addrConv, @@ -397,82 +368,44 @@ func TestTransactionInterceptor_ProcessNilDataToProcessShouldErr(t *testing.T) { oneSharder) txNewer := 
transaction.NewInterceptedTransaction(signer) - txNewer.Signature = make([]byte, 0) + txNewer.Signature = nil txNewer.Challenge = make([]byte, 0) txNewer.RcvAddr = make([]byte, 0) txNewer.SndAddr = make([]byte, 0) - assert.Equal(t, process.ErrNilDataToProcess, txi.ProcessTx(txNewer, nil)) -} - -func TestTransactionInterceptor_ProcessTxIntegrityAndValidityShouldErr(t *testing.T) { - t.Parallel() - - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - interceptor.MarshalizerCalled = func() marshal.Marshalizer { - return &mock.MarshalizerMock{} + buff, _ := marshalizer.Marshal(txNewer) + msg := &mock.P2PMessageMock{ + DataField: buff, } - txPool := &mock.ShardedDataStub{} - addrConv := &mock.AddressConverterMock{} - keyGen := &mock.SingleSignKeyGenMock{} - oneSharder := mock.NewOneShardCoordinatorMock() - storer := &mock.StorerStub{} - signer := &mock.SignerMock{} - - txi, _ := transaction.NewTxInterceptor( - interceptor, - txPool, - storer, - addrConv, - mock.HasherMock{}, - signer, - keyGen, - oneSharder) - - txNewer := transaction.NewInterceptedTransaction(signer) - txNewer.Signature = make([]byte, 0) - txNewer.Challenge = make([]byte, 0) - txNewer.RcvAddr = []byte("please fail, addrConverter!") - txNewer.SndAddr = make([]byte, 0) - txNewer.Value = big.NewInt(0) - - addrConv.CreateAddressFromPublicKeyBytesRetErrForValue = []byte("please fail, addrConverter!") - - assert.Equal(t, process.ErrInvalidRcvAddr, txi.ProcessTx(txNewer, make([]byte, 0))) + assert.Equal(t, process.ErrNilSignature, txi.ProcessReceivedMessage(msg)) } -func TestTransactionInterceptor_ProcessTxVerifySigFailsShouldErr(t *testing.T) { +func TestTransactionInterceptor_ProcessReceivedMessageVerifySigFailsShouldErr(t *testing.T) { t.Parallel() - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - 
interceptor.MarshalizerCalled = func() marshal.Marshalizer { - return &mock.MarshalizerMock{} - } - txPool := &mock.ShardedDataStub{} addrConv := &mock.AddressConverterMock{} + marshalizer := &mock.MarshalizerMock{} pubKey := &mock.SingleSignPublicKey{} keyGen := &mock.SingleSignKeyGenMock{} keyGen.PublicKeyFromByteArrayCalled = func(b []byte) (key crypto.PublicKey, e error) { return pubKey, nil } + errExpected := errors.New("sig not valid") + oneSharder := mock.NewOneShardCoordinatorMock() storer := &mock.StorerStub{} signer := &mock.SignerMock{ VerifyStub: func(public crypto.PublicKey, msg []byte, sig []byte) error { - return errors.New("sig not valid") + return errExpected }, } txi, _ := transaction.NewTxInterceptor( - interceptor, + marshalizer, txPool, storer, addrConv, @@ -488,27 +421,23 @@ func TestTransactionInterceptor_ProcessTxVerifySigFailsShouldErr(t *testing.T) { txNewer.SndAddr = make([]byte, 0) txNewer.Value = big.NewInt(0) - assert.Equal(t, "sig not valid", txi.ProcessTx(txNewer, []byte("txHash")).Error()) + buff, _ := marshalizer.Marshal(txNewer) + msg := &mock.P2PMessageMock{ + DataField: buff, + } + + assert.Equal(t, errExpected, txi.ProcessReceivedMessage(msg)) } -func TestTransactionInterceptor_ProcessTxOkValsSameShardShouldWork(t *testing.T) { +func TestTransactionInterceptor_ProcessReceivedMessageOkValsSameShardShouldWork(t *testing.T) { t.Parallel() - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - interceptor.MarshalizerCalled = func() marshal.Marshalizer { - return &mock.MarshalizerMock{} - } + marshalizer := &mock.MarshalizerMock{} wasAdded := 0 txPool := &mock.ShardedDataStub{} - txPool.AddDataCalled = func(key []byte, data interface{}, destShardID uint32) { - if bytes.Equal(mock.HasherMock{}.Compute("txHash"), key) { - wasAdded++ - } - } + addrConv := &mock.AddressConverterMock{} pubKey := &mock.SingleSignPublicKey{} @@ -524,12 
+453,12 @@ func TestTransactionInterceptor_ProcessTxOkValsSameShardShouldWork(t *testing.T) } signer := &mock.SignerMock{ VerifyStub: func(public crypto.PublicKey, msg []byte, sig []byte) error { - return nil + return nil }, } txi, _ := transaction.NewTxInterceptor( - interceptor, + marshalizer, txPool, storer, addrConv, @@ -545,28 +474,30 @@ func TestTransactionInterceptor_ProcessTxOkValsSameShardShouldWork(t *testing.T) txNewer.SndAddr = make([]byte, 0) txNewer.Value = big.NewInt(0) - assert.Nil(t, txi.ProcessTx(txNewer, []byte("txHash"))) + buff, _ := marshalizer.Marshal(txNewer) + msg := &mock.P2PMessageMock{ + DataField: buff, + } + + txPool.AddDataCalled = func(key []byte, data interface{}, destShardID uint32) { + if bytes.Equal(mock.HasherMock{}.Compute(string(buff)), key) { + wasAdded++ + } + } + + assert.Nil(t, txi.ProcessReceivedMessage(msg)) assert.Equal(t, 1, wasAdded) } -func TestTransactionInterceptor_ProcessTxOkValsOtherShardsShouldWork(t *testing.T) { +func TestTransactionInterceptor_ProcessReceivedMessageOkValsOtherShardsShouldWork(t *testing.T) { t.Parallel() - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - interceptor.MarshalizerCalled = func() marshal.Marshalizer { - return &mock.MarshalizerMock{} - } + marshalizer := &mock.MarshalizerMock{} wasAdded := 0 txPool := &mock.ShardedDataStub{} - txPool.AddDataCalled = func(key []byte, data interface{}, destShardID uint32) { - if bytes.Equal(mock.HasherMock{}.Compute("txHash"), key) { - wasAdded++ - } - } + addrConv := &mock.AddressConverterMock{} pubKey := &mock.SingleSignPublicKey{} @@ -583,12 +514,12 @@ func TestTransactionInterceptor_ProcessTxOkValsOtherShardsShouldWork(t *testing. 
storer := &mock.StorerStub{} signer := &mock.SignerMock{ VerifyStub: func(public crypto.PublicKey, msg []byte, sig []byte) error { - return nil + return nil }, } txi, _ := transaction.NewTxInterceptor( - interceptor, + marshalizer, txPool, storer, addrConv, @@ -604,90 +535,29 @@ func TestTransactionInterceptor_ProcessTxOkValsOtherShardsShouldWork(t *testing. txNewer.SndAddr = make([]byte, 0) txNewer.Value = big.NewInt(0) - assert.Nil(t, txi.ProcessTx(txNewer, []byte("txHash"))) - assert.Equal(t, 0, wasAdded) -} - -func TestTransactionInterceptor_ProcessTxMarshalizerFailShouldErr(t *testing.T) { - t.Parallel() - - marshalizer := &mock.MarshalizerMock{} - marshalizer.Fail = true - - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - interceptor.MarshalizerCalled = func() marshal.Marshalizer { - return marshalizer + buff, _ := marshalizer.Marshal(txNewer) + msg := &mock.P2PMessageMock{ + DataField: buff, } - txPool := &mock.ShardedDataStub{} txPool.AddDataCalled = func(key []byte, data interface{}, destShardID uint32) { - if bytes.Equal(mock.HasherMock{}.Compute("txHash"), key) { + if bytes.Equal(mock.HasherMock{}.Compute(string(buff)), key) { + wasAdded++ } } - addrConv := &mock.AddressConverterMock{} - - pubKey := &mock.SingleSignPublicKey{} - keyGen := &mock.SingleSignKeyGenMock{} - keyGen.PublicKeyFromByteArrayCalled = func(b []byte) (key crypto.PublicKey, e error) { - return pubKey, nil - } - storer := &mock.StorerStub{} - storer.HasCalled = func(key []byte) (bool, error) { - return false, nil - } - - multiSharder := mock.NewMultipleShardsCoordinatorMock() - multiSharder.CurrentShard = 0 - called := uint32(0) - multiSharder.ComputeShardForAddressCalled = func(address state.AddressContainer, addressConverter state.AddressConverter) uint32 { - defer func() { - called++ - }() - - return called - } - signer := &mock.SignerMock{} - - txi, _ := 
transaction.NewTxInterceptor( - interceptor, - txPool, - storer, - addrConv, - mock.HasherMock{}, - signer, - keyGen, - multiSharder) - - txNewer := transaction.NewInterceptedTransaction(signer) - txNewer.Signature = make([]byte, 0) - txNewer.Challenge = make([]byte, 0) - txNewer.RcvAddr = make([]byte, 0) - txNewer.SndAddr = make([]byte, 0) - err := txi.ProcessTx(txNewer, []byte("txHash")) - assert.Equal(t, "MarshalizerMock generic error", err.Error()) + assert.Nil(t, txi.ProcessReceivedMessage(msg)) + assert.Equal(t, 0, wasAdded) } -func TestTransactionInterceptor_ProcessTxOkVals2ShardsShouldWork(t *testing.T) { +func TestTransactionInterceptor_ProcessReceivedMessagePresentInStorerShouldNotAdd(t *testing.T) { t.Parallel() - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - interceptor.MarshalizerCalled = func() marshal.Marshalizer { - return &mock.MarshalizerMock{} - } + marshalizer := &mock.MarshalizerMock{} wasAdded := 0 txPool := &mock.ShardedDataStub{} - txPool.AddDataCalled = func(key []byte, data interface{}, destShardID uint32) { - if bytes.Equal(mock.HasherMock{}.Compute("txHash"), key) { - wasAdded++ - } - } addrConv := &mock.AddressConverterMock{} pubKey := &mock.SingleSignPublicKey{} @@ -697,7 +567,7 @@ func TestTransactionInterceptor_ProcessTxOkVals2ShardsShouldWork(t *testing.T) { } storer := &mock.StorerStub{} storer.HasCalled = func(key []byte) (bool, error) { - return false, nil + return true, nil } multiSharder := mock.NewMultipleShardsCoordinatorMock() @@ -717,7 +587,7 @@ func TestTransactionInterceptor_ProcessTxOkVals2ShardsShouldWork(t *testing.T) { } txi, _ := transaction.NewTxInterceptor( - interceptor, + marshalizer, txPool, storer, addrConv, @@ -733,73 +603,17 @@ func TestTransactionInterceptor_ProcessTxOkVals2ShardsShouldWork(t *testing.T) { txNewer.SndAddr = make([]byte, 0) txNewer.Value = big.NewInt(0) - assert.Nil(t, 
txi.ProcessTx(txNewer, []byte("txHash"))) - assert.Equal(t, 1, wasAdded) -} - -func TestTransactionInterceptor_ProcessTxPresentInStorerShouldNotAdd(t *testing.T) { - t.Parallel() - - interceptor := &mock.InterceptorStub{} - interceptor.SetCheckReceivedObjectHandlerCalled = func(i func(newer p2p.Creator, rawData []byte) error) { - } - interceptor.MarshalizerCalled = func() marshal.Marshalizer { - return &mock.MarshalizerMock{} + buff, _ := marshalizer.Marshal(txNewer) + msg := &mock.P2PMessageMock{ + DataField: buff, } - wasAdded := 0 - - txPool := &mock.ShardedDataStub{} txPool.AddDataCalled = func(key []byte, data interface{}, destShardID uint32) { - if bytes.Equal(mock.HasherMock{}.Compute("txHash"), key) { + if bytes.Equal(mock.HasherMock{}.Compute(string(buff)), key) { wasAdded++ } } - addrConv := &mock.AddressConverterMock{} - - pubKey := &mock.SingleSignPublicKey{} - keyGen := &mock.SingleSignKeyGenMock{} - keyGen.PublicKeyFromByteArrayCalled = func(b []byte) (key crypto.PublicKey, e error) { - return pubKey, nil - } - storer := &mock.StorerStub{} - storer.HasCalled = func(key []byte) (bool, error) { - return true, nil - } - - multiSharder := mock.NewMultipleShardsCoordinatorMock() - multiSharder.CurrentShard = 0 - called := uint32(0) - multiSharder.ComputeShardForAddressCalled = func(address state.AddressContainer, addressConverter state.AddressConverter) uint32 { - defer func() { - called++ - }() - - return called - } - signer := &mock.SignerMock{ - VerifyStub: func(public crypto.PublicKey, msg []byte, sig []byte) error { - return nil - }, - } - - txi, _ := transaction.NewTxInterceptor( - interceptor, - txPool, - storer, - addrConv, - mock.HasherMock{}, - signer, - keyGen, - multiSharder) - - txNewer := transaction.NewInterceptedTransaction(signer) - txNewer.Signature = make([]byte, 0) - txNewer.Challenge = make([]byte, 0) - txNewer.RcvAddr = make([]byte, 0) - txNewer.SndAddr = make([]byte, 0) - txNewer.Value = big.NewInt(0) - assert.Nil(t, 
txi.ProcessTx(txNewer, []byte("txHash"))) + assert.Nil(t, txi.ProcessReceivedMessage(msg)) assert.Equal(t, 0, wasAdded) } diff --git a/process/transaction/resolver.go b/process/transaction/resolver.go index 32457a21fa8..31e31f8fc67 100644 --- a/process/transaction/resolver.go +++ b/process/transaction/resolver.go @@ -1,15 +1,18 @@ package transaction import ( + "fmt" + "github.com/ElrondNetwork/elrond-go-sandbox/data" "github.com/ElrondNetwork/elrond-go-sandbox/marshal" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/ElrondNetwork/elrond-go-sandbox/storage" ) // TxResolver is a wrapper over Resolver that is specialized in resolving transaction requests type TxResolver struct { - process.Resolver + process.TopicResolverSender txPool data.ShardedDataCacherNotifier txStorage storage.Storer marshalizer marshal.Marshalizer @@ -17,14 +20,14 @@ type TxResolver struct { // NewTxResolver creates a new transaction resolver func NewTxResolver( - resolver process.Resolver, + senderResolver process.TopicResolverSender, txPool data.ShardedDataCacherNotifier, txStorage storage.Storer, marshalizer marshal.Marshalizer, ) (*TxResolver, error) { - if resolver == nil { - return nil, process.ErrNilResolver + if senderResolver == nil { + return nil, process.ErrNilResolverSender } if txPool == nil { @@ -40,17 +43,38 @@ func NewTxResolver( } txResolver := &TxResolver{ - Resolver: resolver, - txPool: txPool, - txStorage: txStorage, - marshalizer: marshalizer, + TopicResolverSender: senderResolver, + txPool: txPool, + txStorage: txStorage, + marshalizer: marshalizer, } - txResolver.SetResolverHandler(txResolver.resolveTxRequest) return txResolver, nil } -func (txRes *TxResolver) resolveTxRequest(rd process.RequestData) ([]byte, error) { +// ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received +// (for the topic this validator was registered to, 
usually a request topic) +func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P) error { + rd := &process.RequestData{} + err := rd.Unmarshal(txRes.marshalizer, message) + if err != nil { + return err + } + + buff, err := txRes.resolveTxRequest(rd) + if err != nil { + return err + } + + if buff == nil { + log.Debug(fmt.Sprintf("missing data: %v", rd)) + return nil + } + + return txRes.Send(buff, message.Peer()) +} + +func (txRes *TxResolver) resolveTxRequest(rd *process.RequestData) ([]byte, error) { if rd.Type != process.HashType { return nil, process.ErrResolveNotHashType } @@ -72,9 +96,9 @@ func (txRes *TxResolver) resolveTxRequest(rd process.RequestData) ([]byte, error return buff, nil } -// RequestTransactionFromHash requests a transaction from other peers having input the tx hash -func (txRes *TxResolver) RequestTransactionFromHash(hash []byte) error { - return txRes.RequestData(process.RequestData{ +// RequestDataFromHash requests a transaction from other peers having input the tx hash +func (txRes *TxResolver) RequestDataFromHash(hash []byte) error { + return txRes.SendOnRequestTopic(&process.RequestData{ Type: process.HashType, Value: hash, }) diff --git a/process/transaction/resolver_test.go b/process/transaction/resolver_test.go index 7a3b5d078d3..c98fb8f9b26 100644 --- a/process/transaction/resolver_test.go +++ b/process/transaction/resolver_test.go @@ -2,11 +2,12 @@ package transaction import ( "bytes" + "errors" "testing" + "github.com/ElrondNetwork/elrond-go-sandbox/p2p" "github.com/ElrondNetwork/elrond-go-sandbox/process" "github.com/ElrondNetwork/elrond-go-sandbox/process/mock" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) @@ -22,7 +23,7 @@ func TestNewTxResolver_NilResolverShouldErr(t *testing.T) { &mock.MarshalizerMock{}, ) - assert.Equal(t, process.ErrNilResolver, err) + assert.Equal(t, process.ErrNilResolverSender, err) assert.Nil(t, txRes) } @@ -30,7 +31,7 @@ func TestNewTxResolver_NilTxPoolShouldErr(t 
*testing.T) { t.Parallel() txRes, err := NewTxResolver( - &mock.ResolverStub{}, + &mock.TopicResolverSenderStub{}, nil, &mock.StorerStub{}, &mock.MarshalizerMock{}, @@ -44,7 +45,7 @@ func TestNewTxResolver_NilTxStorageShouldErr(t *testing.T) { t.Parallel() txRes, err := NewTxResolver( - &mock.ResolverStub{}, + &mock.TopicResolverSenderStub{}, &mock.ShardedDataStub{}, nil, &mock.MarshalizerMock{}, @@ -58,7 +59,7 @@ func TestNewTxResolver_NilMarshalizerShouldErr(t *testing.T) { t.Parallel() txRes, err := NewTxResolver( - &mock.ResolverStub{}, + &mock.TopicResolverSenderStub{}, &mock.ShardedDataStub{}, &mock.StorerStub{}, nil, @@ -71,12 +72,7 @@ func TestNewTxResolver_NilMarshalizerShouldErr(t *testing.T) { func TestNewTxResolver_OkValsShouldWork(t *testing.T) { t.Parallel() - wasCalled := false - - res := &mock.ResolverStub{} - res.SetResolverHandlerCalled = func(i func(rd process.RequestData) ([]byte, error)) { - wasCalled = true - } + res := &mock.TopicResolverSenderStub{} txRes, err := NewTxResolver( res, @@ -87,96 +83,121 @@ func TestNewTxResolver_OkValsShouldWork(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, txRes) - assert.True(t, wasCalled) } -//------- resolveTxRequest +//------- ProcessReceivedMessage -func TestTxResolver_ResolveTxRequestWrongTypeShouldErr(t *testing.T) { +func TestTxResolver_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { t.Parallel() - res := &mock.ResolverStub{} - res.SetResolverHandlerCalled = func(h func(rd process.RequestData) ([]byte, error)) { - } - txRes, _ := NewTxResolver( - res, + &mock.TopicResolverSenderStub{}, &mock.ShardedDataStub{}, &mock.StorerStub{}, &mock.MarshalizerMock{}, ) - buff, err := txRes.resolveTxRequest(process.RequestData{Type: process.NonceType, Value: []byte("aaa")}) + err := txRes.ProcessReceivedMessage(nil) + + assert.Equal(t, process.ErrNilMessage, err) +} + +func TestTxResolver_ProcessReceivedMessageWrongTypeShouldErr(t *testing.T) { + t.Parallel() + + marshalizer := 
&mock.MarshalizerMock{} + + txRes, _ := NewTxResolver( + &mock.TopicResolverSenderStub{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + marshalizer, + ) + + data, _ := marshalizer.Marshal(&process.RequestData{Type: process.NonceType, Value: []byte("aaa")}) + + msg := &mock.P2PMessageMock{DataField: data} + + err := txRes.ProcessReceivedMessage(msg) - assert.Nil(t, buff) assert.Equal(t, process.ErrResolveNotHashType, err) } -func TestTxResolver_ResolveTxRequestNilValueShouldRetNil(t *testing.T) { +func TestTxResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { t.Parallel() - res := &mock.ResolverStub{} - res.SetResolverHandlerCalled = func(h func(rd process.RequestData) ([]byte, error)) { - } + marshalizer := &mock.MarshalizerMock{} txRes, _ := NewTxResolver( - res, + &mock.TopicResolverSenderStub{}, &mock.ShardedDataStub{}, &mock.StorerStub{}, - &mock.MarshalizerMock{}, + marshalizer, ) - buff, err := txRes.resolveTxRequest(process.RequestData{Type: process.HashType, Value: nil}) + data, _ := marshalizer.Marshal(&process.RequestData{Type: process.HashType, Value: nil}) + + msg := &mock.P2PMessageMock{DataField: data} + + err := txRes.ProcessReceivedMessage(msg) - assert.Nil(t, buff) assert.Equal(t, process.ErrNilValue, err) } -func TestTxResolver_ResolveTxRequestFoundInTxPoolShouldRetVal(t *testing.T) { +func TestTxResolver_ProcessReceivedMessageFoundInTxPoolShouldSearchAndSend(t *testing.T) { t.Parallel() - res := &mock.ResolverStub{} - res.SetResolverHandlerCalled = func(h func(rd process.RequestData) ([]byte, error)) { - } - marshalizer := &mock.MarshalizerMock{} - buffToExpect, err := marshalizer.Marshal("value") - assert.Nil(t, err) + + searchWasCalled := false + sendWasCalled := false txPool := &mock.ShardedDataStub{} txPool.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { if bytes.Equal([]byte("aaa"), key) { - return "value", true + searchWasCalled = true + return make([]byte, 0), true } return nil, false } txRes, _ := 
NewTxResolver( - res, + &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer p2p.PeerID) error { + sendWasCalled = true + return nil + }, + }, txPool, &mock.StorerStub{}, marshalizer, ) - buff, err := txRes.ResolveTxRequest(process.RequestData{Type: process.HashType, Value: []byte("aaa")}) + data, _ := marshalizer.Marshal(&process.RequestData{Type: process.HashType, Value: []byte("aaa")}) + + msg := &mock.P2PMessageMock{DataField: data} + + err := txRes.ProcessReceivedMessage(msg) - //we are 100% sure that the txPool resolved the request because storage is a stub - //and any call to its method whould have panic-ed (&mock.StorerStub{} has uninitialized ...Called fields) assert.Nil(t, err) - assert.Equal(t, buffToExpect, buff) + assert.True(t, searchWasCalled) + assert.True(t, sendWasCalled) } -func TestTxResolver_ResolveTxRequestFoundInTxPoolMarshalizerFailShouldRetNilAndErr(t *testing.T) { +func TestTxResolver_ProcessReceivedMessageFoundInTxPoolMarshalizerFailShouldRetNilAndErr(t *testing.T) { t.Parallel() - res := &mock.ResolverStub{} - res.SetResolverHandlerCalled = func(h func(rd process.RequestData) ([]byte, error)) { - } + errExpected := errors.New("MarshalizerMock generic error") - marshalizer := &mock.MarshalizerStub{} - marshalizer.MarshalCalled = func(obj interface{}) (i []byte, e error) { - return nil, errors.New("MarshalizerMock generic error") + marshalizerMock := &mock.MarshalizerMock{} + marshalizerStub := &mock.MarshalizerStub{ + MarshalCalled: func(obj interface{}) (i []byte, e error) { + return nil, errExpected + }, + UnmarshalCalled: func(obj interface{}, buff []byte) error { + return marshalizerMock.Unmarshal(obj, buff) + }, } txPool := &mock.ShardedDataStub{} @@ -189,26 +210,24 @@ func TestTxResolver_ResolveTxRequestFoundInTxPoolMarshalizerFailShouldRetNilAndE } txRes, _ := NewTxResolver( - res, + &mock.TopicResolverSenderStub{}, txPool, &mock.StorerStub{}, - marshalizer, + marshalizerStub, ) - buff, err := 
txRes.ResolveTxRequest(process.RequestData{Type: process.HashType, Value: []byte("aaa")}) - //Same as above test, we are sure that the marshalizer from txPool request failed as the code would have panic-ed - //otherwise - assert.Nil(t, buff) - assert.Equal(t, "MarshalizerMock generic error", err.Error()) + data, _ := marshalizerMock.Marshal(&process.RequestData{Type: process.HashType, Value: []byte("aaa")}) + + msg := &mock.P2PMessageMock{DataField: data} + + err := txRes.ProcessReceivedMessage(msg) + + assert.Equal(t, errExpected, err) } -func TestTxResolver_ResolveTxRequestFoundInTxStorageShouldRetValAndError(t *testing.T) { +func TestTxResolver_ProcessReceivedMessageFoundInTxStorageShouldRetValAndSend(t *testing.T) { t.Parallel() - res := &mock.ResolverStub{} - res.SetResolverHandlerCalled = func(h func(rd process.RequestData) ([]byte, error)) { - } - marshalizer := &mock.MarshalizerMock{} txPool := &mock.ShardedDataStub{} @@ -217,37 +236,45 @@ func TestTxResolver_ResolveTxRequestFoundInTxStorageShouldRetValAndError(t *test return nil, false } - expectedBuff := []byte("bbb") + searchWasCalled := false + sendWasCalled := false txStorage := &mock.StorerStub{} txStorage.GetCalled = func(key []byte) (i []byte, e error) { if bytes.Equal([]byte("aaa"), key) { - return expectedBuff, nil + searchWasCalled = true + return make([]byte, 0), nil } return nil, nil } txRes, _ := NewTxResolver( - res, + &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer p2p.PeerID) error { + sendWasCalled = true + return nil + }, + }, txPool, txStorage, marshalizer, ) - buff, _ := txRes.ResolveTxRequest(process.RequestData{Type: process.HashType, Value: []byte("aaa")}) + data, _ := marshalizer.Marshal(&process.RequestData{Type: process.HashType, Value: []byte("aaa")}) + + msg := &mock.P2PMessageMock{DataField: data} - assert.Equal(t, expectedBuff, buff) + err := txRes.ProcessReceivedMessage(msg) + assert.Nil(t, err) + assert.True(t, searchWasCalled) + assert.True(t, 
sendWasCalled) } -func TestTxResolver_ResolveTxRequestFoundInTxStorageCheckRetError(t *testing.T) { +func TestTxResolver_ProcessReceivedMessageFoundInTxStorageCheckRetError(t *testing.T) { t.Parallel() - res := &mock.ResolverStub{} - res.SetResolverHandlerCalled = func(h func(rd process.RequestData) ([]byte, error)) { - } - marshalizer := &mock.MarshalizerMock{} txPool := &mock.ShardedDataStub{} @@ -256,41 +283,43 @@ func TestTxResolver_ResolveTxRequestFoundInTxStorageCheckRetError(t *testing.T) return nil, false } - expectedBuff := []byte("bbb") + errExpected := errors.New("expected error") txStorage := &mock.StorerStub{} txStorage.GetCalled = func(key []byte) (i []byte, e error) { if bytes.Equal([]byte("aaa"), key) { - return expectedBuff, errors.New("just checking output error") + return nil, errExpected } return nil, nil } txRes, _ := NewTxResolver( - res, + &mock.TopicResolverSenderStub{}, txPool, txStorage, marshalizer, ) - _, err := txRes.ResolveTxRequest(process.RequestData{Type: process.HashType, Value: []byte("aaa")}) - assert.Equal(t, "just checking output error", err.Error()) + data, _ := marshalizer.Marshal(&process.RequestData{Type: process.HashType, Value: []byte("aaa")}) + + msg := &mock.P2PMessageMock{DataField: data} + + err := txRes.ProcessReceivedMessage(msg) + + assert.Equal(t, errExpected, err) } //------- RequestTransactionFromHash -func TestTxResolver_RequestTransactionFromHashShouldWork(t *testing.T) { +func TestTxResolver_RequestDataFromHashShouldWork(t *testing.T) { t.Parallel() - res := &mock.ResolverStub{} - res.SetResolverHandlerCalled = func(h func(rd process.RequestData) ([]byte, error)) { - } - - requested := process.RequestData{} + requested := &process.RequestData{} - res.RequestDataCalled = func(rd process.RequestData) error { + res := &mock.TopicResolverSenderStub{} + res.SendOnRequestTopicCalled = func(rd *process.RequestData) error { requested = rd return nil } @@ -304,8 +333,8 @@ func 
TestTxResolver_RequestTransactionFromHashShouldWork(t *testing.T) { &mock.MarshalizerMock{}, ) - assert.Nil(t, txRes.RequestTransactionFromHash(buffRequested)) - assert.Equal(t, process.RequestData{ + assert.Nil(t, txRes.RequestDataFromHash(buffRequested)) + assert.Equal(t, &process.RequestData{ Type: process.HashType, Value: buffRequested, }, requested)