diff --git a/data/appRateLimiter.go b/data/appRateLimiter.go
index 1f4472e68a..9bb2fd1254 100644
--- a/data/appRateLimiter.go
+++ b/data/appRateLimiter.go
@@ -81,8 +81,8 @@ func makeAppRateLimiter(maxCacheSize int, maxAppPeerRate uint64, serviceRateWind
 	serviceRatePerWindow := maxAppPeerRate * uint64(serviceRateWindow/time.Second)
 	maxBucketSize := maxCacheSize / numBuckets
 	if maxBucketSize == 0 {
-		// got the max size less then buckets, use maps of 1
-		maxBucketSize = 1
+		// got the max size less than buckets, use maps of 2 to avoid eviction on each insert
+		maxBucketSize = 2
 	}
 	r := &appRateLimiter{
 		maxBucketSize: maxBucketSize,
diff --git a/data/appRateLimiter_test.go b/data/appRateLimiter_test.go
index f5e63dfb36..5a7a872133 100644
--- a/data/appRateLimiter_test.go
+++ b/data/appRateLimiter_test.go
@@ -40,7 +40,7 @@ func TestAppRateLimiter_Make(t *testing.T) {
 	window := 1 * time.Second
 	rm := makeAppRateLimiter(10, rate, window)
-	require.Equal(t, 1, rm.maxBucketSize)
+	require.Equal(t, 2, rm.maxBucketSize)
 	require.NotEmpty(t, rm.seed)
 	require.NotEmpty(t, rm.salt)
 	for i := 0; i < len(rm.buckets); i++ {
diff --git a/data/txHandler_test.go b/data/txHandler_test.go
index 9237865037..23235f15fd 100644
--- a/data/txHandler_test.go
+++ b/data/txHandler_test.go
@@ -2515,8 +2515,14 @@ func TestTxHandlerAppRateLimiterERLEnabled(t *testing.T) {
 	partitiontest.PartitionTest(t)
 	t.Parallel()
 
+	// technically we don't need any users for this test
+	// but we need to create the genesis accounts to prevent this warning:
+	// "cannot start evaluator: overflowed subtracting rewards for block 1"
+	_, _, genesis := makeTestGenesisAccounts(t, 0)
+	genBal := bookkeeping.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
 	ledgerName := fmt.Sprintf("%s-mem", t.Name())
 	const inMem = true
+
 	log := logging.TestingLog(t)
 	log.SetLevel(logging.Panic)
@@ -2525,11 +2531,9 @@ func TestTxHandlerAppRateLimiterERLEnabled(t *testing.T) {
 	cfg.TxBacklogServiceRateWindowSeconds = 1
 	cfg.TxBacklogAppTxPerSecondRate = 3
 	cfg.TxBacklogSize = 3
-	ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, bookkeeping.GenesisBalances{}, genesisID, genesisHash, cfg)
+	l, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg)
 	require.NoError(t, err)
-	defer ledger.Close()
-
-	l := ledger
+	defer l.Close()
 
 	func() {
 		cfg.EnableTxBacklogRateLimiting = false
@@ -2618,9 +2622,10 @@ func TestTxHandlerAppRateLimiterERLEnabled(t *testing.T) {
 	require.Equal(t, 1, handler.appLimiter.len())
 }
 
+// TestTxHandlerAppRateLimiter submits a few app txns to make the app rate limiter filter out the last txn
+// to ensure it is properly integrated with the txHandler
 func TestTxHandlerAppRateLimiter(t *testing.T) {
 	partitiontest.PartitionTest(t)
-	t.Parallel()
 
 	const numUsers = 10
 	log := logging.TestingLog(t)
@@ -2637,16 +2642,16 @@ func TestTxHandlerAppRateLimiter(t *testing.T) {
 	cfg.TxBacklogAppTxRateLimiterMaxSize = 100
 	cfg.TxBacklogServiceRateWindowSeconds = 1
 	cfg.TxBacklogAppTxPerSecondRate = 3
-	ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg)
+	l, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg)
 	require.NoError(t, err)
-	defer ledger.Close()
+	defer l.Close()
 
-	l := ledger
 	handler, err := makeTestTxHandler(l, cfg)
 	require.NoError(t, err)
	defer handler.txVerificationPool.Shutdown()
 	defer close(handler.streamVerifierDropped)
+
 	handler.appLimiterBacklogThreshold = -1 // force the rate limiter to start checking transactions
 	tx := transactions.Transaction{
 		Type: protocol.ApplicationCallTx,
 		Header: transactions.Header{
@@ -2667,21 +2672,21 @@ func TestTxHandlerAppRateLimiter(t *testing.T) {
 	require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, action)
 	require.Equal(t, 1, len(handler.backlogQueue))
 
+	counterBefore := transactionMessagesAppLimiterDrop.GetUint64Value()
 	// trigger the rate limiter and ensure the txn is ignored
-	tx2 := tx
-	for i := 0; i < cfg.TxBacklogAppTxPerSecondRate*cfg.TxBacklogServiceRateWindowSeconds; i++ {
-		tx2.ForeignApps = append(tx2.ForeignApps, 1)
+	numTxnToTriggerARL := cfg.TxBacklogAppTxPerSecondRate * cfg.TxBacklogServiceRateWindowSeconds
+	for i := 0; i < numTxnToTriggerARL; i++ {
+		tx2 := tx
+		tx2.Header.Sender = addresses[i+1]
+		signedTx2 := tx2.Sign(secrets[i+1])
+		blob2 := protocol.Encode(&signedTx2)
+
+		action = handler.processIncomingTxn(network.IncomingMessage{Data: blob2, Sender: mockSender{}})
+		require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, action)
 	}
-	signedTx2 := tx.Sign(secrets[1])
-	blob2 := protocol.Encode(&signedTx2)
-
-	action = handler.processIncomingTxn(network.IncomingMessage{Data: blob2, Sender: mockSender{}})
-	require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, action)
-	require.Equal(t, 1, len(handler.backlogQueue))
-
-	// backlogQueue has the first txn, but the second one is dropped
-	msg := <-handler.backlogQueue
-	require.Equal(t, msg.rawmsg.Data, blob, blob)
+	// last txn should be dropped
+	require.Equal(t, 1+numTxnToTriggerARL-1, len(handler.backlogQueue))
+	require.Equal(t, counterBefore+1, transactionMessagesAppLimiterDrop.GetUint64Value())
 }
 
 // TestTxHandlerCapGuard checks there is no cap guard leak in case of invalid input.
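
Note on the maxBucketSize change above: with a per-bucket capacity of 1, every insert of a new key has to evict the previous entry, so no per-app counter survives long enough to accumulate a rate; a capacity of 2 lets the previous entry outlive the next insert. The standalone Go sketch below illustrates that behavior with a toy fixed-capacity bucket; the bucket type and eviction policy here are illustrative assumptions, not the actual appRateLimiter implementation.

package main

import "fmt"

// bucket is a toy fixed-capacity map that evicts an arbitrary entry when full.
// It only illustrates the maxBucketSize trade-off; it is not the real
// appRateLimiter bucket type.
type bucket struct {
	max  int
	data map[string]int
}

func (b *bucket) put(key string, val int) {
	if _, ok := b.data[key]; !ok && len(b.data) >= b.max {
		// evict one existing entry to make room for the new key
		for k := range b.data {
			delete(b.data, k)
			break
		}
	}
	b.data[key] = val
}

func main() {
	// With max=1 every insert of a new key evicts the previous one,
	// so no counter survives long enough to accumulate a rate.
	b1 := &bucket{max: 1, data: map[string]int{}}
	b1.put("app-A", 1)
	b1.put("app-B", 1)
	fmt.Println(len(b1.data)) // 1: app-A was evicted immediately

	// With max=2 the previous entry survives the next insert.
	b2 := &bucket{max: 2, data: map[string]int{}}
	b2.put("app-A", 1)
	b2.put("app-B", 1)
	fmt.Println(len(b2.data)) // 2: both entries retained
}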