txHandler: fix TestTxHandlerAppRateLimiter #6075

Merged
data/appRateLimiter.go: 4 changes (2 additions, 2 deletions)
@@ -81,8 +81,8 @@ func makeAppRateLimiter(maxCacheSize int, maxAppPeerRate uint64, serviceRateWind
 	serviceRatePerWindow := maxAppPeerRate * uint64(serviceRateWindow/time.Second)
 	maxBucketSize := maxCacheSize / numBuckets
 	if maxBucketSize == 0 {
-		// got the max size less then buckets, use maps of 1
-		maxBucketSize = 1
+		// got the max size less then buckets, use maps of 2 to avoid eviction on each insert
+		maxBucketSize = 2
 	}
 	r := &appRateLimiter{
 		maxBucketSize: maxBucketSize,
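The new comment states the motivation: with a bucket capped at a single entry, every newly observed key evicts the previous one, so the limiter never retains per-key history between inserts, while a capacity of 2 leaves room for an earlier entry to survive. A minimal, hypothetical sketch of that effect, using a size-capped map with an arbitrary eviction policy rather than the actual appRateLimiter structures:

```go
package main

import "fmt"

// bucket is a hypothetical size-capped map, loosely modelling one rate-limiter
// bucket; it is not the real appRateLimiter implementation.
type bucket struct {
	max     int
	entries map[string]int // key -> observed count
}

func (b *bucket) insert(key string) {
	if _, ok := b.entries[key]; !ok && len(b.entries) >= b.max {
		// bucket is full: evict an arbitrary entry to make room
		// (the real limiter's eviction policy differs)
		for k := range b.entries {
			delete(b.entries, k)
			break
		}
	}
	b.entries[key]++
}

func main() {
	b1 := &bucket{max: 1, entries: map[string]int{}}
	b2 := &bucket{max: 2, entries: map[string]int{}}
	for _, k := range []string{"a", "b", "a", "b"} {
		b1.insert(k)
		b2.insert(k)
	}
	fmt.Println(b1.entries) // map[b:1] - every insert evicted the previous key
	fmt.Println(b2.entries) // map[a:2 b:2] - counts accumulate across inserts
}
```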
data/appRateLimiter_test.go: 2 changes (1 addition, 1 deletion)
@@ -40,7 +40,7 @@ func TestAppRateLimiter_Make(t *testing.T) {
 	window := 1 * time.Second
 	rm := makeAppRateLimiter(10, rate, window)
 
-	require.Equal(t, 1, rm.maxBucketSize)
+	require.Equal(t, 2, rm.maxBucketSize)
 	require.NotEmpty(t, rm.seed)
 	require.NotEmpty(t, rm.salt)
 	for i := 0; i < len(rm.buckets); i++ {
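The updated expectation follows directly from the constructor arithmetic: a maxCacheSize of 10 is smaller than the number of buckets, so the integer division yields 0 and the fallback now produces 2. A quick worked example of that computation (the numBuckets value below is assumed for illustration only; the real constant lives in data/appRateLimiter.go):

```go
package main

import "fmt"

func main() {
	const numBuckets = 128 // assumed for illustration only
	maxCacheSize := 10     // the value passed by TestAppRateLimiter_Make

	maxBucketSize := maxCacheSize / numBuckets // integer division: 10/128 == 0
	if maxBucketSize == 0 {
		maxBucketSize = 2 // the new fallback from this PR (was 1)
	}
	fmt.Println(maxBucketSize) // 2, matching the updated require.Equal
}
```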
data/txHandler_test.go: 47 changes (26 additions, 21 deletions)
@@ -2515,8 +2515,14 @@ func TestTxHandlerAppRateLimiterERLEnabled(t *testing.T) {
 	partitiontest.PartitionTest(t)
 	t.Parallel()
 
+	// technically we don't need any users for this test
+	// but we need to create the genesis accounts to prevent this warning:
+	// "cannot start evaluator: overflowed subtracting rewards for block 1"
+	_, _, genesis := makeTestGenesisAccounts(t, 0)
+	genBal := bookkeeping.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
 	ledgerName := fmt.Sprintf("%s-mem", t.Name())
 	const inMem = true
 
 	log := logging.TestingLog(t)
 	log.SetLevel(logging.Panic)
 
@@ -2525,11 +2531,9 @@ func TestTxHandlerAppRateLimiterERLEnabled(t *testing.T) {
 	cfg.TxBacklogServiceRateWindowSeconds = 1
 	cfg.TxBacklogAppTxPerSecondRate = 3
 	cfg.TxBacklogSize = 3
-	ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, bookkeeping.GenesisBalances{}, genesisID, genesisHash, cfg)
+	l, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg)
 	require.NoError(t, err)
-	defer ledger.Close()
-
-	l := ledger
+	defer l.Close()
 
 	func() {
 		cfg.EnableTxBacklogRateLimiting = false
@@ -2618,9 +2622,10 @@ func TestTxHandlerAppRateLimiterERLEnabled(t *testing.T) {
 	require.Equal(t, 1, handler.appLimiter.len())
 }
 
+// TestTxHandlerAppRateLimiter submits few app txns to make the app rate limit to filter one the last txn
+// to ensure it is propely integrated with the txHandler
 func TestTxHandlerAppRateLimiter(t *testing.T) {
 	partitiontest.PartitionTest(t)
+	t.Parallel()
 
 	const numUsers = 10
 	log := logging.TestingLog(t)
@@ -2637,16 +2642,16 @@ func TestTxHandlerAppRateLimiter(t *testing.T) {
 	cfg.TxBacklogAppTxRateLimiterMaxSize = 100
 	cfg.TxBacklogServiceRateWindowSeconds = 1
 	cfg.TxBacklogAppTxPerSecondRate = 3
-	ledger, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg)
+	l, err := LoadLedger(log, ledgerName, inMem, protocol.ConsensusCurrentVersion, genBal, genesisID, genesisHash, cfg)
 	require.NoError(t, err)
-	defer ledger.Close()
+	defer l.Close()
 
-	l := ledger
 	handler, err := makeTestTxHandler(l, cfg)
 	require.NoError(t, err)
 	defer handler.txVerificationPool.Shutdown()
 	defer close(handler.streamVerifierDropped)
 
+	handler.appLimiterBacklogThreshold = -1 // force the rate limiter to start checking transactions
 	tx := transactions.Transaction{
 		Type: protocol.ApplicationCallTx,
 		Header: transactions.Header{
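The added handler.appLimiterBacklogThreshold = -1 matters because, as its inline comment suggests, the app rate limiter is normally consulted only once the backlog is already congested past a threshold. A hedged sketch of that gating, using a hypothetical helper rather than the actual txHandler code:

```go
package main

import "fmt"

// shouldCheckAppLimiter is a hypothetical stand-in for the gating inside the
// real txHandler: the app rate limiter is only consulted when the backlog is
// already longer than the configured threshold.
func shouldCheckAppLimiter(backlogLen, threshold int) bool {
	return backlogLen > threshold
}

func main() {
	// With a normal (non-negative) threshold an empty backlog skips the limiter;
	// with the threshold forced to -1 the check always runs, as the test needs.
	fmt.Println(shouldCheckAppLimiter(0, 1))  // false - limiter skipped
	fmt.Println(shouldCheckAppLimiter(0, -1)) // true  - limiter engaged
}
```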
@@ -2667,21 +2672,21 @@ func TestTxHandlerAppRateLimiter(t *testing.T) {
 	require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, action)
 	require.Equal(t, 1, len(handler.backlogQueue))
 
+	counterBefore := transactionMessagesAppLimiterDrop.GetUint64Value()
 	// trigger the rate limiter and ensure the txn is ignored
-	tx2 := tx
-	for i := 0; i < cfg.TxBacklogAppTxPerSecondRate*cfg.TxBacklogServiceRateWindowSeconds; i++ {
-		tx2.ForeignApps = append(tx2.ForeignApps, 1)
+	numTxnToTriggerARL := cfg.TxBacklogAppTxPerSecondRate * cfg.TxBacklogServiceRateWindowSeconds
+	for i := 0; i < numTxnToTriggerARL; i++ {
+		tx2 := tx
+		tx2.Header.Sender = addresses[i+1]
+		signedTx2 := tx2.Sign(secrets[i+1])
+		blob2 := protocol.Encode(&signedTx2)
+
+		action = handler.processIncomingTxn(network.IncomingMessage{Data: blob2, Sender: mockSender{}})
+		require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, action)
 	}
-	signedTx2 := tx.Sign(secrets[1])
-	blob2 := protocol.Encode(&signedTx2)
-
-	action = handler.processIncomingTxn(network.IncomingMessage{Data: blob2, Sender: mockSender{}})
-	require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, action)
-	require.Equal(t, 1, len(handler.backlogQueue))
 
-	// backlogQueue has the first txn, but the second one is dropped
-	msg := <-handler.backlogQueue
-	require.Equal(t, msg.rawmsg.Data, blob, blob)
+	// last txn should be dropped
+	require.Equal(t, 1+numTxnToTriggerARL-1, len(handler.backlogQueue))
+	require.Equal(t, counterBefore+1, transactionMessagesAppLimiterDrop.GetUint64Value())
 }
 
 // TestTxHandlerCapGuard checks there is no cap guard leak in case of invalid input.
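The reworked test leans on simple window arithmetic: every submission comes from the same mock peer and targets the same app, so, assuming they all share one rate-limiter bucket, the per-window budget is TxBacklogAppTxPerSecondRate * TxBacklogServiceRateWindowSeconds = 3. The first three transactions land in the backlog and the fourth is dropped, which is exactly what the final require.Equal calls assert. A small, illustrative sketch of that accounting:

```go
package main

import "fmt"

func main() {
	// Values from the test configuration above.
	rate := 3   // cfg.TxBacklogAppTxPerSecondRate
	window := 1 // cfg.TxBacklogServiceRateWindowSeconds

	budget := rate * window         // admissions allowed per window for one (peer, app) pair
	numTxnToTriggerARL := budget    // extra txns submitted by the loop
	total := 1 + numTxnToTriggerARL // the initial txn plus the loop's txns

	admitted, dropped := 0, 0
	for i := 0; i < total; i++ {
		if admitted < budget {
			admitted++ // lands in backlogQueue
		} else {
			dropped++ // counted by transactionMessagesAppLimiterDrop
		}
	}
	// 3 admitted, 1 dropped: hence require.Equal(t, 1+numTxnToTriggerARL-1, len(...))
	// and the drop counter increasing by exactly one.
	fmt.Println(admitted, dropped)
}
```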