[NIT-2554][Config Change] Test manual batch-poster fallback for DAS #2665

Open · wants to merge 15 commits into base: master
53 changes: 27 additions & 26 deletions arbnode/batch_poster.go
@@ -141,8 +141,8 @@ type BatchPosterDangerousConfig struct {
}

type BatchPosterConfig struct {
-	Enable                             bool `koanf:"enable"`
-	DisableDapFallbackStoreDataOnChain bool `koanf:"disable-dap-fallback-store-data-on-chain" reload:"hot"`
+	Enable                            bool `koanf:"enable"`
+	EnableDapFallbackStoreDataOnChain bool `koanf:"enable-dap-fallback-store-data-on-chain" reload:"hot"`
// Max batch size.
MaxSize int `koanf:"max-size" reload:"hot"`
// Maximum 4844 blob enabled batch size.
@@ -205,7 +205,7 @@ type BatchPosterConfigFetcher func() *BatchPosterConfig

func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) {
f.Bool(prefix+".enable", DefaultBatchPosterConfig.Enable, "enable posting batches to l1")
f.Bool(prefix+".disable-dap-fallback-store-data-on-chain", DefaultBatchPosterConfig.DisableDapFallbackStoreDataOnChain, "If unable to batch to DA provider, disable fallback storing data on chain")
f.Bool(prefix+".enable-dap-fallback-store-data-on-chain", DefaultBatchPosterConfig.EnableDapFallbackStoreDataOnChain, "If unable to batch to DA provider, enable fallback storing data on chain")
f.Int(prefix+".max-size", DefaultBatchPosterConfig.MaxSize, "maximum batch size")
f.Int(prefix+".max-4844-batch-size", DefaultBatchPosterConfig.Max4844BatchSize, "maximum 4844 blob enabled batch size")
f.Duration(prefix+".max-delay", DefaultBatchPosterConfig.MaxDelay, "maximum batch posting delay")
@@ -231,8 +231,8 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) {
}

var DefaultBatchPosterConfig = BatchPosterConfig{
-	Enable:                             false,
-	DisableDapFallbackStoreDataOnChain: false,
+	Enable:                            false,
+	EnableDapFallbackStoreDataOnChain: true,
// This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go
MaxSize: 100000,
// Try to fill 3 blobs per batch
@@ -267,26 +267,27 @@ var DefaultBatchPosterL1WalletConfig = genericconf.WalletConfig{
}

var TestBatchPosterConfig = BatchPosterConfig{
-	Enable:                         true,
-	MaxSize:                        100000,
-	Max4844BatchSize:               DefaultBatchPosterConfig.Max4844BatchSize,
-	PollInterval:                   time.Millisecond * 10,
-	ErrorDelay:                     time.Millisecond * 10,
-	MaxDelay:                       0,
-	WaitForMaxDelay:                false,
-	CompressionLevel:               2,
-	DASRetentionPeriod:             daprovider.DefaultDASRetentionPeriod,
-	GasRefunderAddress:             "",
-	ExtraBatchGas:                  10_000,
-	Post4844Blobs:                  true,
-	IgnoreBlobPrice:                false,
-	DataPoster:                     dataposter.TestDataPosterConfig,
-	ParentChainWallet:              DefaultBatchPosterL1WalletConfig,
-	L1BlockBound:                   "",
-	L1BlockBoundBypass:             time.Hour,
-	UseAccessLists:                 true,
-	GasEstimateBaseFeeMultipleBips: arbmath.OneInUBips * 3 / 2,
-	CheckBatchCorrectness:          true,
+	Enable:                            true,
+	EnableDapFallbackStoreDataOnChain: true,
+	MaxSize:                           100000,
+	Max4844BatchSize:                  DefaultBatchPosterConfig.Max4844BatchSize,
+	PollInterval:                      time.Millisecond * 10,
+	ErrorDelay:                        time.Millisecond * 10,
+	MaxDelay:                          0,
+	WaitForMaxDelay:                   false,
+	CompressionLevel:                  2,
+	DASRetentionPeriod:                daprovider.DefaultDASRetentionPeriod,
+	GasRefunderAddress:                "",
+	ExtraBatchGas:                     10_000,
+	Post4844Blobs:                     true,
+	IgnoreBlobPrice:                   false,
+	DataPoster:                        dataposter.TestDataPosterConfig,
+	ParentChainWallet:                 DefaultBatchPosterL1WalletConfig,
+	L1BlockBound:                      "",
+	L1BlockBoundBypass:                time.Hour,
+	UseAccessLists:                    true,
+	GasEstimateBaseFeeMultipleBips:    arbmath.OneInUBips * 3 / 2,
+	CheckBatchCorrectness:             true,
}

type BatchPosterOpts struct {
@@ -1366,7 +1367,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
return false, fmt.Errorf("%w: nonce changed from %d to %d while creating batch", storage.ErrStorageRace, nonce, gotNonce)
}
// #nosec G115
-	sequencerMsg, err = b.dapWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), config.DisableDapFallbackStoreDataOnChain)
+	sequencerMsg, err = b.dapWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), config.EnableDapFallbackStoreDataOnChain)
if err != nil {
batchPosterDAFailureCounter.Inc(1)
return false, err
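For context on the renamed option (not part of the diff): after this change the fallback is on by default, so operators who previously relied on the old disable flag must now opt out explicitly. A minimal, self-contained pflag sketch of that behavior — the flag name matches the registration above, but the full CLI prefix (whatever Nitro passes as `prefix`) is omitted here:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	f := flag.NewFlagSet("batch-poster", flag.ContinueOnError)
	// Mirrors BatchPosterConfigAddOptions above: the renamed flag defaults to true.
	f.Bool("enable-dap-fallback-store-data-on-chain", true,
		"If unable to batch to DA provider, enable fallback storing data on chain")

	// An operator who wants the old fail-hard behavior now disables it explicitly.
	_ = f.Parse([]string{"--enable-dap-fallback-store-data-on-chain=false"})

	enabled, _ := f.GetBool("enable-dap-fallback-store-data-on-chain")
	fmt.Println("fallback enabled:", enabled) // fallback enabled: false
}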
6 changes: 3 additions & 3 deletions arbstate/daprovider/writer.go
@@ -17,7 +17,7 @@ type Writer interface {
ctx context.Context,
message []byte,
timeout uint64,
-	disableFallbackStoreDataOnChain bool,
+	enableFallbackStoreDataOnChain bool,
) ([]byte, error)
}

@@ -31,10 +31,10 @@ type writerForDAS struct {
dasWriter DASWriter
}

-func (d *writerForDAS) Store(ctx context.Context, message []byte, timeout uint64, disableFallbackStoreDataOnChain bool) ([]byte, error) {
+func (d *writerForDAS) Store(ctx context.Context, message []byte, timeout uint64, enableFallbackStoreDataOnChain bool) ([]byte, error) {
cert, err := d.dasWriter.Store(ctx, message, timeout)
if errors.Is(err, ErrBatchToDasFailed) {
-		if disableFallbackStoreDataOnChain {
+		if !enableFallbackStoreDataOnChain {
return nil, errors.New("unable to batch to DAS and fallback storing data on chain is disabled")
}
log.Warn("Falling back to storing data on chain", "err", err)
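To make the inverted parameter concrete, here is a self-contained sketch of the decision it drives, with plain functions standing in for the DAS writer and for the ErrBatchToDasFailed sentinel used above (the real method also builds a DAS certificate; this is illustrative, not the production code path):

package main

import (
	"errors"
	"fmt"
)

// Stand-in for the ErrBatchToDasFailed sentinel used above.
var errBatchToDasFailed = errors.New("batch to DAS failed")

// store mirrors writerForDAS.Store: try the DAS first, then either fall back
// to returning the raw message (to be posted on chain) or fail hard.
func store(dasStore func([]byte) ([]byte, error), msg []byte, enableFallback bool) ([]byte, error) {
	cert, err := dasStore(msg)
	if errors.Is(err, errBatchToDasFailed) {
		if !enableFallback {
			return nil, errors.New("unable to batch to DAS and fallback storing data on chain is disabled")
		}
		fmt.Println("falling back to storing data on chain:", err)
		return msg, nil
	}
	return cert, err
}

func main() {
	failing := func([]byte) ([]byte, error) { return nil, errBatchToDasFailed }

	if _, err := store(failing, []byte("batch"), false); err != nil {
		fmt.Println("fallback disabled:", err)
	}
	if out, err := store(failing, []byte("batch"), true); err == nil {
		fmt.Println("fallback enabled: posting", len(out), "bytes on chain")
	}
}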
97 changes: 85 additions & 12 deletions system_tests/das_test.go
@@ -6,6 +6,7 @@ package arbtest
import (
"context"
"encoding/base64"
"errors"
"io"
"math/big"
"net"
@@ -43,18 +44,13 @@ func startLocalDASServer(
pubkey, _, err := das.GenerateAndStoreKeys(keyDir)
Require(t, err)

-	config := das.DataAvailabilityConfig{
-		Enable: true,
-		Key: das.KeyConfig{
-			KeyDir: keyDir,
-		},
-		LocalFileStorage: das.LocalFileStorageConfig{
-			Enable:  true,
-			DataDir: dataDir,
-		},
-		ParentChainNodeURL: "none",
-		RequestTimeout:     5 * time.Second,
-	}
+	config := das.DefaultDataAvailabilityConfig
Member commented:

I know this is something where it would be nice if we could develop an internally-accepted style guide, but I really find the inline declaration of the struct more readable; it makes the structure of the data easier to visualize.

Contributor Author replied:

In this case, I'm not using a composite literal because I want to initialize all fields based on the default config. The default config had changed, and the DAS tests were not using the correct config because of that.
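A minimal sketch of the trade-off discussed above, with simplified stand-in types rather than the real das package:

package main

import (
	"fmt"
	"time"
)

// Simplified stand-ins for the das config types (illustrative only).
type LocalFileStorageConfig struct {
	Enable  bool
	DataDir string
}

type DataAvailabilityConfig struct {
	Enable           bool
	RequestTimeout   time.Duration
	LocalFileStorage LocalFileStorageConfig
}

var DefaultDataAvailabilityConfig = DataAvailabilityConfig{RequestTimeout: 5 * time.Second}

func main() {
	// Composite literal: the shape of the data is easy to see, but any default
	// not restated is silently zeroed — the drift that broke the DAS tests.
	a := DataAvailabilityConfig{Enable: true}
	fmt.Println(a.RequestTimeout) // 0s

	// Copy-and-override: starts from the defaults, so changed defaults propagate.
	b := DefaultDataAvailabilityConfig
	b.Enable = true
	b.LocalFileStorage.Enable = true
	b.LocalFileStorage.DataDir = "/tmp/das-data"
	fmt.Println(b.RequestTimeout) // 5s
}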

+	config.Enable = true
+	config.Key = das.KeyConfig{KeyDir: keyDir}
+	config.ParentChainNodeURL = "none"
+	config.LocalFileStorage = das.DefaultLocalFileStorageConfig
+	config.LocalFileStorage.Enable = true
+	config.LocalFileStorage.DataDir = dataDir

storageService, lifecycleManager, err := das.CreatePersistentStorageService(ctx, &config)
defer lifecycleManager.StopAndWaitUntil(time.Second)
@@ -326,3 +322,80 @@ func initTest(t *testing.T) {
enableLogging(logLvl)
}
}

+func TestDASBatchPosterFallback(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// Setup L1
+	builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+	builder.chainConfig = params.ArbitrumDevTestDASChainConfig()
+	builder.BuildL1(t)
+	l1client := builder.L1.Client
+	l1info := builder.L1Info
+
+	// Setup DAS server
+	dasDataDir := t.TempDir()
+	dasRpcServer, pubkey, backendConfig, _, restServerUrl := startLocalDASServer(
+		t, ctx, dasDataDir, l1client, builder.addresses.SequencerInbox)
+	authorizeDASKeyset(t, ctx, pubkey, l1info, l1client)
+
+	// Setup sequencer/batch-poster L2 node
+	builder.nodeConfig.DataAvailability.Enable = true
+	builder.nodeConfig.DataAvailability.RPCAggregator = aggConfigForBackend(backendConfig)
+	builder.nodeConfig.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig
+	builder.nodeConfig.DataAvailability.RestAggregator.Enable = true
+	builder.nodeConfig.DataAvailability.RestAggregator.Urls = []string{restServerUrl}
+	builder.nodeConfig.DataAvailability.ParentChainNodeURL = "none"
+	builder.nodeConfig.BatchPoster.EnableDapFallbackStoreDataOnChain = false // Disable DAS fallback
+	builder.nodeConfig.BatchPoster.ErrorDelay = time.Millisecond * 250       // Increase error delay because we expect errors
+	builder.L2Info = NewArbTestInfo(t, builder.chainConfig.ChainID)
+	builder.L2Info.GenerateAccount("User2")
+	cleanup := builder.BuildL2OnL1(t)
+	defer cleanup()
+	l2client := builder.L2.Client
+	l2info := builder.L2Info
+
+	// Setup secondary L2 node
+	nodeConfigB := arbnode.ConfigDefaultL1NonSequencerTest()
+	nodeConfigB.BlockValidator.Enable = false
+	nodeConfigB.DataAvailability.Enable = true
+	nodeConfigB.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig
+	nodeConfigB.DataAvailability.RestAggregator.Enable = true
+	nodeConfigB.DataAvailability.RestAggregator.Urls = []string{restServerUrl}
+	nodeConfigB.DataAvailability.ParentChainNodeURL = "none"
+	nodeBParams := SecondNodeParams{
+		nodeConfig: nodeConfigB,
+		initData:   &l2info.ArbInitData,
+	}
+	l2B, cleanupB := builder.Build2ndNode(t, &nodeBParams)
+	defer cleanupB()
+
+	// Check batch posting using the DAS
+	checkBatchPosting(t, ctx, l1client, l2client, l1info, l2info, big.NewInt(1e12), l2B.Client)
+
+	// Shutdown the DAS
+	err := dasRpcServer.Shutdown(ctx)
+	Require(t, err)
+
+	// Send 2nd transaction and check it doesn't arrive on second node
+	tx, _ := TransferBalanceTo(t, "Owner", l2info.GetAddress("User2"), big.NewInt(1e12), l2info, l2client, ctx)
+	_, err = WaitForTx(ctx, l2B.Client, tx.Hash(), time.Second*3)
+	if err == nil || !errors.Is(err, context.DeadlineExceeded) {
+		Fatal(t, "expected context-deadline exceeded error, but got:", err)
+	}
+
+	// Enable the DAP fallback and check the transaction on the second node.
+	// (We don't need to restart the node because of the hot-reload.)
+	builder.nodeConfig.BatchPoster.EnableDapFallbackStoreDataOnChain = true
+	_, err = WaitForTx(ctx, l2B.Client, tx.Hash(), time.Second*3)
+	Require(t, err)
+	l2balance, err := l2B.Client.BalanceAt(ctx, l2info.GetAddress("User2"), nil)
+	Require(t, err)
+	if l2balance.Cmp(big.NewInt(2e12)) != 0 {
+		Fatal(t, "Unexpected balance:", l2balance)
+	}
+
+	// Send another transaction with fallback on
+	checkBatchPosting(t, ctx, l1client, l2client, l1info, l2info, big.NewInt(3e12), l2B.Client)
+}
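The no-restart flip at the end of the test works because the field is tagged reload:"hot" and the batch poster reads its config through a BatchPosterConfigFetcher (see the hunk header above). A minimal sketch of that pattern, under the assumption that a reload simply swaps the config pointer the fetcher returns:

package main

import (
	"fmt"
	"sync/atomic"
)

// Illustrative config holding only the field the test flips.
type batchPosterConfig struct {
	EnableDapFallbackStoreDataOnChain bool
}

func main() {
	var current atomic.Pointer[batchPosterConfig]
	current.Store(&batchPosterConfig{EnableDapFallbackStoreDataOnChain: false})

	// Fetcher in the style of BatchPosterConfigFetcher: callers re-read the
	// config on every batch-posting iteration instead of caching it.
	fetcher := func() *batchPosterConfig { return current.Load() }
	fmt.Println("fallback:", fetcher().EnableDapFallbackStoreDataOnChain) // false

	// Hot reload: swap in a new config; the next fetch observes the change.
	current.Store(&batchPosterConfig{EnableDapFallbackStoreDataOnChain: true})
	fmt.Println("fallback:", fetcher().EnableDapFallbackStoreDataOnChain) // true
}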