Merge branch 'develop' of github.com:scroll-tech/go-ethereum into feat/add_log_for_long_sustain_tx
georgehao committed Sep 23, 2024
2 parents cdc2d48 + 7ff93ca commit 7002c6d
Showing 11 changed files with 113 additions and 51 deletions.
2 changes: 0 additions & 2 deletions Dockerfile.mockccc
@@ -10,8 +10,6 @@ FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as builder
ADD . /go-ethereum
RUN cd /go-ethereum && env GO111MODULE=on go run build/ci.go install ./cmd/geth

RUN cd /go-ethereum && env GO111MODULE=on go run build/ci.go install ./cmd/geth

# Pull Geth into a second stage deploy alpine container
FROM ubuntu:20.04

4 changes: 4 additions & 0 deletions core/tx_pool.go
@@ -706,6 +706,10 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
if uint64(tx.Size()) > txMaxSize {
return ErrOversizedData
}
// Reject transactions that cannot fit into a block even as a single transaction
if !pool.chainconfig.Scroll.IsValidBlockSize(tx.Size()) {
return ErrOversizedData
}
// Check whether the init code size has been exceeded.
if pool.shanghai && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize {
return fmt.Errorf("%w: code size %v limit %v", ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize)
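The new check rejects any transaction whose encoded size alone exceeds the per-block payload limit, since such a transaction could never be included even in an otherwise empty block. Below is a minimal sketch of what a guard like IsValidBlockSize might look like, assuming MaxTxPayloadBytesPerBlock is the cap; the ScrollConfig stand-in and method body are illustrative only, and the real implementation in params may reserve additional margin.

package main

import "fmt"

// ScrollConfig is a stand-in for the chain-config section referenced above;
// the real type lives in the params package and may differ.
type ScrollConfig struct {
	// MaxTxPayloadBytesPerBlock caps the total transaction payload of a block; nil means no cap.
	MaxTxPayloadBytesPerBlock *int
}

// IsValidBlockSize sketches the validateTx guard: a transaction whose encoded
// size already exceeds the per-block cap can never fit into any block, so the
// pool rejects it up front with ErrOversizedData.
func (s *ScrollConfig) IsValidBlockSize(size uint64) bool {
	if s == nil || s.MaxTxPayloadBytesPerBlock == nil {
		return true
	}
	return size <= uint64(*s.MaxTxPayloadBytesPerBlock)
}

func main() {
	limit := 122_880 // illustrative cap in bytes
	cfg := &ScrollConfig{MaxTxPayloadBytesPerBlock: &limit}
	fmt.Println(cfg.IsValidBlockSize(100_000)) // true: fits under the cap
	fmt.Println(cfg.IsValidBlockSize(200_000)) // false: rejected as oversized
}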
29 changes: 29 additions & 0 deletions core/tx_pool_test.go
@@ -2677,3 +2677,32 @@ func TestStatsWithMinBaseFee(t *testing.T) {
}
}
}

func TestValidateTxBlockSize(t *testing.T) {
pool, key := setupTxPoolWithConfig(params.ScrollMainnetChainConfig)
defer pool.Stop()

account := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, account, big.NewInt(1000000000000000000))

validTx := pricedDataTransaction(1, 2100000, big.NewInt(1), key, uint64(*pool.chainconfig.Scroll.MaxTxPayloadBytesPerBlock)-128)
oversizedTx := pricedDataTransaction(2, 2100000, big.NewInt(1), key, uint64(*pool.chainconfig.Scroll.MaxTxPayloadBytesPerBlock))

tests := []struct {
name string
tx *types.Transaction
want error
}{
{"Valid transaction", validTx, nil},
{"Oversized transaction", oversizedTx, ErrOversizedData},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := pool.validateTx(tt.tx, false)
if err != tt.want {
t.Errorf("validateTx() error = %v, want %v", err, tt.want)
}
})
}
}
21 changes: 21 additions & 0 deletions eth/api.go
@@ -38,6 +38,7 @@ import (
"github.com/scroll-tech/go-ethereum/internal/ethapi"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rlp"
"github.com/scroll-tech/go-ethereum/rollup/ccc"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/scroll-tech/go-ethereum/trie"
)
@@ -812,3 +813,23 @@ func (api *ScrollAPI) GetSkippedTransactionHashes(ctx context.Context, from uint

return hashes, nil
}

// CalculateRowConsumptionByBlockNumber re-runs the circuit capacity checker for the given block on demand and returns its row consumption.
func (api *ScrollAPI) CalculateRowConsumptionByBlockNumber(ctx context.Context, number rpc.BlockNumber) (*types.RowConsumption, error) {
block := api.eth.blockchain.GetBlockByNumber(uint64(number.Int64()))
if block == nil {
return nil, errors.New("block not found")
}

// TODO: fix the temporary AsyncChecker leaking its internal Checker instances
var checkErr error
asyncChecker := ccc.NewAsyncChecker(api.eth.blockchain, 1, false).WithOnFailingBlock(func(b *types.Block, err error) {
log.Error("failed to calculate row consumption on demand", "number", number, "hash", b.Hash().Hex(), "err", err)
checkErr = err
})
if err := asyncChecker.Check(block); err != nil {
return nil, err
}
asyncChecker.Wait()
return rawdb.ReadBlockRowConsumption(api.eth.ChainDb(), block.Hash()), checkErr
}
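For reference, the new endpoint is exposed under the scroll namespace and can be called directly over JSON-RPC. A minimal sketch using the repository's rpc client follows; the node URL and block number are placeholders, and the result is decoded as raw JSON so the sketch does not depend on the exact shape of types.RowConsumption.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"github.com/scroll-tech/go-ethereum/rpc"
)

func main() {
	// Assumes a node exposing the "scroll" API namespace over HTTP.
	client, err := rpc.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Row consumption for block 0x10, decoded as raw JSON for illustration.
	var rowConsumption json.RawMessage
	err = client.CallContext(context.Background(), &rowConsumption,
		"scroll_calculateRowConsumptionByBlockNumber", "0x10")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(rowConsumption))
}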
4 changes: 4 additions & 0 deletions eth/api_backend.go
@@ -266,6 +266,10 @@ func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction)
return b.eth.txPool.AddLocal(signedTx)
}

func (b *EthAPIBackend) RemoveTx(txHash common.Hash) {
b.eth.txPool.RemoveTx(txHash, true)
}

func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) {
pending := b.eth.txPool.Pending(false)
var txs types.Transactions
5 changes: 5 additions & 0 deletions internal/ethapi/api.go
@@ -227,6 +227,11 @@ func (s *PublicTxPoolAPI) Status() map[string]hexutil.Uint {
}
}

// RemoveTransactionByHash evicts a transaction from the pool.
func (s *PublicTxPoolAPI) RemoveTransactionByHash(ctx context.Context, hash common.Hash) {
s.b.RemoveTx(hash)
}

// Inspect retrieves the content of the transaction pool and flattens it into an
// easily inspectable list.
func (s *PublicTxPoolAPI) Inspect() map[string]map[string]map[string]string {
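The handler above backs the txpool_removeTransactionByHash RPC method registered in web3ext.go further down. A minimal sketch of invoking it over JSON-RPC; the node URL and transaction hash are placeholders.

package main

import (
	"context"
	"log"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Placeholder hash; substitute the hash of a pooled transaction.
	hash := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001")

	// The handler returns no result, so a nil result target is fine.
	if err := client.CallContext(context.Background(), nil,
		"txpool_removeTransactionByHash", hash); err != nil {
		log.Fatal(err)
	}
}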
1 change: 1 addition & 0 deletions internal/ethapi/backend.go
@@ -75,6 +75,7 @@ type Backend interface {

// Transaction pool API
SendTx(ctx context.Context, signedTx *types.Transaction) error
RemoveTx(txHash common.Hash)
GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error)
GetPoolTransactions() (types.Transactions, error)
GetPoolTransaction(txHash common.Hash) *types.Transaction
11 changes: 11 additions & 0 deletions internal/web3ext/web3ext.go
@@ -772,6 +772,11 @@ web3._extend({
call: 'txpool_contentFrom',
params: 1,
}),
new web3._extend.Method({
name: 'removeTransactionByHash',
call: 'txpool_removeTransactionByHash',
params: 1
}),
]
});
`
@@ -921,6 +926,12 @@ web3._extend({
inputFormatter: [web3._extend.formatters.inputCallFormatter, web3._extend.formatters.inputBlockNumberFormatter],
outputFormatter: web3._extend.utils.toDecimal
}),
new web3._extend.Method({
name: 'calculateRowConsumptionByBlockNumber',
call: 'scroll_calculateRowConsumptionByBlockNumber',
params: 1,
inputFormatter: [web3._extend.formatters.inputBlockNumberFormatter]
}),
],
properties:
[
50 changes: 33 additions & 17 deletions miner/scroll_worker.go
@@ -70,6 +70,9 @@ var (
commitReasonCCCCounter = metrics.NewRegisteredCounter("miner/commit_reason_ccc", nil)
commitReasonDeadlineCounter = metrics.NewRegisteredCounter("miner/commit_reason_deadline", nil)
commitGasCounter = metrics.NewRegisteredCounter("miner/commit_gas", nil)

missingRCOnRestartCounter = metrics.NewRegisteredCounter("miner/missing_rc_on_restart", nil)
missingAncestorRCCounter = metrics.NewRegisteredCounter("miner/missing_ancestor_rc", nil)
)

// prioritizedTransaction represents a single transaction that
@@ -86,6 +89,7 @@ type work struct {
cccLogger *ccc.Logger
vmConfig vm.Config

reorging bool
reorgReason error

// accumulated state
@@ -306,6 +310,8 @@ func (w *worker) checkHeadRowConsumption() error {
block := w.chain.GetBlockByNumber(curBlockNum)
// only spawn CCC checkers for blocks with no row consumption data stored in DB
if rawdb.ReadBlockRowConsumption(w.chain.Database(), block.Hash()) == nil {
missingRCOnRestartCounter.Inc(1)

if err := w.asyncChecker.Check(block); err != nil {
return err
}
@@ -348,7 +354,7 @@ func (w *worker) mainLoop() {
var retryableCommitError *retryableCommitError
if errors.As(err, &retryableCommitError) {
log.Warn("failed to commit to a block, retrying", "err", err)
if _, err = w.tryCommitNewWork(time.Now(), w.current.header.ParentHash, w.current.reorgReason); err != nil {
if _, err = w.tryCommitNewWork(time.Now(), w.current.header.ParentHash, w.current.reorging, w.current.reorgReason); err != nil {
continue
}
} else if err != nil {
@@ -366,20 +372,20 @@
return
}
}
_, err = w.tryCommitNewWork(time.Now(), w.chain.CurrentHeader().Hash(), nil)
_, err = w.tryCommitNewWork(time.Now(), w.chain.CurrentHeader().Hash(), false, nil)
case trigger := <-w.reorgCh:
idleTimer.UpdateSince(idleStart)
err = w.handleReorg(&trigger)
case chainHead := <-w.chainHeadCh:
idleTimer.UpdateSince(idleStart)
if w.isCanonical(chainHead.Block.Header()) {
_, err = w.tryCommitNewWork(time.Now(), chainHead.Block.Hash(), nil)
_, err = w.tryCommitNewWork(time.Now(), chainHead.Block.Hash(), false, nil)
}
case <-w.current.deadlineCh():
idleTimer.UpdateSince(idleStart)
w.current.deadlineReached = true
if len(w.current.txs) > 0 {
_, err = w.commit(false)
_, err = w.commit()
}
case ev := <-w.txsCh:
idleTimer.UpdateSince(idleStart)
@@ -390,8 +396,8 @@
// be automatically eliminated.
if w.current != nil {
shouldCommit, _ := w.processTxnSlice(ev.Txs)
if shouldCommit || w.current.deadlineReached {
_, err = w.commit(false)
if shouldCommit || (w.current.deadlineReached && len(w.current.txs) > 0) {
_, err = w.commit()
}
}
atomic.AddInt32(&w.newTxs, int32(len(ev.Txs)))
@@ -430,7 +436,7 @@ func (w *worker) collectPendingL1Messages(startIndex uint64) []types.L1MessageTx
}

// newWork
func (w *worker) newWork(now time.Time, parentHash common.Hash, reorgReason error) error {
func (w *worker) newWork(now time.Time, parentHash common.Hash, reorging bool, reorgReason error) error {
parent := w.chain.GetBlockByHash(parentHash)
header := &types.Header{
ParentHash: parent.Hash(),
@@ -498,14 +504,19 @@ func (w *worker) newWork(now time.Time, parentHash common.Hash, reorgReason erro
coalescedLogs: []*types.Log{},
gasPool: new(core.GasPool).AddGas(header.GasLimit),
nextL1MsgIndex: nextL1MsgIndex,
reorging: reorging,
reorgReason: reorgReason,
}

// initialize pending block with an empty block to make sure we always have
// a pending block to serve RPC requests
w.updateSnapshot()
return nil
}

// tryCommitNewWork
func (w *worker) tryCommitNewWork(now time.Time, parent common.Hash, reorgReason error) (common.Hash, error) {
err := w.newWork(now, parent, reorgReason)
func (w *worker) tryCommitNewWork(now time.Time, parent common.Hash, reorging bool, reorgReason error) (common.Hash, error) {
err := w.newWork(now, parent, reorging, reorgReason)
if err != nil {
return common.Hash{}, fmt.Errorf("failed creating new work: %w", err)
}
@@ -516,8 +527,7 @@
}

// check if we are reorging
reorging := w.chain.GetBlockByNumber(w.current.header.Number.Uint64()) != nil
if !shouldCommit && reorging {
if !shouldCommit && w.current.reorging {
shouldCommit, err = w.processReorgedTxns(w.current.reorgReason)
}
if err != nil {
@@ -535,7 +545,7 @@
// if reorging, force committing even if we are not "running"
// this can happen when sequencer is instructed to shutdown while handling a reorg
// we should make sure reorg is not interrupted
if blockHash, err := w.commit(reorging); err != nil {
if blockHash, err := w.commit(); err != nil {
return common.Hash{}, fmt.Errorf("failed committing new work: %w", err)
} else {
return blockHash, nil
@@ -653,6 +663,10 @@ func (w *worker) processTxnSlice(txns types.Transactions) (bool, error) {
// processReorgedTxns
func (w *worker) processReorgedTxns(reason error) (bool, error) {
reorgedBlock := w.chain.GetBlockByNumber(w.current.header.Number.Uint64())
if reorgedBlock == nil {
return false, nil
}

commitGasCounter.Dec(int64(reorgedBlock.GasUsed()))
reorgedTxns := reorgedBlock.Transactions()
var errorWithTxnIdx *ccc.ErrorWithTxnIdx
@@ -782,14 +796,14 @@ func (e retryableCommitError) Unwrap() error {

// commit runs any post-transaction state modifications, assembles the final block
// and commits new work if consensus engine is running.
func (w *worker) commit(reorging bool) (common.Hash, error) {
func (w *worker) commit() (common.Hash, error) {
sealDelay := time.Duration(0)
defer func(t0 time.Time) {
l2CommitTimer.Update(time.Since(t0) - sealDelay)
}(time.Now())

w.updateSnapshot()
if !w.isRunning() && !reorging {
if !w.isRunning() && !w.current.reorging {
return common.Hash{}, nil
}

@@ -866,10 +880,12 @@ func (w *worker) commit(reorging bool) (common.Hash, error) {

currentHeight := w.current.header.Number.Uint64()
maxReorgDepth := uint64(w.config.CCCMaxWorkers + 1)
if !reorging && currentHeight > maxReorgDepth {
if !w.current.reorging && currentHeight > maxReorgDepth {
ancestorHeight := currentHeight - maxReorgDepth
ancestorHash := w.chain.GetHeaderByNumber(ancestorHeight).Hash()
if rawdb.ReadBlockRowConsumption(w.chain.Database(), ancestorHash) == nil {
missingAncestorRCCounter.Inc(1)

// reject committing to a block if its ancestor doesn't have its RC stored in DB yet.
// which may either mean that it failed CCC or it is still in the process of being checked
return common.Hash{}, retryableCommitError{inner: errors.New("ancestor doesn't have RC yet")}
@@ -1031,7 +1047,7 @@ func (w *worker) handleReorg(trigger *reorgTrigger) error {
return nil
}

newBlockHash, err := w.tryCommitNewWork(time.Now(), parentHash, reorgReason)
newBlockHash, err := w.tryCommitNewWork(time.Now(), parentHash, true, reorgReason)
if err != nil {
return err
}
@@ -1040,7 +1056,7 @@
if newBlockHash == (common.Hash{}) {
// force committing the new canonical head to trigger a reorg in blockchain
// otherwise we might ignore CCC errors from the new side chain since it is not canonical yet
newBlockHash, err = w.commit(true)
newBlockHash, err = w.commit()
if err != nil {
return err
}
2 changes: 1 addition & 1 deletion params/version.go
@@ -24,7 +24,7 @@ import (
const (
VersionMajor = 5 // Major version component of the current release
VersionMinor = 7 // Minor version component of the current release
VersionPatch = 13 // Patch version component of the current release
VersionPatch = 21 // Patch version component of the current release
VersionMeta = "mainnet" // Version metadata to append to the version string
)

35 changes: 4 additions & 31 deletions rollup/ccc/async_checker.go
@@ -173,8 +173,6 @@ func (c *AsyncChecker) checkerTask(block *types.Block, ccc *Checker, forkCtx con
}

header := block.Header()
header.GasUsed = 0
gasPool := new(core.GasPool).AddGas(header.GasLimit)
ccc.Reset()

accRc := new(types.RowConsumption)
@@ -184,7 +182,7 @@
}

var curRc *types.RowConsumption
curRc, err = c.checkTxAndApply(parent, header, statedb, gasPool, tx, ccc)
curRc, err = c.checkTx(parent, header, statedb, tx, ccc)
if err != nil {
err = &ErrorWithTxnIdx{
TxIdx: uint(txIdx),
@@ -208,39 +206,14 @@
}
}

func (c *AsyncChecker) checkTxAndApply(parent *types.Block, header *types.Header, state *state.StateDB, gasPool *core.GasPool, tx *types.Transaction, ccc *Checker) (*types.RowConsumption, error) {
// don't commit the state during tracing for circuit capacity checker, otherwise we cannot revert.
// and even if we don't commit the state, the `refund` value will still be correct, as explained in `CommitTransaction`
commitStateAfterApply := false
snap := state.Snapshot()

// 1. we have to check circuit capacity before `core.ApplyTransaction`,
// because if the tx can be successfully executed but circuit capacity overflows, it will be inconvenient to revert.
// 2. even if we don't commit to the state during the tracing (which means `clearJournalAndRefund` is not called during the tracing),
// the `refund` value will still be correct, because:
// 2.1 when starting handling the first tx, `state.refund` is 0 by default,
// 2.2 after tracing, the state is either committed in `core.ApplyTransaction`, or reverted, so the `state.refund` can be cleared,
// 2.3 when starting handling the following txs, `state.refund` comes as 0
func (c *AsyncChecker) checkTx(parent *types.Block, header *types.Header, state *state.StateDB, tx *types.Transaction, ccc *Checker) (*types.RowConsumption, error) {
trace, err := tracing.NewTracerWrapper().CreateTraceEnvAndGetBlockTrace(c.bc.Config(), c.bc, c.bc.Engine(), c.bc.Database(),
state, parent, types.NewBlockWithHeader(header).WithBody([]*types.Transaction{tx}, nil), commitStateAfterApply)
// `w.current.traceEnv.State` & `w.current.state` share a same pointer to the state, so only need to revert `w.current.state`
// revert to snapshot for calling `core.ApplyMessage` again, (both `traceEnv.GetBlockTrace` & `core.ApplyTransaction` will call `core.ApplyMessage`)
state.RevertToSnapshot(snap)
state, parent, types.NewBlockWithHeader(header).WithBody([]*types.Transaction{tx}, nil), true)
if err != nil {
return nil, err
}

rc, err := ccc.ApplyTransaction(trace)
if err != nil {
return rc, err
}

_, err = core.ApplyTransaction(c.bc.Config(), c.bc, nil /* coinbase will default to chainConfig.Scroll.FeeVaultAddress */, gasPool,
state, header, tx, &header.GasUsed, *c.bc.GetVMConfig())
if err != nil {
return nil, err
}
return rc, nil
return ccc.ApplyTransaction(trace)
}

// ScheduleError forces a block to error on a given transaction index
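The slimmed-down checkTx is consumed through the same AsyncChecker pipeline. A usage sketch follows, mirroring the on-demand check this commit adds in eth/api.go; the package name is hypothetical, and the blockchain, database and block are assumed to come from a running node.

package ccchelper // hypothetical helper package, for illustration only

import (
	"github.com/scroll-tech/go-ethereum/core"
	"github.com/scroll-tech/go-ethereum/core/rawdb"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/ethdb"
	"github.com/scroll-tech/go-ethereum/rollup/ccc"
)

// checkBlockRows re-runs circuit capacity checking for a single block and reads
// the persisted result back, as CalculateRowConsumptionByBlockNumber does above.
func checkBlockRows(bc *core.BlockChain, db ethdb.Database, block *types.Block) (*types.RowConsumption, error) {
	var checkErr error
	checker := ccc.NewAsyncChecker(bc, 1, false).WithOnFailingBlock(func(b *types.Block, err error) {
		checkErr = err // remember why the block failed CCC, if it did
	})
	if err := checker.Check(block); err != nil {
		return nil, err
	}
	checker.Wait() // block until the queued check completes
	return rawdb.ReadBlockRowConsumption(db, block.Hash()), checkErr
}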
