diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 9ed921c24f..62fee7e6dc 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -168,6 +168,8 @@ var (
 		utils.GpoPercentileFlag,
 		utils.GpoMaxGasPriceFlag,
 		utils.GpoIgnoreGasPriceFlag,
+		utils.ParallelTxFlag,
+		utils.ParallelTxNumFlag,
 		utils.MinerNotifyFullFlag,
 		configFileFlag,
 		utils.BlockAmountReserved,
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 376817356a..7e7f59711e 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -26,6 +26,7 @@ import (
 	"math/big"
 	"os"
 	"path/filepath"
+	"runtime"
 	godebug "runtime/debug"
 	"strconv"
 	"strings"
@@ -860,6 +861,14 @@ var (
 		Usage: "InfluxDB bucket name to push reported metrics to (v2 only)",
 		Value: metrics.DefaultConfig.InfluxDBBucket,
 	}
+	ParallelTxFlag = cli.BoolFlag{
+		Name:  "parallel",
+		Usage: "Enable the experimental parallel transaction execution mode, only valid in full sync mode (default = false)",
+	}
+	ParallelTxNumFlag = cli.IntFlag{
+		Name:  "parallel.num",
+		Usage: "Number of slots for transaction execution, only valid in parallel mode (runtime calculated, no fixed default value)",
+	}
 	// Init network
 	InitNetworkSize = cli.IntFlag{
@@ -1696,6 +1705,26 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 	if ctx.GlobalIsSet(RangeLimitFlag.Name) {
 		cfg.RangeLimit = ctx.GlobalBool(RangeLimitFlag.Name)
 	}
+	if ctx.GlobalIsSet(ParallelTxFlag.Name) {
+		cfg.ParallelTxMode = ctx.GlobalBool(ParallelTxFlag.Name)
+		// The best parallel num will be tuned later; we use a simple heuristic here
+		numCpu := runtime.NumCPU()
+		var parallelNum int
+		if ctx.GlobalIsSet(ParallelTxNumFlag.Name) {
+			// "--parallel.num" takes precedence, but "--parallel.num 0" is not allowed
+			parallelNum = ctx.GlobalInt(ParallelTxNumFlag.Name)
+			if parallelNum < 1 {
+				parallelNum = 1
+			}
+		} else if numCpu == 1 {
+			parallelNum = 1 // single CPU core
+		} else if numCpu < 10 {
+			parallelNum = numCpu - 1
+		} else {
+			parallelNum = 8 // we found concurrency 8 is slightly better than 15
+		}
+		cfg.ParallelTxNum = parallelNum
+	}
 	// Read the value from the flag no matter if it's set or not.
 	cfg.Preimages = ctx.GlobalBool(CachePreimagesFlag.Name)
 	if cfg.NoPruning && !cfg.Preimages {
diff --git a/core/blockchain.go b/core/blockchain.go
index 832eba973b..aeeac95534 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -81,6 +81,7 @@ var (
 	errInsertionInterrupted        = errors.New("insertion is interrupted")
 	errStateRootVerificationFailed = errors.New("state root verification failed")
 	errChainStopped                = errors.New("blockchain is stopped")
+	ParallelTxMode                 = false // parallel transaction execution
 )
 const (
@@ -248,13 +249,14 @@ type BlockChain struct {
 	running       int32 // 0 if chain is running, 1 when stopped
 	procInterrupt int32 // interrupt signaler for block processing
-	engine     consensus.Engine
-	prefetcher Prefetcher
-	validator  Validator // Block and state validator interface
-	processor  Processor // Block transaction processor interface
-	forker     *ForkChoice
-	vmConfig   vm.Config
-	pipeCommit bool
+	engine            consensus.Engine
+	prefetcher        Prefetcher
+	validator         Validator // Block and state validator interface
+	processor         Processor // Block transaction processor interface
+	forker            *ForkChoice
+	vmConfig          vm.Config
+	pipeCommit        bool
+	parallelExecution bool
 	shouldPreserve  func(*types.Block) bool        // Function used to determine whether should preserve the given block.
 	terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
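For reference, the slot-count heuristic in SetEthConfig above reduces to a small pure function. The sketch below restates it (defaultParallelNum is a hypothetical name for illustration, not part of this change):

	package main

	import (
		"fmt"
		"runtime"
	)

	// defaultParallelNum restates the "--parallel.num" selection logic above:
	// an explicit value wins (floored at 1); otherwise derive from the CPU count.
	func defaultParallelNum(requested int, requestedSet bool) int {
		numCPU := runtime.NumCPU()
		switch {
		case requestedSet && requested < 1:
			return 1 // "--parallel.num 0" is not allowed
		case requestedSet:
			return requested
		case numCPU == 1:
			return 1 // single CPU core
		case numCPU < 10:
			return numCPU - 1 // leave a core for the rest of the node
		default:
			return 8 // concurrency 8 measured slightly better than 15
		}
	}

	func main() {
		fmt.Println("parallel slots:", defaultParallelNum(0, false))
	}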
@@ -1878,27 +1880,32 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
 	statedb.StartPrefetcher("chain")
 	interruptCh := make(chan struct{})
 	// For diff sync, it may fallback to full sync, so we still do prefetch
-	if len(block.Transactions()) >= prefetchTxNumber {
-		// do Prefetch in a separate goroutine to avoid blocking the critical path
-		// 1.do state prefetch for snapshot cache
-		throwaway := statedb.CopyDoPrefetch()
-		go bc.prefetcher.Prefetch(block, throwaway, &bc.vmConfig, interruptCh)
+	// parallel mode has a pipeline that plays a role similar to this prefetch,
+	// so we disable the prefetch in parallel mode to save CPU
+	if !bc.parallelExecution {
+		if len(block.Transactions()) >= prefetchTxNumber {
+			// do Prefetch in a separate goroutine to avoid blocking the critical path
-		// 2.do trie prefetch for MPT trie node cache
-		// it is for the big state trie tree, prefetch based on transaction's From/To address.
-		// trie prefetcher is thread safe now, ok to prefetch in a separate routine
-		go throwaway.TriePrefetchInAdvance(block, signer)
-	}
+			// 1.do state prefetch for snapshot cache
+			throwaway := statedb.CopyDoPrefetch()
+			go bc.prefetcher.Prefetch(block, throwaway, &bc.vmConfig, interruptCh)
+			// 2.do trie prefetch for MPT trie node cache
+			// it is for the big state trie tree, prefetch based on transaction's From/To address.
+			// trie prefetcher is thread safe now, ok to prefetch in a separate routine
+			go throwaway.TriePrefetchInAdvance(block, signer)
+		}
+	}
 	//Process block using the parent state as reference point
 	substart := time.Now()
 	if bc.pipeCommit {
 		statedb.EnablePipeCommit()
 	}
 	statedb.SetExpectedStateRoot(block.Root())
+
 	statedb, receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
 	close(interruptCh) // state prefetch can be stopped
+
 	if err != nil {
 		bc.reportBlock(block, receipts, err)
 		statedb.StopPrefetcher()
@@ -3064,3 +3071,16 @@ func CalculateDiffHash(d *types.DiffLayer) (common.Hash, error) {
 	hasher.Sum(hash[:0])
 	return hash, nil
 }
+
+func EnableParallelProcessor(parallelNum int) BlockChainOption {
+	return func(chain *BlockChain) (*BlockChain, error) {
+		if chain.snaps == nil {
+			// disable the parallel processor if the snapshot is not enabled, to avoid concurrency issues with SecureTrie
+			log.Info("parallel processor is not enabled since snapshot is not enabled")
+			return chain, nil
+		}
+		chain.parallelExecution = true
+		chain.processor = NewParallelStateProcessor(chain.Config(), chain, chain.engine, parallelNum)
+		return chain, nil
+	}
+}
diff --git a/core/state/dump.go b/core/state/dump.go
index bfcc035435..f044aee1f3 100644
--- a/core/state/dump.go
+++ b/core/state/dump.go
@@ -162,7 +162,7 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []
 			account.SecureKey = it.Key
 		}
 		addr := common.BytesToAddress(addrBytes)
-		obj := newObject(s, addr, data)
+		obj := newObject(s, s.isParallel, addr, data)
 		if !conf.SkipCode {
 			account.Code = obj.Code(s.db)
 		}
diff --git a/core/state/interface.go b/core/state/interface.go
new file mode 100644
index 0000000000..2362ac828b
--- /dev/null
+++ b/core/state/interface.go
@@ -0,0 +1,82 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import (
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+)
+
+// StateDBer is copied from vm/interface.go
+// It is used by StateObject & Journal right now, to abstract StateDB & ParallelStateDB
+type StateDBer interface {
+	getBaseStateDB() *StateDB
+	getStateObject(common.Address) *StateObject // only accessible for journal
+	storeStateObj(common.Address, *StateObject) // only accessible for journal
+
+	CreateAccount(common.Address)
+
+	SubBalance(common.Address, *big.Int)
+	AddBalance(common.Address, *big.Int)
+	GetBalance(common.Address) *big.Int
+
+	GetNonce(common.Address) uint64
+	SetNonce(common.Address, uint64)
+
+	GetCodeHash(common.Address) common.Hash
+	GetCode(common.Address) []byte
+	SetCode(common.Address, []byte)
+	GetCodeSize(common.Address) int
+
+	AddRefund(uint64)
+	SubRefund(uint64)
+	GetRefund() uint64
+
+	GetCommittedState(common.Address, common.Hash) common.Hash
+	GetState(common.Address, common.Hash) common.Hash
+	SetState(common.Address, common.Hash, common.Hash)
+
+	Suicide(common.Address) bool
+	HasSuicided(common.Address) bool
+
+	// Exist reports whether the given account exists in state.
+	// Notably this should also return true for suicided accounts.
+	Exist(common.Address) bool
+	// Empty returns whether the given account is empty. Empty
+	// is defined according to EIP161 (balance = nonce = code = 0).
+	Empty(common.Address) bool
+
+	PrepareAccessList(sender common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList)
+	AddressInAccessList(addr common.Address) bool
+	SlotInAccessList(addr common.Address, slot common.Hash) (addressOk bool, slotOk bool)
+	// AddAddressToAccessList adds the given address to the access list. This operation is safe to perform
+	// even if the feature/fork is not active yet
+	AddAddressToAccessList(addr common.Address)
+	// AddSlotToAccessList adds the given (address,slot) to the access list. This operation is safe to perform
+	// even if the feature/fork is not active yet
+	AddSlotToAccessList(addr common.Address, slot common.Hash)
+
+	RevertToSnapshot(int)
+	Snapshot() int
+
+	AddLog(*types.Log)
+	AddPreimage(common.Hash, []byte)
+
+	ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) error
+}
diff --git a/core/state/journal.go b/core/state/journal.go
index 4f1fe2bf48..4e3cbfdb35 100644
--- a/core/state/journal.go
+++ b/core/state/journal.go
@@ -26,7 +26,7 @@ import (
 // reverted on demand.
 type journalEntry interface {
 	// revert undoes the changes introduced by this journal entry.
-	revert(*StateDB)
+	revert(StateDBer)
 	// dirtied returns the Ethereum address modified by this journal entry.
 	dirtied() *common.Address
@@ -58,10 +58,10 @@ func (j *journal) append(entry journalEntry) {
 // revert undoes a batch of journalled modifications along with any reverted
 // dirty handling too.
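 // Since revert now takes the StateDBer interface, one journal implementation can
 // roll back both the sequential StateDB and the per-slot ParallelStateDB. A
 // compile-time assertion would pin that contract down, e.g. (a sketch, not part
 // of this change):
 //
 //	var _ StateDBer = (*StateDB)(nil)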
-func (j *journal) revert(statedb *StateDB, snapshot int) { +func (j *journal) revert(dber StateDBer, snapshot int) { for i := len(j.entries) - 1; i >= snapshot; i-- { // Undo the changes made by the operation - j.entries[i].revert(statedb) + j.entries[i].revert(dber) // Drop any dirty tracking induced by the change if addr := j.entries[i].dirtied(); addr != nil { @@ -141,8 +141,18 @@ type ( } ) -func (ch createObjectChange) revert(s *StateDB) { - delete(s.stateObjects, *ch.account) +func (ch createObjectChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() + if s.parallel.isSlotDB { + delete(s.parallel.dirtiedStateObjectsInSlot, *ch.account) + delete(s.parallel.addrStateChangesInSlot, *ch.account) + delete(s.parallel.nonceChangesInSlot, *ch.account) + delete(s.parallel.balanceChangesInSlot, *ch.account) + delete(s.parallel.codeChangesInSlot, *ch.account) + delete(s.parallel.kvChangesInSlot, *ch.account) + } else { + s.deleteStateObj(*ch.account) + } delete(s.stateObjectsDirty, *ch.account) } @@ -150,10 +160,19 @@ func (ch createObjectChange) dirtied() *common.Address { return ch.account } -func (ch resetObjectChange) revert(s *StateDB) { - s.SetStateObject(ch.prev) +func (ch resetObjectChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() + if s.parallel.isSlotDB { + // ch.prev must be from dirtiedStateObjectsInSlot, put it back + s.parallel.dirtiedStateObjectsInSlot[ch.prev.address] = ch.prev + } else { + // ch.prev was got from main DB, put it back to main DB. + s.storeStateObj(ch.prev.address, ch.prev) + } if !ch.prevdestruct && s.snap != nil { + s.snapParallelLock.Lock() delete(s.snapDestructs, ch.prev.address) + s.snapParallelLock.Unlock() } } @@ -161,8 +180,8 @@ func (ch resetObjectChange) dirtied() *common.Address { return nil } -func (ch suicideChange) revert(s *StateDB) { - obj := s.getStateObject(*ch.account) +func (ch suicideChange) revert(dber StateDBer) { + obj := dber.getStateObject(*ch.account) if obj != nil { obj.suicided = ch.prev obj.setBalance(ch.prevbalance) @@ -175,46 +194,47 @@ func (ch suicideChange) dirtied() *common.Address { var ripemd = common.HexToAddress("0000000000000000000000000000000000000003") -func (ch touchChange) revert(s *StateDB) { +func (ch touchChange) revert(dber StateDBer) { } func (ch touchChange) dirtied() *common.Address { return ch.account } -func (ch balanceChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setBalance(ch.prev) +func (ch balanceChange) revert(dber StateDBer) { + dber.getStateObject(*ch.account).setBalance(ch.prev) } func (ch balanceChange) dirtied() *common.Address { return ch.account } -func (ch nonceChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setNonce(ch.prev) +func (ch nonceChange) revert(dber StateDBer) { + dber.getStateObject(*ch.account).setNonce(ch.prev) } func (ch nonceChange) dirtied() *common.Address { return ch.account } -func (ch codeChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setCode(common.BytesToHash(ch.prevhash), ch.prevcode) +func (ch codeChange) revert(dber StateDBer) { + dber.getStateObject(*ch.account).setCode(common.BytesToHash(ch.prevhash), ch.prevcode) } func (ch codeChange) dirtied() *common.Address { return ch.account } -func (ch storageChange) revert(s *StateDB) { - s.getStateObject(*ch.account).setState(ch.key, ch.prevalue) +func (ch storageChange) revert(dber StateDBer) { + dber.getStateObject(*ch.account).setState(ch.key, ch.prevalue) } func (ch storageChange) dirtied() *common.Address { return ch.account } -func (ch refundChange) revert(s 
*StateDB) { +func (ch refundChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() s.refund = ch.prev } @@ -222,7 +242,9 @@ func (ch refundChange) dirtied() *common.Address { return nil } -func (ch addLogChange) revert(s *StateDB) { +func (ch addLogChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() + logs := s.logs[ch.txhash] if len(logs) == 1 { delete(s.logs, ch.txhash) @@ -236,7 +258,8 @@ func (ch addLogChange) dirtied() *common.Address { return nil } -func (ch addPreimageChange) revert(s *StateDB) { +func (ch addPreimageChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() delete(s.preimages, ch.hash) } @@ -244,7 +267,8 @@ func (ch addPreimageChange) dirtied() *common.Address { return nil } -func (ch accessListAddAccountChange) revert(s *StateDB) { +func (ch accessListAddAccountChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() /* One important invariant here, is that whenever a (addr, slot) is added, if the addr is not already present, the add causes two journal entries: @@ -263,7 +287,8 @@ func (ch accessListAddAccountChange) dirtied() *common.Address { return nil } -func (ch accessListAddSlotChange) revert(s *StateDB) { +func (ch accessListAddSlotChange) revert(dber StateDBer) { + s := dber.getBaseStateDB() if s.accessList != nil { s.accessList.DeleteSlot(*ch.address, *ch.slot) } diff --git a/core/state/state_object.go b/core/state/state_object.go index fc89a4f904..0ef9343164 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -39,9 +39,18 @@ func (c Code) String() string { return string(c) //strings.Join(Disassemble(c), " ") } -type Storage map[common.Hash]common.Hash +type Storage interface { + String() string + GetValue(hash common.Hash) (common.Hash, bool) + StoreValue(hash common.Hash, value common.Hash) + Length() (length int) + Copy() Storage + Range(func(key, value interface{}) bool) +} + +type StorageMap map[common.Hash]common.Hash -func (s Storage) String() (str string) { +func (s StorageMap) String() (str string) { for key, value := range s { str += fmt.Sprintf("%X : %X\n", key, value) } @@ -49,8 +58,8 @@ func (s Storage) String() (str string) { return } -func (s Storage) Copy() Storage { - cpy := make(Storage) +func (s StorageMap) Copy() Storage { + cpy := make(StorageMap) for key, value := range s { cpy[key] = value } @@ -58,6 +67,79 @@ func (s Storage) Copy() Storage { return cpy } +func (s StorageMap) GetValue(hash common.Hash) (common.Hash, bool) { + value, ok := s[hash] + return value, ok +} + +func (s StorageMap) StoreValue(hash common.Hash, value common.Hash) { + s[hash] = value +} + +func (s StorageMap) Length() int { + return len(s) +} + +func (s StorageMap) Range(f func(hash, value interface{}) bool) { + for k, v := range s { + result := f(k, v) + if !result { + return + } + } +} + +type StorageSyncMap struct { + sync.Map +} + +func (s *StorageSyncMap) String() (str string) { + s.Range(func(key, value interface{}) bool { + str += fmt.Sprintf("%X : %X\n", key, value) + return true + }) + + return +} + +func (s *StorageSyncMap) GetValue(hash common.Hash) (common.Hash, bool) { + value, ok := s.Load(hash) + if !ok { + return common.Hash{}, ok + } + + return value.(common.Hash), ok +} + +func (s *StorageSyncMap) StoreValue(hash common.Hash, value common.Hash) { + s.Store(hash, value) +} + +func (s *StorageSyncMap) Length() (length int) { + s.Range(func(key, value interface{}) bool { + length++ + return true + }) + return length +} + +func (s *StorageSyncMap) Copy() Storage { + cpy := StorageSyncMap{} + 
s.Range(func(key, value interface{}) bool {
+		cpy.Store(key, value)
+		return true
+	})
+
+	return &cpy
+}
+
+func newStorage(isParallel bool) Storage {
+	if isParallel {
+		return &StorageSyncMap{}
+	}
+	return make(StorageMap)
+}
+
 // StateObject represents an Ethereum account which is being modified.
 //
 // The usage pattern is as follows:
@@ -69,6 +151,7 @@ type StateObject struct {
 	addrHash common.Hash // hash of ethereum address of the account
 	data     types.StateAccount
 	db       *StateDB
+	dbItf    StateDBer
 	rootCorrected bool // To indicate whether the root has been corrected in pipecommit mode
 	// DB error.
@@ -82,15 +165,17 @@ type StateObject struct {
 	trie Trie // storage trie, which becomes non-nil on first access
 	code Code // contract bytecode, which gets set when code is loaded
+	// isParallel indicates whether this state object is used in parallel mode,
+	// in which case the storage is a sync.Map instead of a plain map
+	isParallel          bool
 	sharedOriginStorage *sync.Map // Point to the entry of the stateObject in sharedPool
 	originStorage       Storage   // Storage cache of original entries to dedup rewrites, reset for every transaction
-
-	pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block
-	dirtyStorage   Storage // Storage entries that have been modified in the current transaction execution
-	fakeStorage    Storage // Fake storage which constructed by caller for debugging purpose.
+	pendingStorage      Storage   // Storage entries that need to be flushed to disk, at the end of an entire block
+	dirtyStorage        Storage   // Storage entries that have been modified in the current transaction execution
+	fakeStorage         Storage   // Fake storage which is constructed by the caller for debugging purposes.
 	// Cache flags.
-	// When an object is marked suicided it will be delete from the trie
+	// When an object is marked suicided it will be deleted from the trie
 	// during the "update" phase of the state transition.
 	dirtyCode bool // true if the code was updated
 	suicided  bool
@@ -102,13 +187,56 @@ type StateObject struct {
 // empty returns whether the account is considered empty.
 func (s *StateObject) empty() bool {
-	return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash)
+	// return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash)
+
+	// empty() has 3 use cases:
+	// 1.StateDB.Empty(), for the empty check
+	//   It is ok: we have handled it in Empty() to make sure nonce, balance and codeHash are solid.
+	// 2.AddBalance 0, the empty check for the touch event
+	//   empty() will add a touch event.
+	//   If we misjudge it, the touch event could be lost and the address would not be deleted. // fixme
+	// 3.Finalise(), for the empty delete
+	//   The address should be dirtied or touched:
+	//   if its nonce is dirtied, it is ok, since the nonce is monotonically increasing and won't be zero
+	//   if its balance is dirtied, the balance could be zero; we should refer to a solid nonce & codeHash // fixme
+	//   if its codeHash is dirtied, it is ok, since the code will not be updated
+	//   if it suicided, it is ok
+	//   if the object is newly created, it is ok
+	//   if CreateAccount recreates the address, it is ok
+
+	// Slot 0 tx 0: AddBalance(100) to addr_1                   => addr_1: balance = 100, nonce = 0, code is empty
+	// Slot 1 tx 1: addr_1 transfers 99.9979 with GasFee 0.0021 => addr_1: balance = 0, nonce = 1, code is empty
+	//              notice: a balance transfer costs 21,000 gas; with gasPrice = 100Gwei, the GasFee will be 0.0021
+	// Slot 0 tx 2: AddBalance(0) to addr_1 (the empty check for the touch event),
+	//              the object was lightCopied from tx 0
+
+	// In parallel mode, we should no longer check emptiness by the raw nonce, balance and codeHash,
+	// since they could be invalid.
+	// e.g. on AddBalance() to an address, we do a lightCopy to get a new StateObject and fix up the
+	// balance to make sure the object's balance is reliable. But we do not fix up the nonce or code;
+	// we only fix up the nonce or codeHash on demand, i.e. when we want to update them.
+	// Before the block is processed, the addr_1 account: nonce = 0, emptyCodeHash, balance = 100
+	//   Slot 0 tx 0: no access to addr_1
+	//   Slot 1 tx 1: sub balance 100, it is empty and deleted
+	//   Slot 0 tx 2: GetNonce, lightCopy based on the main DB (balance = 100), not empty
+
+	if s.dbItf.GetBalance(s.address).Sign() != 0 { // check the balance first, since it is most likely not zero
+		return false
+	}
+	if s.dbItf.GetNonce(s.address) != 0 {
+		return false
+	}
+	codeHash := s.dbItf.GetCodeHash(s.address)
+	return bytes.Equal(codeHash.Bytes(), emptyCodeHash) // code is empty, the object is empty
+
 }
 
 // newObject creates a state object.
-func newObject(db *StateDB, address common.Address, data types.StateAccount) *StateObject {
+func newObject(dbItf StateDBer, isParallel bool, address common.Address, data types.StateAccount) *StateObject {
+	db := dbItf.getBaseStateDB()
 	if data.Balance == nil {
-		data.Balance = new(big.Int)
+		data.Balance = new(big.Int) // todo: why not common.Big0?
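+		// A plausible answer to the todo above: common.Big0 is a shared, mutable *big.Int,
+		// so aliasing it into account data would risk corrupting the package-wide zero if
+		// the balance were ever mutated in place; a fresh new(big.Int) avoids that.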
} if data.CodeHash == nil { data.CodeHash = emptyCodeHash @@ -124,13 +252,15 @@ func newObject(db *StateDB, address common.Address, data types.StateAccount) *St return &StateObject{ db: db, + dbItf: dbItf, address: address, addrHash: crypto.Keccak256Hash(address[:]), data: data, + isParallel: isParallel, sharedOriginStorage: storageMap, - originStorage: make(Storage), - pendingStorage: make(Storage), - dirtyStorage: make(Storage), + originStorage: newStorage(isParallel), + dirtyStorage: newStorage(isParallel), + pendingStorage: newStorage(isParallel), } } @@ -187,19 +317,20 @@ func (s *StateObject) getTrie(db Database) Trie { func (s *StateObject) GetState(db Database, key common.Hash) common.Hash { // If the fake storage is set, only lookup the state here(in the debugging mode) if s.fakeStorage != nil { - return s.fakeStorage[key] + fakeValue, _ := s.fakeStorage.GetValue(key) + return fakeValue } // If we have a dirty value for this state entry, return it - value, dirty := s.dirtyStorage[key] + value, dirty := s.dirtyStorage.GetValue(key) if dirty { return value } - // Otherwise return the entry's original value + // Otherwise, return the entry's original value return s.GetCommittedState(db, key) } func (s *StateObject) getOriginStorage(key common.Hash) (common.Hash, bool) { - if value, cached := s.originStorage[key]; cached { + if value, cached := s.originStorage.GetValue(key); cached { return value, true } // if L1 cache miss, try to get it from shared pool @@ -208,9 +339,8 @@ func (s *StateObject) getOriginStorage(key common.Hash) (common.Hash, bool) { if !ok { return common.Hash{}, false } - storage := val.(common.Hash) - s.originStorage[key] = storage - return storage, true + s.originStorage.StoreValue(key, val.(common.Hash)) + return val.(common.Hash), true } return common.Hash{}, false } @@ -219,17 +349,18 @@ func (s *StateObject) setOriginStorage(key common.Hash, value common.Hash) { if s.db.writeOnSharedStorage && s.sharedOriginStorage != nil { s.sharedOriginStorage.Store(key, value) } - s.originStorage[key] = value + s.originStorage.StoreValue(key, value) } // GetCommittedState retrieves a value from the committed account storage trie. func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Hash { // If the fake storage is set, only lookup the state here(in the debugging mode) if s.fakeStorage != nil { - return s.fakeStorage[key] + fakeValue, _ := s.fakeStorage.GetValue(key) + return fakeValue } // If we have a pending write or clean cached, return that - if value, pending := s.pendingStorage[key]; pending { + if value, pending := s.pendingStorage.GetValue(key); pending { return value } @@ -248,9 +379,12 @@ func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Has // 1) resurrect happened, and new slot values were set -- those should // have been handles via pendingStorage above. // 2) we don't have new values, and can deliver empty response back - if _, destructed := s.db.snapDestructs[s.address]; destructed { + s.db.snapParallelLock.RLock() + if _, destructed := s.db.snapDestructs[s.address]; destructed { // fixme: use sync.Map, instead of RWMutex? 
+			s.db.snapParallelLock.RUnlock()
 			return common.Hash{}
 		}
+		s.db.snapParallelLock.RUnlock()
 		start := time.Now()
 		enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes()))
 		if metrics.EnabledExpensive {
@@ -289,11 +423,18 @@ func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Has
 func (s *StateObject) SetState(db Database, key, value common.Hash) {
 	// If the fake storage is set, put the temporary state update here.
 	if s.fakeStorage != nil {
-		s.fakeStorage[key] = value
+		s.fakeStorage.StoreValue(key, value)
 		return
 	}
 	// If the new value is the same as old, don't set
-	prev := s.GetState(db, key)
+	// In parallel mode, it has to read from the StateDB, in case:
+	// a. the slot did not set the key before and tries to set it to `val_1`
+	// b. the unconfirmed DB has set the key to `val_2`
+	// c. if we used StateObject.GetState and the key loaded from the main DB were `val_1`,
+	//    this `SetState` could be skipped
+	// d. finally, the key's value would be `val_2`, while it should be `val_1`
+	// such as: https://bscscan.com/txs?block=2491181
+	prev := s.dbItf.GetState(s.address, key)
 	if prev == value {
 		return
 	}
@@ -303,6 +444,10 @@ func (s *StateObject) SetState(db Database, key, value common.Hash) {
 		key:      key,
 		prevalue: prev,
 	})
+	if s.db.parallel.isSlotDB {
+		s.db.parallel.kvChangesInSlot[s.address][key] = struct{}{} // should be moved to here, after `s.db.GetState()`
+	}
+
 	s.setState(key, value)
 }
@@ -315,36 +460,40 @@ func (s *StateObject) SetState(db Database, key, value common.Hash) {
 func (s *StateObject) SetStorage(storage map[common.Hash]common.Hash) {
 	// Allocate fake storage if it's nil.
 	if s.fakeStorage == nil {
-		s.fakeStorage = make(Storage)
+		s.fakeStorage = newStorage(s.isParallel)
 	}
 	for key, value := range storage {
-		s.fakeStorage[key] = value
+		s.fakeStorage.StoreValue(key, value)
 	}
 	// Don't bother journal since this function should only be used for
 	// debugging and the `fake` storage won't be committed to database.
 }
 func (s *StateObject) setState(key, value common.Hash) {
-	s.dirtyStorage[key] = value
+	s.dirtyStorage.StoreValue(key, value)
 }
 // finalise moves all dirty storage slots into the pending area to be hashed or
 // committed later. It is invoked at the end of every transaction.
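 // In parallel mode, dirtyStorage may be a StorageSyncMap (backed by sync.Map) rather
 // than a plain map, so the iteration below goes through the Storage interface's
 // Range/GetValue methods instead of a native map range.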
func (s *StateObject) finalise(prefetch bool) { - slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage)) - for key, value := range s.dirtyStorage { - s.pendingStorage[key] = value - if value != s.originStorage[key] { - slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure + slotsToPrefetch := make([][]byte, 0, s.dirtyStorage.Length()) + s.dirtyStorage.Range(func(key, value interface{}) bool { + s.pendingStorage.StoreValue(key.(common.Hash), value.(common.Hash)) + + originalValue, _ := s.originStorage.GetValue(key.(common.Hash)) + if value.(common.Hash) != originalValue { + originalKey := key.(common.Hash) + slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(originalKey[:])) // Copy needed for closure } - } + return true + }) prefetcher := s.db.prefetcher if prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != emptyRoot { prefetcher.prefetch(s.data.Root, slotsToPrefetch, s.addrHash) } - if len(s.dirtyStorage) > 0 { - s.dirtyStorage = make(Storage) + if s.dirtyStorage.Length() > 0 { + s.dirtyStorage = newStorage(s.isParallel) } } @@ -353,7 +502,7 @@ func (s *StateObject) finalise(prefetch bool) { func (s *StateObject) updateTrie(db Database) Trie { // Make sure all dirty slots are finalized into the pending storage area s.finalise(false) // Don't prefetch anymore, pull directly if need be - if len(s.pendingStorage) == 0 { + if s.pendingStorage.Length() == 0 { return s.trie } // Track the amount of time wasted on updating the storage trie @@ -367,21 +516,30 @@ func (s *StateObject) updateTrie(db Database) Trie { // Insert all the pending updates into the trie tr := s.getTrie(db) - usedStorage := make([][]byte, 0, len(s.pendingStorage)) dirtyStorage := make(map[common.Hash][]byte) - for key, value := range s.pendingStorage { + usedStorage := make([][]byte, 0, s.pendingStorage.Length()) + s.pendingStorage.Range(func(keyItf, valueItf interface{}) bool { + key := keyItf.(common.Hash) + value := valueItf.(common.Hash) // Skip noop changes, persist actual changes - if value == s.originStorage[key] { - continue + originalValue, _ := s.originStorage.GetValue(key) + if value == originalValue { + return true } - s.originStorage[key] = value - var v []byte - if value != (common.Hash{}) { + + s.setOriginStorage(key, value) + + var vs []byte + if (value == common.Hash{}) { + s.setError(tr.TryDelete(key[:])) + } else { // Encoding []byte cannot fail, ok to ignore the error. - v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:])) + vs, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:])) + s.setError(tr.TryUpdate(key[:], vs)) } - dirtyStorage[key] = v - } + dirtyStorage[key] = vs + return true + }) var wg sync.WaitGroup wg.Add(1) go func() { @@ -392,7 +550,7 @@ func (s *StateObject) updateTrie(db Database) Trie { } else { s.setError(tr.TryUpdate(key[:], value)) } - usedStorage = append(usedStorage, common.CopyBytes(key[:])) + usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure } }() if s.db.snap != nil { @@ -419,9 +577,8 @@ func (s *StateObject) updateTrie(db Database) Trie { if prefetcher != nil { prefetcher.used(s.data.Root, usedStorage) } - - if len(s.pendingStorage) > 0 { - s.pendingStorage = make(Storage) + if s.pendingStorage.Length() > 0 { + s.pendingStorage = newStorage(s.isParallel) } return tr } @@ -512,8 +669,24 @@ func (s *StateObject) setBalance(amount *big.Int) { s.data.Balance = amount } +// Return the gas back to the origin. 
Used by the virtual machine or closures
+func (s *StateObject) ReturnGas(gas *big.Int) {}
+
+func (s *StateObject) lightCopy(db *ParallelStateDB) *StateObject {
+	stateObject := newObject(db, s.isParallel, s.address, s.data)
+	if s.trie != nil {
+		// fixme: no need to copy the trie for a light copy, since a light-copied object won't access the trie DB
+		stateObject.trie = db.db.CopyTrie(s.trie)
+	}
+	stateObject.code = s.code
+	stateObject.suicided = false       // should be false
+	stateObject.dirtyCode = s.dirtyCode // it is not used in slot, but keeping it is ok
+	stateObject.deleted = false        // should be false
+	return stateObject
+}
+
 func (s *StateObject) deepCopy(db *StateDB) *StateObject {
-	stateObject := newObject(db, s.address, s.data)
+	stateObject := newObject(db, s.isParallel, s.address, s.data)
 	if s.trie != nil {
 		stateObject.trie = db.db.CopyTrie(s.trie)
 	}
@@ -527,6 +700,15 @@ func (s *StateObject) deepCopy(db *StateDB) *StateObject {
 	return stateObject
 }
+func (s *StateObject) MergeSlotObject(db Database, dirtyObjs *StateObject, keys StateKeys) {
+	for key := range keys {
+		// In parallel mode, always GetState through the StateDB, not through the StateObject directly,
+		// since the KV could exist in the unconfirmed DB.
+		// But here it should be ok, since the KV should be changed and valid in the SlotDB.
+		s.setState(key, dirtyObjs.GetState(db, key))
+	}
+}
+
 //
 // Attribute accessors
 //
@@ -570,7 +752,7 @@ func (s *StateObject) CodeSize(db Database) int {
 }
 func (s *StateObject) SetCode(codeHash common.Hash, code []byte) {
-	prevcode := s.Code(s.db.db)
+	prevcode := s.dbItf.GetCode(s.address)
 	s.db.journal.append(codeChange{
 		account:  &s.address,
 		prevhash: s.CodeHash(),
@@ -586,9 +768,10 @@ func (s *StateObject) setCode(codeHash common.Hash, code []byte) {
 }
 func (s *StateObject) SetNonce(nonce uint64) {
+	prevNonce := s.dbItf.GetNonce(s.address)
 	s.db.journal.append(nonceChange{
 		account: &s.address,
-		prev:    s.data.Nonce,
+		prev:    prevNonce,
 	})
 	s.setNonce(nonce)
 }
diff --git a/core/state/state_test.go b/core/state/state_test.go
index 4cc5c33a85..6c781a9f8f 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -230,30 +230,47 @@ func compareStateObjects(so0, so1 *StateObject, t *testing.T) {
 		t.Fatalf("Code mismatch: have %v, want %v", so0.code, so1.code)
 	}
-	if len(so1.dirtyStorage) != len(so0.dirtyStorage) {
-		t.Errorf("Dirty storage size mismatch: have %d, want %d", len(so1.dirtyStorage), len(so0.dirtyStorage))
+	if so1.dirtyStorage.Length() != so0.dirtyStorage.Length() {
+		t.Errorf("Dirty storage size mismatch: have %d, want %d", so1.dirtyStorage.Length(), so0.dirtyStorage.Length())
 	}
-	for k, v := range so1.dirtyStorage {
-		if so0.dirtyStorage[k] != v {
-			t.Errorf("Dirty storage key %x mismatch: have %v, want %v", k, so0.dirtyStorage[k], v)
+
+	so1.dirtyStorage.Range(func(key, value interface{}) bool {
+		k, v := key.(common.Hash), value.(common.Hash)
+
+		if tmpV, _ := so0.dirtyStorage.GetValue(k); tmpV != v {
+			t.Errorf("Dirty storage key %x mismatch: have %v, want %v", k, tmpV.String(), v)
 		}
-	}
-	for k, v := range so0.dirtyStorage {
-		if so1.dirtyStorage[k] != v {
+		return true
+	})
+
+	so0.dirtyStorage.Range(func(key, value interface{}) bool {
+		k, v := key.(common.Hash), value.(common.Hash)
+
+		if tmpV, _ := so1.dirtyStorage.GetValue(k); tmpV != v {
 			t.Errorf("Dirty storage key %x mismatch: have %v, want none.", k, v)
 		}
+		return true
+	})
+
+	if so1.originStorage.Length() != so0.originStorage.Length() {
+		t.Errorf("Origin storage size mismatch: have %d, want %d", so1.originStorage.Length(),
so0.originStorage.Length()) } - if len(so1.originStorage) != len(so0.originStorage) { - t.Errorf("Origin storage size mismatch: have %d, want %d", len(so1.originStorage), len(so0.originStorage)) - } - for k, v := range so1.originStorage { - if so0.originStorage[k] != v { - t.Errorf("Origin storage key %x mismatch: have %v, want %v", k, so0.originStorage[k], v) + + so1.originStorage.Range(func(key, value interface{}) bool { + k, v := key.(common.Hash), value.(common.Hash) + + if tmpV, _ := so0.originStorage.GetValue(k); tmpV != v { + t.Errorf("Origin storage key %x mismatch: have %v, want %v", k, tmpV, v) } - } - for k, v := range so0.originStorage { - if so1.originStorage[k] != v { + return true + }) + + so0.originStorage.Range(func(key, value interface{}) bool { + k, v := key.(common.Hash), value.(common.Hash) + + if tmpV, _ := so1.originStorage.GetValue(k); tmpV != v { t.Errorf("Origin storage key %x mismatch: have %v, want none.", k, v) } - } + return true + }) } diff --git a/core/state/statedb.go b/core/state/statedb.go index e7bf3c9491..f68c4e5a05 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -18,6 +18,7 @@ package state import ( + "bytes" "errors" "fmt" "math/big" @@ -49,8 +50,10 @@ type revision struct { var ( // emptyRoot is the known root hash of an empty trie. emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - emptyAddr = crypto.Keccak256Hash(common.Address{}.Bytes()) + + WBNBAddress = common.HexToAddress("0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c") + parallelKvOnce sync.Once ) type proofList [][]byte @@ -64,6 +67,105 @@ func (n *proofList) Delete(key []byte) error { panic("not supported") } +type StateKeys map[common.Hash]struct{} + +type StateObjectSyncMap struct { + sync.Map +} + +func (s *StateObjectSyncMap) LoadStateObject(addr common.Address) (*StateObject, bool) { + stateObject, ok := s.Load(addr) + if !ok { + return nil, ok + } + return stateObject.(*StateObject), ok +} + +func (s *StateObjectSyncMap) StoreStateObject(addr common.Address, stateObject *StateObject) { + s.Store(addr, stateObject) +} + +// loadStateObj is the entry for loading state object from stateObjects in StateDB or stateObjects in parallel +func (s *StateDB) loadStateObj(addr common.Address) (*StateObject, bool) { + if s.isParallel { + return s.parallel.stateObjects.LoadStateObject(addr) + } + obj, ok := s.stateObjects[addr] + return obj, ok +} + +// storeStateObj is the entry for storing state object to stateObjects in StateDB or stateObjects in parallel +func (s *StateDB) storeStateObj(addr common.Address, stateObject *StateObject) { + if s.isParallel { + // When a state object is stored into s.parallel.stateObjects, + // it belongs to base StateDB, it is confirmed and valid. 
+		stateObject.db.storeParallelLock.Lock()
+		s.parallel.stateObjects.Store(addr, stateObject)
+		stateObject.db.storeParallelLock.Unlock()
+	} else {
+		s.stateObjects[addr] = stateObject
+	}
+}
+
+// deleteStateObj is the entry for deleting a state object, from stateObjects in StateDB or stateObjects in parallel
+func (s *StateDB) deleteStateObj(addr common.Address) {
+	if s.isParallel {
+		s.parallel.stateObjects.Delete(addr)
+	} else {
+		delete(s.stateObjects, addr)
+	}
+}
+
+// ParallelState is for parallel mode only
+type ParallelState struct {
+	isSlotDB  bool // denotes a StateDB used in a slot; we will try to remove it
+	SlotIndex int  // for debug, to be removed
+	// stateObjects holds the state objects in the base slot db.
+	// The reason for using this thread-safe map instead of the outer StateDB.stateObjects is that
+	// many slots will read state objects from it concurrently, and we will merge all the changes
+	// made by the concurrent slots into it.
+	stateObjects *StateObjectSyncMap
+
+	baseStateDB               *StateDB // for parallel mode, there will be a base StateDB in the dispatcher routine
+	baseTxIndex               int      // slotDB is created based on this tx index
+	dirtiedStateObjectsInSlot map[common.Address]*StateObject
+	unconfirmedDBs            *sync.Map /*map[int]*ParallelStateDB*/ // do unconfirmed references within the same slot
+
+	// We record the read details for the conflict check, and the changed addresses or keys for the
+	// object merge; the change details can be obtained from the dirty objects.
+	nonceChangesInSlot   map[common.Address]struct{}
+	nonceReadsInSlot     map[common.Address]uint64
+	balanceChangesInSlot map[common.Address]struct{} // the address's balance has been changed
+	balanceReadsInSlot   map[common.Address]*big.Int // the address's balance has been read and used
+	// codeSize can be derived from code, but codeHash can not be directly derived from code:
+	// - codeSize is 0 for a non-existent address or empty code
+	// - codeHash is `common.Hash{}` for a non-existent address, emptyCodeHash (`Keccak256Hash(nil)`) for empty code,
+	// so we use codeReadsInSlot & codeHashReadsInSlot to keep code and codeHash; codeSize is derived from code
+	codeReadsInSlot     map[common.Address][]byte // empty if the address does not exist or has no code
+	codeHashReadsInSlot map[common.Address]common.Hash
+	codeChangesInSlot   map[common.Address]struct{}
+	kvReadsInSlot       map[common.Address]Storage
+	kvChangesInSlot     map[common.Address]StateKeys // value will be kept in dirtiedStateObjectsInSlot
+	// Actions such as SetCode and Suicide will change the address's state.
+	// Later calls like Exist(), Empty(), HasSuicided() depend on the address's state.
+	addrStateReadsInSlot   map[common.Address]bool // true: exist, false: not exist or deleted
+	addrStateChangesInSlot map[common.Address]bool // true: created, false: deleted
+
+	addrSnapDestructsReadsInSlot map[common.Address]bool
+
+	// Transactions pay the gas fee to the system address.
+	// Parallel execution clears the system address's balance first, in order to maintain the transaction's
+	// gas fee value. A normal transaction accesses the system address exactly twice; otherwise the
+	// transaction needs the system address's real balance and will be marked for redo with
+	// keepSystemAddressBalance = true
+	systemAddress            common.Address
+	systemAddressOpsCount    int
+	keepSystemAddressBalance bool
+
+	// We may need to redo a transaction for specific reasons, e.g. we read the wrong state and would
+	// otherwise have to panic, as sequential mode does in SubRefund
+	needsRedo bool
+}
+
 // StateDB structs within the ethereum protocol are used to store anything
 // within the merkle trie. StateDBs take care of caching and storing
 // nested states. It's the general query interface to retrieve:
@@ -87,13 +189,15 @@ type StateDB struct {
 	fullProcessed bool
 	pipeCommit    bool
-	snaps          *snapshot.Tree
-	snap           snapshot.Snapshot
-	snapAccountMux sync.Mutex // Mutex for snap account access
-	snapStorageMux sync.Mutex // Mutex for snap storage access
-	snapDestructs  map[common.Address]struct{}
-	snapAccounts   map[common.Address][]byte
-	snapStorage    map[common.Address]map[string][]byte
+	snaps             *snapshot.Tree
+	snap              snapshot.Snapshot
+	snapAccountMux    sync.Mutex // Mutex for snap account access
+	snapStorageMux    sync.Mutex // Mutex for snap storage access
+	storeParallelLock sync.RWMutex
+	snapParallelLock  sync.RWMutex // for parallel mode: slots read the main StateDB's snapshot, while the processor writes it
+	snapDestructs     map[common.Address]struct{}
+	snapAccounts      map[common.Address][]byte
+	snapStorage       map[common.Address]map[string][]byte
 	// This map holds 'live' objects, which will get modified while processing a state transition.
 	stateObjects        map[common.Address]*StateObject
@@ -102,6 +206,9 @@ type StateDB struct {
 	storagePool          *StoragePool // sharedPool to store L1 originStorage of stateObjects
 	writeOnSharedStorage bool         // Write to the shared origin storage of a stateObject while reading from the underlying storage layer.
+	isParallel bool
+	parallel   ParallelState // to keep all the parallel execution elements
+
 	// DB error.
 	// State objects are used by the consensus core and VM which are
 	// unable to deal with database-level errors.
Any error that occurs @@ -153,7 +260,7 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) return newStateDB(root, db, snaps) } -// NewWithSharedPool creates a new state with sharedStorge on layer 1.5 +// NewWithSharedPool creates a new state with sharedStorage on layer 1.5 func NewWithSharedPool(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) { statedb, err := newStateDB(root, db, snaps) if err != nil { @@ -165,12 +272,16 @@ func NewWithSharedPool(root common.Hash, db Database, snaps *snapshot.Tree) (*St func newStateDB(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) { sdb := &StateDB{ - db: db, - originalRoot: root, - snaps: snaps, - stateObjects: make(map[common.Address]*StateObject, defaultNumOfSlots), + db: db, + originalRoot: root, + snaps: snaps, + stateObjects: make(map[common.Address]*StateObject, defaultNumOfSlots), + parallel: ParallelState{ + SlotIndex: -1, + }, stateObjectsPending: make(map[common.Address]struct{}, defaultNumOfSlots), stateObjectsDirty: make(map[common.Address]struct{}, defaultNumOfSlots), + txIndex: -1, logs: make(map[common.Hash][]*types.Log, defaultNumOfSlots), preimages: make(map[common.Hash][]byte), journal: newJournal(), @@ -193,6 +304,7 @@ func newStateDB(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, } _, sdb.noTrie = tr.(*trie.EmptyTrie) sdb.trie = tr + sdb.EnableWriteOnSharedStorage() return sdb, nil } @@ -200,6 +312,14 @@ func (s *StateDB) EnableWriteOnSharedStorage() { s.writeOnSharedStorage = true } +func (s *StateDB) getBaseStateDB() *StateDB { + return s +} + +func (s *StateDB) getStateObjectFromStateObjects(addr common.Address) (*StateObject, bool) { + return s.loadStateObj(addr) +} + // StartPrefetcher initializes a new trie prefetcher to pull in nodes from the // state trie concurrently while the state is mutated so that when we reach the // commit phase, most of the needed data is already hot. @@ -237,7 +357,7 @@ func (s *StateDB) StopPrefetcher() { } func (s *StateDB) TriePrefetchInAdvance(block *types.Block, signer types.Signer) { - // s is a temporary throw away StateDB, s.prefetcher won't be resetted to nil + // s is a temporary throw away StateDB, s.prefetcher won't be reset to nil // so no need to add lock for s.prefetcher prefetcher := s.prefetcher if prefetcher == nil { @@ -396,7 +516,8 @@ func (s *StateDB) SubRefund(gas uint64) { // Exist reports whether the given account address exists in the state. // Notably this also returns true for suicided accounts. 
func (s *StateDB) Exist(addr common.Address) bool {
-	return s.getStateObject(addr) != nil
+	exist := s.getStateObject(addr) != nil
+	return exist
 }
 // Empty returns whether the state object is either non-existent
@@ -408,20 +529,26 @@ func (s *StateDB) Empty(addr common.Address) bool {
 // GetBalance retrieves the balance from the given address or 0 if object not found
 func (s *StateDB) GetBalance(addr common.Address) *big.Int {
+	balance := common.Big0
 	stateObject := s.getStateObject(addr)
 	if stateObject != nil {
-		return stateObject.Balance()
+		balance = stateObject.Balance()
 	}
-	return common.Big0
+	return balance
+}
+
+func (s *StateDB) GetBalanceOpCode(addr common.Address) *big.Int {
+	return s.GetBalance(addr)
 }
 func (s *StateDB) GetNonce(addr common.Address) uint64 {
+	var nonce uint64 = 0
 	stateObject := s.getStateObject(addr)
 	if stateObject != nil {
-		return stateObject.Nonce()
+		nonce = stateObject.Nonce()
 	}
-	return 0
+	return nonce
 }
 // TxIndex returns the current transaction index set by Prepare.
@@ -429,37 +556,50 @@ func (s *StateDB) TxIndex() int {
 	return s.txIndex
 }
+// BaseTxIndex returns the tx index that the slot db is based on.
+func (s *StateDB) BaseTxIndex() int {
+	return s.parallel.baseTxIndex
+}
+
 func (s *StateDB) GetCode(addr common.Address) []byte {
 	stateObject := s.getStateObject(addr)
+	var code []byte
 	if stateObject != nil {
-		return stateObject.Code(s.db)
+		code = stateObject.Code(s.db)
 	}
-	return nil
+	return code
 }
 func (s *StateDB) GetCodeSize(addr common.Address) int {
+	var codeSize = 0
 	stateObject := s.getStateObject(addr)
 	if stateObject != nil {
-		return stateObject.CodeSize(s.db)
+		codeSize = stateObject.CodeSize(s.db)
 	}
-	return 0
+	return codeSize
 }
+// GetCodeHash returns:
+// - common.Hash{}: the address does not exist
+// - emptyCodeHash: the address exists, but the code is empty
+// - others: the address exists, and the code is not empty
 func (s *StateDB) GetCodeHash(addr common.Address) common.Hash {
 	stateObject := s.getStateObject(addr)
-	if stateObject == nil {
-		return common.Hash{}
+	codeHash := common.Hash{}
+	if stateObject != nil {
+		codeHash = common.BytesToHash(stateObject.CodeHash())
 	}
-	return common.BytesToHash(stateObject.CodeHash())
+	return codeHash
 }
 // GetState retrieves a value from the given account's storage trie.
 func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
 	stateObject := s.getStateObject(addr)
+	val := common.Hash{}
 	if stateObject != nil {
-		return stateObject.GetState(s.db, hash)
+		val = stateObject.GetState(s.db, hash)
 	}
-	return common.Hash{}
+	return val
 }
 // GetProof returns the Merkle proof for a given account.
@@ -491,10 +631,11 @@ func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte,
 // GetCommittedState retrieves a value from the given account's committed storage trie.
 func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
 	stateObject := s.getStateObject(addr)
+	val := common.Hash{}
 	if stateObject != nil {
-		return stateObject.GetCommittedState(s.db, hash)
+		val = stateObject.GetCommittedState(s.db, hash)
 	}
-	return common.Hash{}
+	return val
 }
 // Database retrieves the low level database supporting the lower level trie ops.
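The tri-state contract documented on GetCodeHash above can be made concrete with a small sketch (classifyCodeHash is a hypothetical helper for illustration, not part of this change):

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/crypto"
	)

	// classifyCodeHash interprets a StateDB.GetCodeHash result per the comment above.
	func classifyCodeHash(h common.Hash) string {
		var zero common.Hash
		switch h {
		case zero:
			return "address does not exist"
		case crypto.Keccak256Hash(nil): // emptyCodeHash
			return "address exists, code is empty"
		default:
			return "address exists with non-empty code"
		}
	}

	func main() {
		fmt.Println(classifyCodeHash(common.Hash{}))
	}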
@@ -559,7 +700,8 @@ func (s *StateDB) SetNonce(addr common.Address, nonce uint64) { func (s *StateDB) SetCode(addr common.Address, code []byte) { stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { - stateObject.SetCode(crypto.Keccak256Hash(code), code) + codeHash := crypto.Keccak256Hash(code) + stateObject.SetCode(codeHash, code) } } @@ -589,14 +731,15 @@ func (s *StateDB) Suicide(addr common.Address) bool { if stateObject == nil { return false } + s.journal.append(suicideChange{ account: &addr, - prev: stateObject.suicided, - prevbalance: new(big.Int).Set(stateObject.Balance()), + prev: stateObject.suicided, // todo: must be false? + prevbalance: new(big.Int).Set(s.GetBalance(addr)), }) + stateObject.markSuicided() stateObject.data.Balance = new(big.Int) - return true } @@ -646,17 +789,8 @@ func (s *StateDB) getStateObject(addr common.Address) *StateObject { return nil } -// getDeletedStateObject is similar to getStateObject, but instead of returning -// nil for a deleted state object, it returns the actual object with the deleted -// flag set. This is needed by the state journal to revert to the correct s- -// destructed object instead of wiping all knowledge about the state object. -func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject { - // Prefer live objects if any is available - if obj := s.stateObjects[addr]; obj != nil { - return obj - } +func (s *StateDB) getStateObjectFromSnapshotOrTrie(addr common.Address) (data *types.StateAccount, ok bool) { // If no live objects are available, attempt to use snapshots - var data *types.StateAccount if s.snap != nil { start := time.Now() acc, err := s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())) @@ -665,7 +799,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject { } if err == nil { if acc == nil { - return nil + return nil, false } data = &types.StateAccount{ Nonce: acc.Nonce, @@ -688,7 +822,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject { tr, err := s.db.OpenTrie(s.originalRoot) if err != nil { s.setError(fmt.Errorf("failed to open trie tree")) - return nil + return nil, false } s.trie = tr } @@ -699,59 +833,87 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject { } if err != nil { s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err)) - return nil + return nil, false } if len(enc) == 0 { - return nil + return nil, false } data = new(types.StateAccount) if err := rlp.DecodeBytes(enc, data); err != nil { log.Error("Failed to decode state object", "addr", addr, "err", err) - return nil + return nil, false } } - // Insert into the live set - obj := newObject(s, addr, *data) - s.SetStateObject(obj) - return obj + return data, true } -func (s *StateDB) SetStateObject(object *StateObject) { - s.stateObjects[object.Address()] = object +// getDeletedStateObject is similar to getStateObject, but instead of returning +// nil for a deleted state object, it returns the actual object with the deleted +// flag set. This is needed by the state journal to revert to the correct s- +// destructed object instead of wiping all knowledge about the state object. 
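+// In parallel mode, the live-object lookup below goes through getStateObjectFromStateObjects,
+// which reads the thread-safe parallel.stateObjects sync.Map instead of the plain map.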
+func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject {
+	// Prefer live objects if any is available
+	if obj, _ := s.getStateObjectFromStateObjects(addr); obj != nil {
+		return obj
+	}
+	data, ok := s.getStateObjectFromSnapshotOrTrie(addr)
+	if !ok {
+		return nil
+	}
+	// Insert into the live set
+	obj := newObject(s, s.isParallel, addr, *data)
+	s.storeStateObj(addr, obj)
+	return obj
 }
 // GetOrNewStateObject retrieves a state object or create a new state object if nil.
 func (s *StateDB) GetOrNewStateObject(addr common.Address) *StateObject {
 	stateObject := s.getStateObject(addr)
 	if stateObject == nil {
-		stateObject, _ = s.createObject(addr)
+		stateObject = s.createObject(addr)
 	}
 	return stateObject
 }
 // createObject creates a new state object. If there is an existing account with
 // the given address, it is overwritten and returned as the second return value.
+// prev is used for CreateAccount to get its balance.
+// Parallel mode:
+// if prev is in dirty: revert is ok
+// if prev is in the unconfirmed DB: record the addr state read; revert should not put it back
+// if prev is in the main DB: record the addr state read; revert should not put it back
+// if prev does not exist: record the addr state read
+
+// `prev` is used to handle revert, to recover with the `prev` object.
+// In parallel mode, we only need to recover to `prev` in the SlotDB:
+// a. if it is not in the SlotDB, `revert` will remove it from the SlotDB
+// b. if it exists in the SlotDB, `revert` will recover to the `prev` in the SlotDB
+// c. for `snapDestructs` it is the same
+func (s *StateDB) createObject(addr common.Address) (newobj *StateObject) {
+	prev := s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!
 	var prevdestruct bool
+	if s.snap != nil && prev != nil {
+		s.snapParallelLock.Lock() // fixme: with the new dispatch policy, the last Tx could still be running while the block has been processed
 		_, prevdestruct = s.snapDestructs[prev.address]
 		if !prevdestruct {
+			// To destroy the previous trie node first and update the trie tree
+			// with the new object on block commit.
 			s.snapDestructs[prev.address] = struct{}{}
 		}
+		s.snapParallelLock.Unlock()
 	}
-	newobj = newObject(s, addr, types.StateAccount{})
+	newobj = newObject(s, s.isParallel, addr, types.StateAccount{})
+	newobj.setNonce(0) // sets the object to dirty
 	if prev == nil {
 		s.journal.append(createObjectChange{account: &addr})
 	} else {
 		s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct})
 	}
-	s.SetStateObject(newobj)
-	if prev != nil && !prev.deleted {
-		return newobj, prev
-	}
-	return newobj, nil
+
+	s.storeStateObj(addr, newobj)
+	return newobj
 }
 // CreateAccount explicitly creates a state object. If a state object with the address
@@ -765,22 +927,24 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *StateObject)
 //
 // Carrying over the balance ensures that Ether doesn't disappear.
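+// Note: createObject no longer returns the previous object, so CreateAccount below
+// snapshots the pre-existing balance via s.GetBalance before recreating the account.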
func (s *StateDB) CreateAccount(addr common.Address) {
-	newObj, prev := s.createObject(addr)
-	if prev != nil {
-		newObj.setBalance(prev.data.Balance)
-	}
+	// No matter whether it comes from the dirty, unconfirmed or main DB:
+	// if the addr does not exist, preBalance will be common.Big0, which is the same as the
+	// new(big.Int) that newObject() sets.
+	preBalance := s.GetBalance(addr)
+	newObj := s.createObject(addr)
+	newObj.setBalance(new(big.Int).Set(preBalance)) // new big.Int for newObj
 }
-func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error {
-	so := db.getStateObject(addr)
+func (s *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error {
+	so := s.getStateObject(addr)
 	if so == nil {
 		return nil
 	}
-	it := trie.NewIterator(so.getTrie(db.db).NodeIterator(nil))
+	it := trie.NewIterator(so.getTrie(s.db).NodeIterator(nil))
 	for it.Next() {
-		key := common.BytesToHash(db.trie.GetKey(it.Key))
-		if value, dirty := so.dirtyStorage[key]; dirty {
+		key := common.BytesToHash(s.trie.GetKey(it.Key))
+		if value, dirty := so.dirtyStorage.GetValue(key); dirty {
 			if !cb(key, value) {
 				return nil
 			}
@@ -828,6 +992,7 @@ func (s *StateDB) copyInternal(doPrefetch bool) *StateDB {
 		preimages:           make(map[common.Hash][]byte, len(s.preimages)),
 		journal:             newJournal(),
 		hasher:              crypto.NewKeccakState(),
+		parallel:            ParallelState{},
 	}
 	// Copy the dirty states, logs, and preimages
 	for addr := range s.journal.dirties {
 		// As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527),
 		// and in the Finalise-method, there is a case where an object is in the journal but not
 		// in the stateObjects: OOG after touch on ripeMD prior to Byzantium. Thus, we need to check for
 		// nil
-		if object, exist := s.stateObjects[addr]; exist {
+		if object, exist := s.getStateObjectFromStateObjects(addr); exist {
 			// Even though the original object is dirty, we are not copying the journal,
 			// so we need to make sure that anyside effect the journal would have caused
 			// during a commit (or similar op) is already applied to the copy.
-			state.stateObjects[addr] = object.deepCopy(state)
+			state.storeStateObj(addr, object.deepCopy(state))
 			state.stateObjectsDirty[addr] = struct{}{}   // Mark the copy dirty to force internal (code/state) commits
 			state.stateObjectsPending[addr] = struct{}{} // Mark the copy pending to force external (account) commits
@@ -849,14 +1014,16 @@ func (s *StateDB) copyInternal(doPrefetch bool) *StateDB {
 	// loop above will be a no-op, since the copy's journal is empty.
// Thus, here we iterate over stateObjects, to enable copies of copies for addr := range s.stateObjectsPending { - if _, exist := state.stateObjects[addr]; !exist { - state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state) + if _, exist := state.getStateObjectFromStateObjects(addr); !exist { + object, _ := s.getStateObjectFromStateObjects(addr) + state.storeStateObj(addr, object.deepCopy(state)) } state.stateObjectsPending[addr] = struct{}{} } for addr := range s.stateObjectsDirty { - if _, exist := state.stateObjects[addr]; !exist { - state.stateObjects[addr] = s.stateObjects[addr].deepCopy(state) + if _, exist := state.getStateObjectFromStateObjects(addr); !exist { + object, _ := s.getStateObjectFromStateObjects(addr) + state.storeStateObj(addr, object.deepCopy(state)) } state.stateObjectsDirty[addr] = struct{}{} } @@ -889,7 +1056,7 @@ func (s *StateDB) copyInternal(doPrefetch bool) *StateDB { } if s.snaps != nil { // In order for the miner to be able to use and make additions - // to the snapshot tree, we need to copy that aswell. + // to the snapshot tree, we need to copy that as well. // Otherwise, any block mined by ourselves will cause gaps in the tree, // and force the miner to operate trie-backed only state.snaps = s.snaps @@ -915,6 +1082,252 @@ func (s *StateDB) copyInternal(doPrefetch bool) *StateDB { return state } +var journalPool = sync.Pool{ + New: func() interface{} { + return &journal{ + dirties: make(map[common.Address]int, defaultNumOfSlots), + entries: make([]journalEntry, 0, defaultNumOfSlots), + } + }, +} + +var addressToStructPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]struct{}, defaultNumOfSlots) }, +} + +var addressToStateKeysPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]StateKeys, defaultNumOfSlots) }, +} + +var addressToStoragePool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]Storage, defaultNumOfSlots) }, +} + +var addressToStateObjectsPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]*StateObject, defaultNumOfSlots) }, +} + +var balancePool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]*big.Int, defaultNumOfSlots) }, +} + +var addressToHashPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]common.Hash, defaultNumOfSlots) }, +} + +var addressToBytesPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address][]byte, defaultNumOfSlots) }, +} + +var addressToBoolPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]bool, defaultNumOfSlots) }, +} + +var addressToUintPool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]uint64, defaultNumOfSlots) }, +} + +var snapStoragePool = sync.Pool{ + New: func() interface{} { return make(map[common.Address]map[string][]byte, defaultNumOfSlots) }, +} + +var snapStorageValuePool = sync.Pool{ + New: func() interface{} { return make(map[string][]byte, defaultNumOfSlots) }, +} + +var logsPool = sync.Pool{ + New: func() interface{} { return make(map[common.Hash][]*types.Log, defaultNumOfSlots) }, +} + +func (s *StateDB) PutSyncPool() { + for key := range s.parallel.codeReadsInSlot { + delete(s.parallel.codeReadsInSlot, key) + } + addressToBytesPool.Put(s.parallel.codeReadsInSlot) + + for key := range s.parallel.codeHashReadsInSlot { + delete(s.parallel.codeHashReadsInSlot, key) + } + addressToHashPool.Put(s.parallel.codeHashReadsInSlot) + + for key := range s.parallel.codeChangesInSlot { + 
delete(s.parallel.codeChangesInSlot, key) + } + addressToStructPool.Put(s.parallel.codeChangesInSlot) + + for key := range s.parallel.kvChangesInSlot { + delete(s.parallel.kvChangesInSlot, key) + } + addressToStateKeysPool.Put(s.parallel.kvChangesInSlot) + + for key := range s.parallel.kvReadsInSlot { + delete(s.parallel.kvReadsInSlot, key) + } + addressToStoragePool.Put(s.parallel.kvReadsInSlot) + + for key := range s.parallel.balanceChangesInSlot { + delete(s.parallel.balanceChangesInSlot, key) + } + addressToStructPool.Put(s.parallel.balanceChangesInSlot) + + for key := range s.parallel.balanceReadsInSlot { + delete(s.parallel.balanceReadsInSlot, key) + } + balancePool.Put(s.parallel.balanceReadsInSlot) + + for key := range s.parallel.addrStateReadsInSlot { + delete(s.parallel.addrStateReadsInSlot, key) + } + addressToBoolPool.Put(s.parallel.addrStateReadsInSlot) + + for key := range s.parallel.addrStateChangesInSlot { + delete(s.parallel.addrStateChangesInSlot, key) + } + addressToBoolPool.Put(s.parallel.addrStateChangesInSlot) + + for key := range s.parallel.nonceChangesInSlot { + delete(s.parallel.nonceChangesInSlot, key) + } + addressToStructPool.Put(s.parallel.nonceChangesInSlot) + + for key := range s.parallel.nonceReadsInSlot { + delete(s.parallel.nonceReadsInSlot, key) + } + addressToUintPool.Put(s.parallel.nonceReadsInSlot) + + for key := range s.parallel.addrSnapDestructsReadsInSlot { + delete(s.parallel.addrSnapDestructsReadsInSlot, key) + } + addressToBoolPool.Put(s.parallel.addrSnapDestructsReadsInSlot) + + for key := range s.parallel.dirtiedStateObjectsInSlot { + delete(s.parallel.dirtiedStateObjectsInSlot, key) + } + addressToStateObjectsPool.Put(s.parallel.dirtiedStateObjectsInSlot) + + for key := range s.stateObjectsPending { + delete(s.stateObjectsPending, key) + } + addressToStructPool.Put(s.stateObjectsPending) + + for key := range s.stateObjectsDirty { + delete(s.stateObjectsDirty, key) + } + addressToStructPool.Put(s.stateObjectsDirty) + + for key := range s.logs { + delete(s.logs, key) + } + logsPool.Put(s.logs) + + for key := range s.journal.dirties { + delete(s.journal.dirties, key) + } + s.journal.entries = s.journal.entries[:0] + journalPool.Put(s.journal) + + for key := range s.snapDestructs { + delete(s.snapDestructs, key) + } + addressToStructPool.Put(s.snapDestructs) + + for key := range s.snapAccounts { + delete(s.snapAccounts, key) + } + addressToBytesPool.Put(s.snapAccounts) + + for key, storage := range s.snapStorage { + for key := range storage { + delete(storage, key) + } + snapStorageValuePool.Put(storage) + delete(s.snapStorage, key) + } + snapStoragePool.Put(s.snapStorage) +} + +// CopyForSlot copy all the basic fields, initialize the memory ones +func (s *StateDB) CopyForSlot() *ParallelStateDB { + parallel := ParallelState{ + // use base(dispatcher) slot db's stateObjects. 
+ // It is a SyncMap, only readable to slot, not writable + stateObjects: s.parallel.stateObjects, + codeReadsInSlot: addressToBytesPool.Get().(map[common.Address][]byte), + codeHashReadsInSlot: addressToHashPool.Get().(map[common.Address]common.Hash), + codeChangesInSlot: addressToStructPool.Get().(map[common.Address]struct{}), + kvChangesInSlot: addressToStateKeysPool.Get().(map[common.Address]StateKeys), + kvReadsInSlot: addressToStoragePool.Get().(map[common.Address]Storage), + balanceChangesInSlot: addressToStructPool.Get().(map[common.Address]struct{}), + balanceReadsInSlot: balancePool.Get().(map[common.Address]*big.Int), + addrStateReadsInSlot: addressToBoolPool.Get().(map[common.Address]bool), + addrStateChangesInSlot: addressToBoolPool.Get().(map[common.Address]bool), + nonceChangesInSlot: addressToStructPool.Get().(map[common.Address]struct{}), + nonceReadsInSlot: addressToUintPool.Get().(map[common.Address]uint64), + addrSnapDestructsReadsInSlot: addressToBoolPool.Get().(map[common.Address]bool), + isSlotDB: true, + dirtiedStateObjectsInSlot: addressToStateObjectsPool.Get().(map[common.Address]*StateObject), + } + state := &ParallelStateDB{ + StateDB: StateDB{ + db: s.db, + trie: nil, // Parallel StateDB can not access trie, since it is concurrent safe. + stateObjects: make(map[common.Address]*StateObject), // replaced by parallel.stateObjects in parallel mode + stateObjectsPending: addressToStructPool.Get().(map[common.Address]struct{}), + stateObjectsDirty: addressToStructPool.Get().(map[common.Address]struct{}), + refund: 0, // should be 0 + logs: logsPool.Get().(map[common.Hash][]*types.Log), + logSize: 0, + preimages: make(map[common.Hash][]byte, len(s.preimages)), + journal: journalPool.Get().(*journal), + hasher: crypto.NewKeccakState(), + isParallel: true, + parallel: parallel, + }, + wbnbMakeUp: true, + } + // no need to copy preimages, comment out and remove later + // for hash, preimage := range s.preimages { + // state.preimages[hash] = preimage + // } + + if s.snaps != nil { + // In order for the miner to be able to use and make additions + // to the snapshot tree, we need to copy that as well. + // Otherwise, any block mined by ourselves will cause gaps in the tree, + // and force the miner to operate trie-backed only + state.snaps = s.snaps + state.snap = s.snap + // deep copy needed + state.snapDestructs = addressToStructPool.Get().(map[common.Address]struct{}) + s.snapParallelLock.RLock() + for k, v := range s.snapDestructs { + state.snapDestructs[k] = v + } + s.snapParallelLock.RUnlock() + // snapAccounts is useless in SlotDB, comment out and remove later + // state.snapAccounts = make(map[common.Address][]byte) // snapAccountPool.Get().(map[common.Address][]byte) + // for k, v := range s.snapAccounts { + // state.snapAccounts[k] = v + // } + + // snapStorage is useless in SlotDB either, it is updated on updateTrie, which is validation phase to update the snapshot of a finalized block. + // state.snapStorage = snapStoragePool.Get().(map[common.Address]map[string][]byte) + // for k, v := range s.snapStorage { + // temp := snapStorageValuePool.Get().(map[string][]byte) + // for kk, vv := range v { + // temp[kk] = vv + // } + // state.snapStorage[k] = temp + // } + + // trie prefetch should be done by dispatcher on StateObject Merge, + // disable it in parallel slot + // state.prefetcher = s.prefetcher + } + + return state +} + // Snapshot returns an identifier for the current revision of the state. 
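CopyForSlot leans on the sync.Pool instances above: every per-slot map is borrowed from a pool, and PutSyncPool clears and returns them, so per-transaction map allocations are amortized across blocks. The pattern in isolation (names are ours, not the PR's):

```go
// Minimal sketch of the borrow/clear/return pool pattern behind
// CopyForSlot and PutSyncPool.
package main

import (
	"fmt"
	"sync"
)

const defaultNumOfSlots = 100

var intMapPool = sync.Pool{
	New: func() interface{} { return make(map[string]int, defaultNumOfSlots) },
}

func borrow() map[string]int { return intMapPool.Get().(map[string]int) }

// release clears the map in place (keeping its allocated buckets) and
// hands it back to the pool, just as PutSyncPool does field by field.
func release(m map[string]int) {
	for k := range m {
		delete(m, k)
	}
	intMapPool.Put(m)
}

func main() {
	m := borrow()
	m["reads"] = 3
	fmt.Println(len(m)) // 1
	release(m)
	m2 := borrow()       // likely the same backing map, now empty
	fmt.Println(len(m2)) // 0
}
```

Deleting keys instead of allocating a fresh map is the point: Go keeps the map's bucket storage, so the next transaction in the same slot starts with pre-sized maps.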
 func (s *StateDB) Snapshot() int {
 	id := s.nextRevisionId
@@ -958,10 +1371,22 @@ func (s *StateDB) WaitPipeVerification() error {
 // Finalise finalises the state by removing the s destructed objects and clears
 // the journal as well as the refunds. Finalise, however, will not push any updates
 // into the tries just yet. Only IntermediateRoot or Commit will do that.
-func (s *StateDB) Finalise(deleteEmptyObjects bool) {
+func (s *StateDB) Finalise(deleteEmptyObjects bool) { // fixme: not concurrency-safe yet...
 	addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties))
 	for addr := range s.journal.dirties {
-		obj, exist := s.stateObjects[addr]
+		var obj *StateObject
+		var exist bool
+		if s.parallel.isSlotDB {
+			obj = s.parallel.dirtiedStateObjectsInSlot[addr]
+			if obj != nil {
+				exist = true
+			} else {
+				log.Error("StateDB Finalise dirty addr not in dirtiedStateObjectsInSlot",
+					"addr", addr)
+			}
+		} else {
+			obj, exist = s.getStateObjectFromStateObjects(addr)
+		}
 		if !exist {
 			// ripeMD is 'touched' at block 1714175, in tx 0x1237f737031e40bcde4a8b7e717b2d15e3ecadfe49bb1bbc71ee9deb09c6fcf2
 			// That tx goes out of gas, and although the notion of 'touched' does not exist there, the
@@ -972,19 +1397,29 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
 			continue
 		}
 		if obj.suicided || (deleteEmptyObjects && obj.empty()) {
+			if s.parallel.isSlotDB {
+				s.parallel.addrStateChangesInSlot[addr] = false // false: deleted
+			}
 			obj.deleted = true
 			// If state snapshotting is active, also mark the destruction there.
 			// Note, we can't do this only at the end of a block because multiple
-			// transactions within the same block might self destruct and then
+			// transactions within the same block might self-destruct and then
 			// ressurrect an account; but the snapshotter needs both events.
 			if s.snap != nil {
+				s.snapParallelLock.Lock()
 				s.snapDestructs[obj.address] = struct{}{} // We need to maintain account deletions explicitly (will remain set indefinitely)
-				delete(s.snapAccounts, obj.address)       // Clear out any previously updated account data (may be recreated via a ressurrect)
-				delete(s.snapStorage, obj.address)        // Clear out any previously updated storage data (may be recreated via a ressurrect)
+				s.snapParallelLock.Unlock()
+				delete(s.snapAccounts, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect)
+				delete(s.snapStorage, obj.address)  // Clear out any previously updated storage data (may be recreated via a resurrect)
 			}
 		} else {
-			obj.finalise(true) // Prefetch slots in the background
+			// 1. non-parallel mode: do obj.finalise(true) as normal
+			// 2. parallel mode: do obj.finalise(true) on the dispatcher, not on the slot routine;
+			//    obj.finalise(true) would clear its dirtyStorage, which would break prefetch.
+ if !s.isParallel || !s.parallel.isSlotDB { + obj.finalise(true) // Prefetch slots in the background + } } if _, exist := s.stateObjectsPending[addr]; !exist { s.stateObjectsPending[addr] = struct{}{} @@ -1055,7 +1490,7 @@ func (s *StateDB) CorrectAccountsRoot(blockRoot common.Hash) { //PopulateSnapAccountAndStorage tries to populate required accounts and storages for pipecommit func (s *StateDB) PopulateSnapAccountAndStorage() { for addr := range s.stateObjectsPending { - if obj := s.stateObjects[addr]; !obj.deleted { + if obj, _ := s.getStateObjectFromStateObjects(addr); !obj.deleted { if s.snap != nil { s.populateSnapStorage(obj) s.snapAccounts[obj.address] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash) @@ -1066,14 +1501,17 @@ func (s *StateDB) PopulateSnapAccountAndStorage() { //populateSnapStorage tries to populate required storages for pipecommit, and returns a flag to indicate whether the storage root changed or not func (s *StateDB) populateSnapStorage(obj *StateObject) bool { - for key, value := range obj.dirtyStorage { - obj.pendingStorage[key] = value - } - if len(obj.pendingStorage) == 0 { + obj.dirtyStorage.Range(func(key, value interface{}) bool { + obj.pendingStorage.StoreValue(key.(common.Hash), value.(common.Hash)) + return true + }) + if obj.pendingStorage.Length() == 0 { return false } var storage map[string][]byte - for key, value := range obj.pendingStorage { + obj.pendingStorage.Range(func(keyItf, valueItf interface{}) bool { + key := keyItf.(common.Hash) + value := valueItf.(common.Hash) var v []byte if (value != common.Hash{}) { // Encoding []byte cannot fail, ok to ignore the error. @@ -1090,16 +1528,17 @@ func (s *StateDB) populateSnapStorage(obj *StateObject) bool { } storage[string(key[:])] = v // v will be nil if value is 0x00 } - } + return true + }) return true } func (s *StateDB) AccountsIntermediateRoot() { - tasks := make(chan func()) + tasks := make(chan func()) // use buffer chan? finishCh := make(chan struct{}) defer close(finishCh) wg := sync.WaitGroup{} - for i := 0; i < runtime.NumCPU(); i++ { + for i := 0; i < runtime.NumCPU(); i++ { // more the cpu num since there are async IO operation go func() { for { select { @@ -1115,14 +1554,13 @@ func (s *StateDB) AccountsIntermediateRoot() { // Although naively it makes sense to retrieve the account trie and then do // the contract storage and account updates sequentially, that short circuits // the account prefetcher. Instead, let's process all the storage updates - // first, giving the account prefeches just a few more milliseconds of time + // first, giving the account prefetches just a few more milliseconds of time // to pull useful data from disk. for addr := range s.stateObjectsPending { - if obj := s.stateObjects[addr]; !obj.deleted { + if obj, _ := s.getStateObjectFromStateObjects(addr); !obj.deleted { wg.Add(1) tasks <- func() { obj.updateRoot(s.db) - // If state snapshotting is active, cache the data til commit. 
Note, this // update mechanism is not symmetric to the deletion, because whereas it is // enough to track account updates at commit time, deletions need tracking @@ -1175,7 +1613,7 @@ func (s *StateDB) StateIntermediateRoot() common.Hash { usedAddrs := make([][]byte, 0, len(s.stateObjectsPending)) if !s.noTrie { for addr := range s.stateObjectsPending { - if obj := s.stateObjects[addr]; obj.deleted { + if obj, _ := s.getStateObjectFromStateObjects(addr); obj.deleted { s.deleteStateObject(obj) } else { s.updateStateObject(obj) @@ -1214,7 +1652,7 @@ func (s *StateDB) clearJournalAndRefund() { s.journal = newJournal() s.refund = 0 } - s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entires + s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entries } func (s *StateDB) LightCommit() (common.Hash, *types.DiffLayer, error) { @@ -1421,7 +1859,7 @@ func (s *StateDB) Commit(failPostCommitFunc func(), postCommitFuncs ...func() er } for addr := range s.stateObjectsDirty { - if obj := s.stateObjects[addr]; !obj.deleted { + if obj, _ := s.getStateObjectFromStateObjects(addr); !obj.deleted { // Write any contract code associated with the state object tasks <- func() { // Write any storage changes in the state object to its storage trie @@ -1497,7 +1935,7 @@ func (s *StateDB) Commit(failPostCommitFunc func(), postCommitFuncs ...func() er func() error { codeWriter := s.db.TrieDB().DiskDB().NewBatch() for addr := range s.stateObjectsDirty { - if obj := s.stateObjects[addr]; !obj.deleted { + if obj, _ := s.getStateObjectFromStateObjects(addr); !obj.deleted { if obj.code != nil && obj.dirtyCode { rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code) obj.dirtyCode = false @@ -1723,3 +2161,1611 @@ func (s *StateDB) GetDirtyAccounts() []common.Address { func (s *StateDB) GetStorage(address common.Address) *sync.Map { return s.storagePool.getStorage(address) } + +// PrepareForParallel prepares for state db to be used in parallel execution mode. 
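PrepareForParallel is the entry point of a larger lifecycle that this hunk only partially shows: the dispatcher prepares the base StateDB, hands a slot copy to each transaction, records each slot's result in the shared unconfirmed map, and merges results back in transaction order. A schematic sketch under those assumptions (all types here are toys; the real flow, including conflict detection and redo, lives in the parallel state processor and is not part of this hunk):

```go
// Schematic dispatcher/slot lifecycle: execute in parallel, merge in order.
package main

import (
	"fmt"
	"sync"
)

type slotResult struct {
	txIndex int
	writes  map[string]int // stand-in for dirtied state objects
}

func main() {
	txs := []int{0, 1, 2, 3}
	results := make([]slotResult, len(txs))
	var unconfirmedDBs sync.Map // txIndex -> *slotResult, as NewSlotDB receives it

	var wg sync.WaitGroup
	for _, i := range txs {
		wg.Add(1)
		go func(txIndex int) { // one execution slot per tx (simplified: no redo, no conflicts)
			defer wg.Done()
			res := slotResult{txIndex: txIndex, writes: map[string]int{"nonce": txIndex}}
			unconfirmedDBs.Store(txIndex, &res) // visible to later txs as "unconfirmed"
			results[txIndex] = res
		}(i)
	}
	wg.Wait()

	// merge in tx order, like MergeSlotDB being called with increasing txIndex
	mainState := map[string]int{}
	for _, res := range results {
		for k, v := range res.writes {
			mainState[k] = v
		}
	}
	fmt.Println(mainState["nonce"]) // 3: the in-order merge keeps the last writer
}
```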
+func (s *StateDB) PrepareForParallel() { + s.isParallel = true + s.parallel.stateObjects = &StateObjectSyncMap{} +} + +func (s *StateDB) AddrPrefetch(slotDb *ParallelStateDB) { + addressesToPrefetch := make([][]byte, 0, len(slotDb.parallel.dirtiedStateObjectsInSlot)) + for addr, obj := range slotDb.parallel.dirtiedStateObjectsInSlot { + addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure + if obj.deleted { + continue + } + // copied from obj.finalise(true) + slotsToPrefetch := make([][]byte, 0, obj.dirtyStorage.Length()) + obj.dirtyStorage.Range(func(key, value interface{}) bool { + originalValue, _ := obj.originStorage.GetValue(key.(common.Hash)) + if value.(common.Hash) != originalValue { + originalKey := key.(common.Hash) + slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(originalKey[:])) // Copy needed for closure + } + return true + }) + if s.prefetcher != nil && len(slotsToPrefetch) > 0 && obj.data.Root != emptyRoot { + s.prefetcher.prefetch(obj.data.Root, slotsToPrefetch, obj.addrHash) + } + } + + if s.prefetcher != nil && len(addressesToPrefetch) > 0 { + // log.Info("AddrPrefetch", "slotDb.TxIndex", slotDb.TxIndex(), + // "len(addressesToPrefetch)", len(slotDb.parallel.addressesToPrefetch)) + s.prefetcher.prefetch(s.originalRoot, addressesToPrefetch, emptyAddr) + } +} + +// MergeSlotDB is for Parallel execution mode, when the transaction has been +// finalized(dirty -> pending) on execution slot, the execution results should be +// merged back to the main StateDB. +func (s *StateDB) MergeSlotDB(slotDb *ParallelStateDB, slotReceipt *types.Receipt, txIndex int) { + // receipt.Logs use unified log index within a block + // align slotDB's log index to the block stateDB's logSize + for _, l := range slotReceipt.Logs { + l.Index += s.logSize + } + s.logSize += slotDb.logSize + + // before merge, pay the gas fee first: AddBalance to consensus.SystemAddress + systemAddress := slotDb.parallel.systemAddress + if slotDb.parallel.keepSystemAddressBalance { + s.SetBalance(systemAddress, slotDb.GetBalance(systemAddress)) + } else { + s.AddBalance(systemAddress, slotDb.GetBalance(systemAddress)) + } + // system address is EOA account, it should have no storage change + delete(slotDb.stateObjectsDirty, systemAddress) + // only merge dirty objects + addressesToPrefetch := make([][]byte, 0, len(slotDb.stateObjectsDirty)) + for addr := range slotDb.stateObjectsDirty { + if _, exist := s.stateObjectsDirty[addr]; !exist { + s.stateObjectsDirty[addr] = struct{}{} + } + + // stateObjects: KV, balance, nonce... + dirtyObj, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr] + if !ok { + log.Error("parallel merge, but dirty object not exist!", "SlotIndex", slotDb.parallel.SlotIndex, "txIndex:", slotDb.txIndex, "addr", addr) + continue + } + mainObj, exist := s.loadStateObj(addr) + if !exist { // fixme: it is also state change + // addr not exist on main DB, do ownership transfer + // dirtyObj.db = s + // dirtyObj.finalise(true) // true: prefetch on dispatcher + mainObj = dirtyObj.deepCopy(s) + if addr == WBNBAddress && slotDb.wbnbMakeUpBalance != nil { + mainObj.setBalance(slotDb.wbnbMakeUpBalance) + } + mainObj.finalise(true) + s.storeStateObj(addr, mainObj) + // fixme: should not delete, would cause unconfirmed DB incorrect? + // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read? + if dirtyObj.deleted { + // remove the addr from snapAccounts&snapStorage only when object is deleted. 
+ // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for + // snapDestructs to destroy previous object, while it will keep the addr in snapAccounts & snapAccounts + delete(s.snapAccounts, addr) + delete(s.snapStorage, addr) + } + } else { + // addr already in main DB, do merge: balance, KV, code, State(create, suicide) + // can not do copy or ownership transfer directly, since dirtyObj could have outdated + // data(maybe updated within the conflict window) + + var newMainObj = mainObj // we don't need to copy the object since the storages are thread safe + if _, ok := slotDb.parallel.addrStateChangesInSlot[addr]; ok { + // there are 3 kinds of state change: + // 1.Suicide + // 2.Empty Delete + // 3.createObject + // a: AddBalance,SetState to a non-exist or deleted(suicide, empty delete) address. + // b: CreateAccount: like DAO the fork, regenerate an account carry its balance without KV + // For these state change, do ownership transfer for efficiency: + // dirtyObj.db = s + // newMainObj = dirtyObj + newMainObj = dirtyObj.deepCopy(s) + // should not delete, would cause unconfirmed DB incorrect. + // delete(slotDb.parallel.dirtiedStateObjectsInSlot, addr) // transfer ownership, fixme: shared read? + if dirtyObj.deleted { + // remove the addr from snapAccounts&snapStorage only when object is deleted. + // "deleted" is not equal to "snapDestructs", since createObject() will add an addr for + // snapDestructs to destroy previous object, while it will keep the addr in snapAccounts & snapAccounts + delete(s.snapAccounts, addr) + delete(s.snapStorage, addr) + } + } else { + // deepCopy a temporary *StateObject for safety, since slot could read the address, + // dispatch should avoid overwrite the StateObject directly otherwise, it could + // crash for: concurrent map iteration and map write + + if _, balanced := slotDb.parallel.balanceChangesInSlot[addr]; balanced { + newMainObj.setBalance(dirtyObj.Balance()) + } + if _, coded := slotDb.parallel.codeChangesInSlot[addr]; coded { + newMainObj.code = dirtyObj.code + newMainObj.data.CodeHash = dirtyObj.data.CodeHash + newMainObj.dirtyCode = true + } + if keys, stated := slotDb.parallel.kvChangesInSlot[addr]; stated { + newMainObj.MergeSlotObject(s.db, dirtyObj, keys) + } + if _, nonced := slotDb.parallel.nonceChangesInSlot[addr]; nonced { + // dirtyObj.Nonce() should not be less than newMainObj + newMainObj.setNonce(dirtyObj.Nonce()) + } + } + if addr == WBNBAddress && slotDb.wbnbMakeUpBalance != nil { + newMainObj.setBalance(slotDb.wbnbMakeUpBalance) + } + newMainObj.finalise(true) // true: prefetch on dispatcher + // update the object + s.storeStateObj(addr, newMainObj) + } + addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure + } + + if s.prefetcher != nil && len(addressesToPrefetch) > 0 { + s.prefetcher.prefetch(s.originalRoot, addressesToPrefetch, emptyAddr) // prefetch for trie node of account + } + + for addr := range slotDb.stateObjectsPending { + if _, exist := s.stateObjectsPending[addr]; !exist { + s.stateObjectsPending[addr] = struct{}{} + } + } + + // slotDb.logs: logs will be kept in receipts, no need to do merge + + for hash, preimage := range slotDb.preimages { + s.preimages[hash] = preimage + } + if s.accessList != nil { + // fixme: accessList is not enabled yet, but it should use merge rather than overwrite Copy + s.accessList = slotDb.accessList.Copy() + } + + if slotDb.snaps != nil { + for k := range slotDb.snapDestructs { + // There could be a 
race condition for parallel transaction execution + // One transaction add balance 0 to an empty address, will delete it(delete empty is enabled). + // While another concurrent transaction could add a none-zero balance to it, make it not empty + // We fixed it by add an addr state read record for add balance 0 + s.snapParallelLock.Lock() + s.snapDestructs[k] = struct{}{} + s.snapParallelLock.Unlock() + } + + // slotDb.snapAccounts should be empty, comment out and to be deleted later + // for k, v := range slotDb.snapAccounts { + // s.snapAccounts[k] = v + // } + // slotDb.snapStorage should be empty, comment out and to be deleted later + // for k, v := range slotDb.snapStorage { + // temp := make(map[string][]byte) + // for kk, vv := range v { + // temp[kk] = vv + // } + // s.snapStorage[k] = temp + // } + } + s.txIndex = txIndex +} + +func (s *StateDB) ParallelMakeUp(common.Address, []byte) { + // do nothing, this API is for parallel mode +} + +type ParallelKvCheckUnit struct { + addr common.Address + key common.Hash + val common.Hash +} +type ParallelKvCheckMessage struct { + slotDB *ParallelStateDB + isStage2 bool + kvUnit ParallelKvCheckUnit +} + +var parallelKvCheckReqCh chan ParallelKvCheckMessage +var parallelKvCheckResCh chan bool + +type ParallelStateDB struct { + StateDB + wbnbMakeUp bool // default true, we can not do WBNB make up if its absolute balance is used. + wbnbMakeUpBalance *big.Int +} + +func hasKvConflict(slotDB *ParallelStateDB, addr common.Address, key common.Hash, val common.Hash, isStage2 bool) bool { + mainDB := slotDB.parallel.baseStateDB + + if isStage2 { // update slotDB's unconfirmed DB list and try + if valUnconfirm, ok := slotDB.getKVFromUnconfirmedDB(addr, key); ok { + if !bytes.Equal(val.Bytes(), valUnconfirm.Bytes()) { + log.Debug("IsSlotDBReadsValid KV read is invalid in unconfirmed", "addr", addr, + "valSlot", val, "valUnconfirm", valUnconfirm, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return true + } + } + } + valMain := mainDB.GetState(addr, key) + if !bytes.Equal(val.Bytes(), valMain.Bytes()) { + log.Debug("hasKvConflict is invalid", "addr", addr, + "key", key, "valSlot", val, + "valMain", valMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return true // return false, Range will be terminated. + } + return false +} + +// StartKvCheckLoop start several routines to do conflict check +func StartKvCheckLoop() { + parallelKvCheckReqCh = make(chan ParallelKvCheckMessage, 200) + parallelKvCheckResCh = make(chan bool, 10) + for i := 0; i < runtime.NumCPU(); i++ { + go func() { + for { + kvEle1 := <-parallelKvCheckReqCh + parallelKvCheckResCh <- hasKvConflict(kvEle1.slotDB, kvEle1.kvUnit.addr, + kvEle1.kvUnit.key, kvEle1.kvUnit.val, kvEle1.isStage2) + } + }() + } +} + +// NewSlotDB creates a new State DB based on the provided StateDB. +// With parallel, each execution slot would have its own StateDB. 
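StartKvCheckLoop above is a plain request/response worker pool: NumCPU goroutines drain one channel of check requests and push verdicts to another, so the conflict check for a storage-heavy transaction fans out across cores. The same pattern in a self-contained form (a toy payload stands in for the StateDB reads):

```go
// Reduced worker-pool sketch of the StartKvCheckLoop pattern.
package main

import (
	"fmt"
	"runtime"
)

type checkReq struct{ a, b string } // stand-in for (slot value, main value)

func startCheckLoop(reqCh chan checkReq, resCh chan bool) {
	for i := 0; i < runtime.NumCPU(); i++ {
		go func() {
			for req := range reqCh {
				resCh <- req.a != req.b // "conflict" when the values differ
			}
		}()
	}
}

func main() {
	reqCh := make(chan checkReq, 200) // buffered, as in the PR
	resCh := make(chan bool, 10)
	startCheckLoop(reqCh, resCh)

	pairs := []checkReq{{"x", "x"}, {"x", "y"}, {"k", "k"}}
	for _, p := range pairs {
		reqCh <- p
	}
	conflicts := 0
	for range pairs {
		if <-resCh {
			conflicts++
		}
	}
	fmt.Println(conflicts) // 1
}
```

The buffered request channel lets the validator enqueue a whole transaction's KV reads without blocking; the caller counts responses, so per-request ordering on the result channel does not matter.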
+func NewSlotDB(db *StateDB, systemAddr common.Address, txIndex int, baseTxIndex int, keepSystem bool, + unconfirmedDBs *sync.Map /*map[int]*ParallelStateDB*/) *ParallelStateDB { + slotDB := db.CopyForSlot() + slotDB.txIndex = txIndex + slotDB.originalRoot = db.originalRoot + slotDB.parallel.baseStateDB = db + slotDB.parallel.baseTxIndex = baseTxIndex + slotDB.parallel.systemAddress = systemAddr + slotDB.parallel.systemAddressOpsCount = 0 + slotDB.parallel.keepSystemAddressBalance = keepSystem + slotDB.storagePool = NewStoragePool() + slotDB.EnableWriteOnSharedStorage() + slotDB.parallel.unconfirmedDBs = unconfirmedDBs + + // All transactions will pay gas fee to the systemAddr at the end, this address is + // deemed to conflict, we handle it specially, clear it now and set it back to the main + // StateDB later; + // But there are transactions that will try to read systemAddr's balance, such as: + // https://bscscan.com/tx/0xcd69755be1d2f55af259441ff5ee2f312830b8539899e82488a21e85bc121a2a. + // It will trigger transaction redo and keepSystem will be marked as true. + if !keepSystem { + slotDB.SetBalance(systemAddr, big.NewInt(0)) + } + + return slotDB +} + +// RevertSlotDB keep the Read list for conflict detect, +// discard all state changes except: +// - nonce and balance of from address +// - balance of system address: will be used on merge to update SystemAddress's balance +func (s *ParallelStateDB) RevertSlotDB(from common.Address) { + s.parallel.kvChangesInSlot = make(map[common.Address]StateKeys) + + s.parallel.nonceChangesInSlot = make(map[common.Address]struct{}) + s.parallel.balanceChangesInSlot = make(map[common.Address]struct{}, 1) + s.parallel.addrStateChangesInSlot = make(map[common.Address]bool) // 0: created, 1: deleted + + selfStateObject := s.parallel.dirtiedStateObjectsInSlot[from] + systemAddress := s.parallel.systemAddress + systemStateObject := s.parallel.dirtiedStateObjectsInSlot[systemAddress] + s.parallel.dirtiedStateObjectsInSlot = make(map[common.Address]*StateObject, 2) + // keep these elements + s.parallel.dirtiedStateObjectsInSlot[from] = selfStateObject + s.parallel.dirtiedStateObjectsInSlot[systemAddress] = systemStateObject + s.parallel.balanceChangesInSlot[from] = struct{}{} + s.parallel.balanceChangesInSlot[systemAddress] = struct{}{} + s.parallel.nonceChangesInSlot[from] = struct{}{} +} + +func (s *ParallelStateDB) getBaseStateDB() *StateDB { + return &s.StateDB +} + +func (s *ParallelStateDB) SetSlotIndex(index int) { + s.parallel.SlotIndex = index +} + +// for parallel execution mode, try to get dirty StateObject in slot first. +// it is mainly used by journal revert right now. +func (s *ParallelStateDB) getStateObject(addr common.Address) *StateObject { + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + return obj + } + // can not call s.StateDB.getStateObject(), since `newObject` need ParallelStateDB as the interface + return s.getStateObjectNoSlot(addr) +} + +func (s *ParallelStateDB) storeStateObj(addr common.Address, stateObject *StateObject) { + // When a state object is stored into s.parallel.stateObjects, + // it belongs to base StateDB, it is confirmed and valid. 
+	stateObject.db = s.parallel.baseStateDB
+	stateObject.dbItf = s.parallel.baseStateDB
+	// the object could have been created in a SlotDB: the slot got the object from
+	// DB and publishes it into the shared `s.parallel.stateObjects`
+	stateObject.db.storeParallelLock.Lock()
+	if _, ok := s.parallel.stateObjects.Load(addr); !ok {
+		s.parallel.stateObjects.Store(addr, stateObject)
+	}
+	stateObject.db.storeParallelLock.Unlock()
+}
+
+func (s *ParallelStateDB) getStateObjectNoSlot(addr common.Address) *StateObject {
+	if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted {
+		return obj
+	}
+	return nil
+}
+
+// createObject creates a new state object. If there is an existing account with
+// the given address, it is overwritten.
+
+// prev is used for CreateAccount to get its balance
+// Parallel mode:
+// if prev is in dirty: revert is ok
+// if prev is in unconfirmed DB: addr state read record, revert should not put it back
+// if prev is in main DB: addr state read record, revert should not put it back
+// if prev does not exist: addr state read record
+
+// `prev` is used to handle revert, to recover with the `prev` object
+// In Parallel mode, we only need to recover to `prev` in SlotDB,
+// a. if it is not in SlotDB, `revert` will remove it from the SlotDB
+// b. if it exists in SlotDB, `revert` will recover to the `prev` in SlotDB
+// c. the same applies to `snapDestructs`
+func (s *ParallelStateDB) createObject(addr common.Address) (newobj *StateObject) {
+	// do not get it from the unconfirmed DB, since that would cause problems on revert
+	prev := s.parallel.dirtiedStateObjectsInSlot[addr]
+
+	var prevdestruct bool
+
+	if s.snap != nil && prev != nil {
+		s.snapParallelLock.Lock()
+		_, prevdestruct = s.snapDestructs[prev.address]
+		s.parallel.addrSnapDestructsReadsInSlot[addr] = prevdestruct
+		if !prevdestruct {
+			// Destroy the previous trie node first and update the trie tree
+			// with the new object on block commit.
+			s.snapDestructs[prev.address] = struct{}{}
+		}
+		s.snapParallelLock.Unlock()
+	}
+	newobj = newObject(s, s.isParallel, addr, types.StateAccount{})
+	newobj.setNonce(0) // sets the object to dirty
+	if prev == nil {
+		s.journal.append(createObjectChange{account: &addr})
+	} else {
+		s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct})
+	}
+
+	s.parallel.addrStateChangesInSlot[addr] = true // the object is created
+	s.parallel.nonceChangesInSlot[addr] = struct{}{}
+	s.parallel.balanceChangesInSlot[addr] = struct{}{}
+	s.parallel.codeChangesInSlot[addr] = struct{}{}
+	// notice: all the KVs are cleared, if any
+	s.parallel.kvChangesInSlot[addr] = make(StateKeys)
+	return newobj
+}
+
+// getDeletedStateObject is similar to getStateObject, but instead of returning
+// nil for a deleted state object, it returns the actual object with the deleted
+// flag set. This is needed by the state journal to revert to the correct
+// self-destructed object instead of wiping all knowledge about the state object.
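storeStateObj's locked load-then-store is first-write-wins: once any slot publishes an object for an address into the shared map, later publishers keep the existing one (the lock is held because the object's db fields are rewired just before publication). sync.Map.LoadOrStore expresses the same contract on its own, shown here with string stand-ins:

```go
// First-write-wins publication into a shared sync.Map.
package main

import (
	"fmt"
	"sync"
)

func main() {
	var shared sync.Map

	first, loaded := shared.LoadOrStore("0xabc", "object-from-slot-1")
	fmt.Println(first, loaded) // object-from-slot-1 false (we stored it)

	second, loaded := shared.LoadOrStore("0xabc", "object-from-slot-2")
	fmt.Println(second, loaded) // object-from-slot-1 true (the first publication is kept)
}
```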
+func (s *ParallelStateDB) getDeletedStateObject(addr common.Address) *StateObject { + // Prefer live objects if any is available + if obj, _ := s.getStateObjectFromStateObjects(addr); obj != nil { + return obj + } + data, ok := s.getStateObjectFromSnapshotOrTrie(addr) + if !ok { + return nil + } + // this is why we have to use a separate getDeletedStateObject for ParallelStateDB + // `s` has to be the ParallelStateDB + obj := newObject(s, s.isParallel, addr, *data) + s.storeStateObj(addr, obj) + return obj +} + +// GetOrNewStateObject retrieves a state object or create a new state object if nil. +// dirtyInSlot -> Unconfirmed DB -> main DB -> snapshot, no? create one +func (s *ParallelStateDB) GetOrNewStateObject(addr common.Address) *StateObject { + var stateObject *StateObject = nil + if stateObject, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + return stateObject + } + + stateObject, _ = s.getStateObjectFromUnconfirmedDB(addr) + if stateObject == nil { + stateObject = s.getStateObjectNoSlot(addr) // try to get from base db + } + + exist := true + if stateObject == nil || stateObject.deleted || stateObject.suicided { + stateObject = s.createObject(addr) + exist = false + } + + s.parallel.addrStateReadsInSlot[addr] = exist // true: exist, false: not exist + return stateObject +} + +// Exist reports whether the given account address exists in the state. +// Notably this also returns true for suicided accounts. +func (s *ParallelStateDB) Exist(addr common.Address) bool { + // 1.Try to get from dirty + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // dirty object should not be deleted, since deleted is only flagged on finalise + // and if it is suicided in contract call, suicide is taken as exist until it is finalised + // todo: add a check here, to be removed later + if obj.deleted || obj.suicided { + log.Error("Exist in dirty, but marked as deleted or suicided", + "txIndex", s.txIndex, "baseTxIndex:", s.parallel.baseTxIndex) + } + return true + } + // 2.Try to get from unconfirmed & main DB + // 2.1 Already read before + if exist, ok := s.parallel.addrStateReadsInSlot[addr]; ok { + return exist + } + // 2.2 Try to get from unconfirmed DB if exist + if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + return exist + } + + // 3.Try to get from main StateDB + exist := s.getStateObjectNoSlot(addr) != nil + s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + return exist +} + +// Empty returns whether the state object is either non-existent +// or empty according to the EIP161 specification (balance = nonce = code = 0) +func (s *ParallelStateDB) Empty(addr common.Address) bool { + // 1.Try to get from dirty + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // dirty object is light copied and fixup on need, + // empty could be wrong, except it is created with this TX + if _, ok := s.parallel.addrStateChangesInSlot[addr]; ok { + return obj.empty() + } + // so we have to check it manually + // empty means: Nonce == 0 && Balance == 0 && CodeHash == emptyCodeHash + if s.GetBalance(addr).Sign() != 0 { // check balance first, since it is most likely not zero + return false + } + if s.GetNonce(addr) != 0 { + return false + } + codeHash := s.GetCodeHash(addr) + return bytes.Equal(codeHash.Bytes(), emptyCodeHash) // code is empty, the object is empty + } + // 2.Try to get from unconfirmed & main DB + // 2.1 Already read before + if exist, ok := 
s.parallel.addrStateReadsInSlot[addr]; ok { + // exist means not empty + return !exist + } + // 2.2 Try to get from unconfirmed DB if exist + if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + s.parallel.addrStateReadsInSlot[addr] = exist // update and cache + return !exist + } + + so := s.getStateObjectNoSlot(addr) + empty := so == nil || so.empty() + s.parallel.addrStateReadsInSlot[addr] = !empty // update and cache + return empty +} + +// GetBalance retrieves the balance from the given address or 0 if object not found +// GetFrom the dirty list => from unconfirmed DB => get from main stateDB +func (s *ParallelStateDB) GetBalance(addr common.Address) *big.Int { + if addr == s.parallel.systemAddress { + s.parallel.systemAddressOpsCount++ + } + // 1.Try to get from dirty + if _, ok := s.parallel.balanceChangesInSlot[addr]; ok { + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on balance fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup balance based on unconfirmed DB or main DB + return obj.Balance() + } + } + // 2.Try to get from unconfirmed DB or main DB + // 2.1 Already read before + if balance, ok := s.parallel.balanceReadsInSlot[addr]; ok { + return balance + } + // 2.2 Try to get from unconfirmed DB if exist + if balance := s.getBalanceFromUnconfirmedDB(addr); balance != nil { + s.parallel.balanceReadsInSlot[addr] = balance + return balance + } + + // 3. Try to get from main StateObject + balance := common.Big0 + stateObject := s.getStateObjectNoSlot(addr) + if stateObject != nil { + balance = stateObject.Balance() + } + s.parallel.balanceReadsInSlot[addr] = balance + return balance +} + +// GetBalanceOpCode different from GetBalance(), it is opcode triggered +func (s *ParallelStateDB) GetBalanceOpCode(addr common.Address) *big.Int { + if addr == WBNBAddress { + s.wbnbMakeUp = false + } + return s.GetBalance(addr) +} + +func (s *ParallelStateDB) GetNonce(addr common.Address) uint64 { + // 1.Try to get from dirty + if _, ok := s.parallel.nonceChangesInSlot[addr]; ok { + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on nonce fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup nonce based on unconfirmed DB or main DB + return obj.Nonce() + } + } + // 2.Try to get from unconfirmed DB or main DB + // 2.1 Already read before + if nonce, ok := s.parallel.nonceReadsInSlot[addr]; ok { + return nonce + } + // 2.2 Try to get from unconfirmed DB if exist + if nonce, ok := s.getNonceFromUnconfirmedDB(addr); ok { + s.parallel.nonceReadsInSlot[addr] = nonce + return nonce + } + + // 3.Try to get from main StateDB + var nonce uint64 = 0 + stateObject := s.getStateObjectNoSlot(addr) + if stateObject != nil { + nonce = stateObject.Nonce() + } + s.parallel.nonceReadsInSlot[addr] = nonce + return nonce +} + +func (s *ParallelStateDB) GetCode(addr common.Address) []byte { + // 1.Try to get from dirty + if _, ok := s.parallel.codeChangesInSlot[addr]; ok { + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on code fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup code based on unconfirmed DB or main DB + code := obj.Code(s.db) + return code + } + } + // 2.Try to get from unconfirmed DB or main DB + // 2.1 Already read before + if code, ok := s.parallel.codeReadsInSlot[addr]; ok { + return code + } + // 2.2 Try to get from unconfirmed DB if exist + if code, ok := s.getCodeFromUnconfirmedDB(addr); ok { + s.parallel.codeReadsInSlot[addr] = code + return 
code + } + + // 3. Try to get from main StateObject + stateObject := s.getStateObjectNoSlot(addr) + var code []byte + if stateObject != nil { + code = stateObject.Code(s.db) + } + s.parallel.codeReadsInSlot[addr] = code + return code +} + +func (s *ParallelStateDB) GetCodeSize(addr common.Address) int { + // 1.Try to get from dirty + if _, ok := s.parallel.codeChangesInSlot[addr]; ok { + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on code fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup code based on unconfirmed DB or main DB + return obj.CodeSize(s.db) + } + } + // 2.Try to get from unconfirmed DB or main DB + // 2.1 Already read before + if code, ok := s.parallel.codeReadsInSlot[addr]; ok { + return len(code) // len(nil) is 0 too + } + // 2.2 Try to get from unconfirmed DB if exist + if code, ok := s.getCodeFromUnconfirmedDB(addr); ok { + s.parallel.codeReadsInSlot[addr] = code + return len(code) // len(nil) is 0 too + } + + // 3. Try to get from main StateObject + var codeSize = 0 + var code []byte + stateObject := s.getStateObjectNoSlot(addr) + + if stateObject != nil { + code = stateObject.Code(s.db) + codeSize = stateObject.CodeSize(s.db) + } + s.parallel.codeReadsInSlot[addr] = code + return codeSize +} + +// GetCodeHash return: +// - common.Hash{}: the address does not exist +// - emptyCodeHash: the address exist, but code is empty +// - others: the address exist, and code is not empty +func (s *ParallelStateDB) GetCodeHash(addr common.Address) common.Hash { + // 1.Try to get from dirty + if _, ok := s.parallel.codeChangesInSlot[addr]; ok { + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + // on code fixup, addr may not exist in dirtiedStateObjectsInSlot + // we intend to fixup balance based on unconfirmed DB or main DB + return common.BytesToHash(obj.CodeHash()) + } + } + // 2.Try to get from unconfirmed DB or main DB + // 2.1 Already read before + if codeHash, ok := s.parallel.codeHashReadsInSlot[addr]; ok { + return codeHash + } + // 2.2 Try to get from unconfirmed DB if exist + if codeHash, ok := s.getCodeHashFromUnconfirmedDB(addr); ok { + s.parallel.codeHashReadsInSlot[addr] = codeHash + return codeHash + } + // 3. Try to get from main StateObject + stateObject := s.getStateObjectNoSlot(addr) + codeHash := common.Hash{} + if stateObject != nil { + codeHash = common.BytesToHash(stateObject.CodeHash()) + } + s.parallel.codeHashReadsInSlot[addr] = codeHash + return codeHash +} + +// GetState retrieves a value from the given account's storage trie. +// For parallel mode wih, get from the state in order: +// -> self dirty, both Slot & MainProcessor +// -> pending of self: Slot on merge +// -> pending of unconfirmed DB +// -> pending of main StateDB +// -> origin +func (s *ParallelStateDB) GetState(addr common.Address, hash common.Hash) common.Hash { + // 1.Try to get from dirty + if exist, ok := s.parallel.addrStateChangesInSlot[addr]; ok { + if !exist { + // it could be suicided within this SlotDB? + // it should be able to get state from suicided address within a Tx: + // e.g. 
within a transaction: call addr:suicide -> get state: should be ok + // return common.Hash{} + log.Info("ParallelStateDB GetState suicided", "addr", addr, "hash", hash) + } else { + obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot + return obj.GetState(s.db, hash) + } + } + if keys, ok := s.parallel.kvChangesInSlot[addr]; ok { + if _, ok := keys[hash]; ok { + obj := s.parallel.dirtiedStateObjectsInSlot[addr] // addr must exist in dirtiedStateObjectsInSlot + return obj.GetState(s.db, hash) + } + } + // 2.Try to get from unconfirmed DB or main DB + // 2.1 Already read before + if storage, ok := s.parallel.kvReadsInSlot[addr]; ok { + if val, ok := storage.GetValue(hash); ok { + return val + } + } + // 2.2 Try to get from unconfirmed DB if exist + if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { + if s.parallel.kvReadsInSlot[addr] == nil { + s.parallel.kvReadsInSlot[addr] = newStorage(false) + } + s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache + return val + } + + // 3.Get from main StateDB + stateObject := s.getStateObjectNoSlot(addr) + val := common.Hash{} + if stateObject != nil { + val = stateObject.GetState(s.db, hash) + } + if s.parallel.kvReadsInSlot[addr] == nil { + s.parallel.kvReadsInSlot[addr] = newStorage(false) + } + s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache + return val +} + +// GetCommittedState retrieves a value from the given account's committed storage trie. +func (s *ParallelStateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { + // 1.No need to get from pending of itself even on merge, since stateobject in SlotDB won't do finalise + // 2.Try to get from unconfirmed DB or main DB + // KVs in unconfirmed DB can be seen as pending storage + // KVs in main DB are merged from SlotDB and has done finalise() on merge, can be seen as pending storage too. + // 2.1 Already read before + if storage, ok := s.parallel.kvReadsInSlot[addr]; ok { + if val, ok := storage.GetValue(hash); ok { + return val + } + } + // 2.2 Try to get from unconfirmed DB if exist + if val, ok := s.getKVFromUnconfirmedDB(addr, hash); ok { + if s.parallel.kvReadsInSlot[addr] == nil { + s.parallel.kvReadsInSlot[addr] = newStorage(false) + } + s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache + return val + } + + // 3. Try to get from main DB + stateObject := s.getStateObjectNoSlot(addr) + val := common.Hash{} + if stateObject != nil { + val = stateObject.GetCommittedState(s.db, hash) + } + if s.parallel.kvReadsInSlot[addr] == nil { + s.parallel.kvReadsInSlot[addr] = newStorage(false) + } + s.parallel.kvReadsInSlot[addr].StoreValue(hash, val) // update cache + return val +} + +func (s *ParallelStateDB) HasSuicided(addr common.Address) bool { + // 1.Try to get from dirty + if obj, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; ok { + return obj.suicided + } + // 2.Try to get from unconfirmed + if exist, ok := s.getAddrStateFromUnconfirmedDB(addr); ok { + return !exist + } + + stateObject := s.getStateObjectNoSlot(addr) + if stateObject != nil { + return stateObject.suicided + } + return false +} + +// AddBalance adds amount to the account associated with addr. +func (s *ParallelStateDB) AddBalance(addr common.Address, amount *big.Int) { + // add balance will perform a read operation first + // if amount == 0, no balance change, but there is still an empty check. 
+ stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + if addr == s.parallel.systemAddress { + s.parallel.systemAddressOpsCount++ + } + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + newStateObject := stateObject.lightCopy(s) // light copy from main DB + // do balance fixup from the confirmed DB, it could be more reliable than main DB + balance := s.GetBalance(addr) // it will record the balance read operation + newStateObject.setBalance(balance) + newStateObject.AddBalance(amount) + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + s.parallel.balanceChangesInSlot[addr] = struct{}{} + return + } + // already dirty, make sure the balance is fixed up since it could be previously dirtied by nonce or KV... + if addr != s.parallel.systemAddress { + balance := s.GetBalance(addr) + if stateObject.Balance().Cmp(balance) != 0 { + log.Warn("AddBalance in dirty, but balance has not do fixup", "txIndex", s.txIndex, "addr", addr, + "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", balance) + stateObject.setBalance(balance) + } + } + + stateObject.AddBalance(amount) + s.parallel.balanceChangesInSlot[addr] = struct{}{} + } +} + +// SubBalance subtracts amount from the account associated with addr. +func (s *ParallelStateDB) SubBalance(addr common.Address, amount *big.Int) { + // unlike add, sub 0 balance will not touch empty object + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + if addr == s.parallel.systemAddress { + s.parallel.systemAddressOpsCount++ + } + + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + newStateObject := stateObject.lightCopy(s) // light copy from main DB + // do balance fixup from the confirmed DB, it could be more reliable than main DB + balance := s.GetBalance(addr) + newStateObject.setBalance(balance) + newStateObject.SubBalance(amount) + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + return + } + // already dirty, make sure the balance is fixed up since it could be previously dirtied by nonce or KV... 
if addr != s.parallel.systemAddress { + if addr != s.parallel.systemAddress { + balance := s.GetBalance(addr) + if stateObject.Balance().Cmp(balance) != 0 { + log.Warn("SubBalance in dirty, but balance is incorrect", "txIndex", s.txIndex, "addr", addr, + "stateObject.Balance()", stateObject.Balance(), "s.GetBalance(addr)", balance) + stateObject.setBalance(balance) + } + } + + stateObject.SubBalance(amount) + s.parallel.balanceChangesInSlot[addr] = struct{}{} + } +} + +func (s *ParallelStateDB) SetBalance(addr common.Address, amount *big.Int) { + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + if addr == s.parallel.systemAddress { + s.parallel.systemAddressOpsCount++ + } + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + newStateObject := stateObject.lightCopy(s) + // update balance for revert, in case child contract is reverted, + // it should revert to the previous balance + balance := s.GetBalance(addr) + newStateObject.setBalance(balance) + newStateObject.SetBalance(amount) + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + return + } + // do balance fixup + if addr != s.parallel.systemAddress { + balance := s.GetBalance(addr) + stateObject.setBalance(balance) + } + stateObject.SetBalance(amount) + s.parallel.balanceChangesInSlot[addr] = struct{}{} + } +} + +func (s *ParallelStateDB) SetNonce(addr common.Address, nonce uint64) { + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + newStateObject := stateObject.lightCopy(s) + noncePre := s.GetNonce(addr) + newStateObject.setNonce(noncePre) // nonce fixup + newStateObject.SetNonce(nonce) + s.parallel.nonceChangesInSlot[addr] = struct{}{} + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + return + } + noncePre := s.GetNonce(addr) + stateObject.setNonce(noncePre) // nonce fixup + + stateObject.SetNonce(nonce) + s.parallel.nonceChangesInSlot[addr] = struct{}{} + } +} + +func (s *ParallelStateDB) SetCode(addr common.Address, code []byte) { + stateObject := s.GetOrNewStateObject(addr) + if stateObject != nil { + codeHash := crypto.Keccak256Hash(code) + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + newStateObject := stateObject.lightCopy(s) + codePre := s.GetCode(addr) // code fixup + codeHashPre := crypto.Keccak256Hash(codePre) + newStateObject.setCode(codeHashPre, codePre) + + newStateObject.SetCode(codeHash, code) + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + s.parallel.codeChangesInSlot[addr] = struct{}{} + return + } + codePre := s.GetCode(addr) // code fixup + codeHashPre := crypto.Keccak256Hash(codePre) + stateObject.setCode(codeHashPre, codePre) + + stateObject.SetCode(codeHash, code) + s.parallel.codeChangesInSlot[addr] = struct{}{} + } +} + +func (s *ParallelStateDB) SetState(addr common.Address, key, value common.Hash) { + stateObject := s.GetOrNewStateObject(addr) // attention: if StateObject's lightCopy, its storage is only a part of the full storage, + if stateObject != nil { + if s.parallel.baseTxIndex+1 == s.txIndex { + // we check if state is unchanged + // only when current transaction is the next transaction to be committed + // fixme: there is a bug, block: 14,962,284, + // stateObject is in dirty (light copy), but the key is in mainStateDB + // stateObject dirty -> committed, will skip mainStateDB dirty + if s.GetState(addr, key) == value { + log.Debug("Skip set same state", "baseTxIndex", 
s.parallel.baseTxIndex, + "txIndex", s.txIndex, "addr", addr, + "key", key, "value", value) + return + } + } + + if s.parallel.kvChangesInSlot[addr] == nil { + s.parallel.kvChangesInSlot[addr] = make(StateKeys) // make(Storage, defaultNumOfSlots) + } + + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + newStateObject := stateObject.lightCopy(s) + newStateObject.SetState(s.db, key, value) + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + return + } + // do State Update + stateObject.SetState(s.db, key, value) + } +} + +// Suicide marks the given account as suicided. +// This clears the account balance. +// +// The account's state object is still available until the state is committed, +// getStateObject will return a non-nil account after Suicide. +func (s *ParallelStateDB) Suicide(addr common.Address) bool { + var stateObject *StateObject + // 1.Try to get from dirty, it could be suicided inside of contract call + stateObject = s.parallel.dirtiedStateObjectsInSlot[addr] + if stateObject == nil { + // 2.Try to get from unconfirmed, if deleted return false, since the address does not exist + if obj, ok := s.getStateObjectFromUnconfirmedDB(addr); ok { + stateObject = obj + s.parallel.addrStateReadsInSlot[addr] = !stateObject.deleted // true: exist, false: deleted + if stateObject.deleted { + log.Error("Suicide addr already deleted in confirmed DB", "txIndex", s.txIndex, "addr", addr) + return false + } + } + } + + if stateObject == nil { + // 3.Try to get from main StateDB + stateObject = s.getStateObjectNoSlot(addr) + if stateObject == nil { + s.parallel.addrStateReadsInSlot[addr] = false // true: exist, false: deleted + log.Error("Suicide addr not exist", "txIndex", s.txIndex, "addr", addr) + return false + } + s.parallel.addrStateReadsInSlot[addr] = true // true: exist, false: deleted + } + + s.journal.append(suicideChange{ + account: &addr, + prev: stateObject.suicided, // todo: must be false? + prevbalance: new(big.Int).Set(s.GetBalance(addr)), + }) + + if _, ok := s.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + // do copy-on-write for suicide "write" + newStateObject := stateObject.lightCopy(s) + newStateObject.markSuicided() + newStateObject.data.Balance = new(big.Int) + s.parallel.dirtiedStateObjectsInSlot[addr] = newStateObject + s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist any more, + // s.parallel.nonceChangesInSlot[addr] = struct{}{} + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.codeChangesInSlot[addr] = struct{}{} + // s.parallel.kvChangesInSlot[addr] = make(StateKeys) // all key changes are discarded + return true + } + s.parallel.addrStateChangesInSlot[addr] = false // false: the address does not exist anymore + s.parallel.balanceChangesInSlot[addr] = struct{}{} + s.parallel.codeChangesInSlot[addr] = struct{}{} + + stateObject.markSuicided() + stateObject.data.Balance = new(big.Int) + return true +} + +// CreateAccount explicitly creates a state object. If a state object with the address +// already exists the balance is carried over to the new account. +// +// CreateAccount is called during the EVM CREATE operation. The situation might arise that +// a contract does the following: +// +// 1. sends funds to sha(account ++ (nonce + 1)) +// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1) +// +// Carrying over the balance ensures that Ether doesn't disappear. 
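All of the Get*/Set* methods above follow one lookup discipline: the slot's own dirty set first, then the per-slot read cache, then the unconfirmed DBs, then the main StateDB, recording every read so the dispatcher can later validate it against the finally-committed state. A distilled sketch with toy types (nothing here is the PR's API):

```go
// Three-tier read path with read recording, as used by the parallel
// GetBalance/GetNonce/GetCode/GetState family.
package main

import "fmt"

type lookup struct {
	dirty       map[string]int // this slot's own writes
	reads       map[string]int // recorded reads: the conflict-check evidence
	unconfirmed func(string) (int, bool)
	main        func(string) int
}

func (l *lookup) get(key string) int {
	if v, ok := l.dirty[key]; ok {
		return v // own write: no read record needed
	}
	if v, ok := l.reads[key]; ok {
		return v // already read and recorded
	}
	if v, ok := l.unconfirmed(key); ok {
		l.reads[key] = v
		return v
	}
	v := l.main(key)
	l.reads[key] = v
	return v
}

func main() {
	l := &lookup{
		dirty:       map[string]int{"nonce": 7},
		reads:       map[string]int{},
		unconfirmed: func(k string) (int, bool) { return 0, false },
		main:        func(k string) int { return 1 },
	}
	fmt.Println(l.get("nonce"), l.get("balance")) // 7 1
	fmt.Println(l.reads)                          // map[balance:1]: only external reads are recorded
}
```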
+func (s *ParallelStateDB) CreateAccount(addr common.Address) { + // no matter it is got from dirty, unconfirmed or main DB + // if addr not exist, preBalance will be common.Big0, it is same as new(big.Int) which + // is the value newObject(), + preBalance := s.GetBalance(addr) // parallel balance read will be recorded inside GetBalance + newObj := s.createObject(addr) + newObj.setBalance(new(big.Int).Set(preBalance)) // new big.Int for newObj +} + +// RevertToSnapshot reverts all state changes made since the given revision. +func (s *ParallelStateDB) RevertToSnapshot(revid int) { + // Find the snapshot in the stack of valid snapshots. + idx := sort.Search(len(s.validRevisions), func(i int) bool { + return s.validRevisions[i].id >= revid + }) + if idx == len(s.validRevisions) || s.validRevisions[idx].id != revid { + panic(fmt.Errorf("revision id %v cannot be reverted", revid)) + } + snapshot := s.validRevisions[idx].journalIndex + + // Replay the journal to undo changes and remove invalidated snapshots + s.journal.revert(s, snapshot) + s.validRevisions = s.validRevisions[:idx] +} + +// AddRefund adds gas to the refund counter +// journal.append will use ParallelState for revert +func (s *ParallelStateDB) AddRefund(gas uint64) { // todo: not needed, can be deleted + s.journal.append(refundChange{prev: s.refund}) + s.refund += gas +} + +// SubRefund removes gas from the refund counter. +// This method will panic if the refund counter goes below zero +func (s *ParallelStateDB) SubRefund(gas uint64) { + s.journal.append(refundChange{prev: s.refund}) + if gas > s.refund { + // we don't need to panic here if we read the wrong state in parallel mode + // we just need to redo this transaction + log.Info(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund), "tx", s.thash.String()) + s.parallel.needsRedo = true + return + } + s.refund -= gas +} + +// For Parallel Execution Mode, it can be seen as Penetrated Access: +// ------------------------------------------------------- +// | BaseTxIndex | Unconfirmed Txs... 
+// | Current TxIndex |
+// -------------------------------------------------------
+// Access the unconfirmed DBs in order of priority: txIndex-1 (the previous tx) down to baseTxIndex+1
+func (s *ParallelStateDB) getBalanceFromUnconfirmedDB(addr common.Address) *big.Int {
+    if addr == s.parallel.systemAddress {
+        // never read the system address from unconfirmed DBs
+        return nil
+    }
+
+    for i := s.txIndex - 1; i > s.parallel.baseStateDB.txIndex; i-- {
+        db_, ok := s.parallel.unconfirmedDBs.Load(i)
+        if !ok {
+            continue
+        }
+        db := db_.(*ParallelStateDB)
+        // 1.Check whether this unconfirmed DB touched the address in dirtiedStateObjectsInSlot
+        balanceHit := false
+        if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist {
+            balanceHit = true
+        }
+        if _, exist := db.parallel.balanceChangesInSlot[addr]; exist { // only a changed balance is reliable
+            balanceHit = true
+        }
+        if !balanceHit {
+            continue
+        }
+        obj := db.parallel.dirtiedStateObjectsInSlot[addr]
+        balance := obj.Balance()
+        if obj.deleted {
+            balance = common.Big0
+        }
+        return balance
+    }
+    return nil
+}
+
+// Similar to getBalanceFromUnconfirmedDB
+func (s *ParallelStateDB) getNonceFromUnconfirmedDB(addr common.Address) (uint64, bool) {
+    if addr == s.parallel.systemAddress {
+        // never read the system address from unconfirmed DBs
+        return 0, false
+    }
+
+    for i := s.txIndex - 1; i > s.parallel.baseStateDB.txIndex; i-- {
+        db_, ok := s.parallel.unconfirmedDBs.Load(i)
+        if !ok {
+            continue
+        }
+        db := db_.(*ParallelStateDB)
+
+        nonceHit := false
+        if _, ok := db.parallel.addrStateChangesInSlot[addr]; ok {
+            nonceHit = true
+        } else if _, ok := db.parallel.nonceChangesInSlot[addr]; ok {
+            nonceHit = true
+        }
+        if !nonceHit {
+            // nonce reference not hit, try the next unconfirmed DB
+            continue
+        }
+        // nonce hit, return the nonce
+        obj := db.parallel.dirtiedStateObjectsInSlot[addr]
+        if obj == nil {
+            // the object may not exist if the change was made but then reverted
+            // fixme: revert should remove the change record
+            log.Debug("Get nonce from UnconfirmedDB, changed but object not exist, ",
+                "txIndex", s.txIndex, "referred txIndex", i, "addr", addr)
+            continue
+        }
+        nonce := obj.Nonce()
+        // a deleted object has nonce == 0
+        if obj.deleted {
+            nonce = 0
+        }
+        return nonce, true
+    }
+    return 0, false
+}
+
+// Similar to getBalanceFromUnconfirmedDB.
+// It serves code and codeSize reads; the code bytes are returned together with a hit flag.
+func (s *ParallelStateDB) getCodeFromUnconfirmedDB(addr common.Address) ([]byte, bool) { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return nil, false + } + + for i := s.txIndex - 1; i > s.parallel.baseStateDB.txIndex; i-- { + db_, ok := s.parallel.unconfirmedDBs.Load(i) + if !ok { + continue + } + db := db_.(*ParallelStateDB) + + codeHit := false + if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { + codeHit = true + } + if _, exist := db.parallel.codeChangesInSlot[addr]; exist { + codeHit = true + } + if !codeHit { + // try next unconfirmedDb + continue + } + obj := db.parallel.dirtiedStateObjectsInSlot[addr] + if obj == nil { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get code from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) + continue + } + code := obj.Code(s.db) + if obj.deleted { + code = nil + } + return code, true + + } + return nil, false +} + +// Similar to getCodeFromUnconfirmedDB +// but differ when address is deleted or not exist +func (s *ParallelStateDB) getCodeHashFromUnconfirmedDB(addr common.Address) (common.Hash, bool) { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return common.Hash{}, false + } + + for i := s.txIndex - 1; i > s.parallel.baseStateDB.txIndex; i-- { + db_, ok := s.parallel.unconfirmedDBs.Load(i) + if !ok { + continue + } + db := db_.(*ParallelStateDB) + hashHit := false + if _, exist := db.parallel.addrStateChangesInSlot[addr]; exist { + hashHit = true + } + if _, exist := db.parallel.codeChangesInSlot[addr]; exist { + hashHit = true + } + if !hashHit { + // try next unconfirmedDb + continue + } + obj := db.parallel.dirtiedStateObjectsInSlot[addr] + if obj == nil { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get codeHash from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) + continue + } + codeHash := common.Hash{} + if !obj.deleted { + codeHash = common.BytesToHash(obj.CodeHash()) + } + return codeHash, true + } + return common.Hash{}, false +} + +// Similar to getCodeFromUnconfirmedDB +// It is for address state check of: Exist(), Empty() and HasSuicided() +// Since the unconfirmed DB should have done Finalise() with `deleteEmptyObjects = true` +// If the dirty address is empty or suicided, it will be marked as deleted, so we only need to return `deleted` or not. 
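+// The first return value reports whether the address exists (true) or is deleted (false);
+// the second reports whether any unconfirmed DB had a record for it at all.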
+func (s *ParallelStateDB) getAddrStateFromUnconfirmedDB(addr common.Address) (bool, bool) { + if addr == s.parallel.systemAddress { + // never get systemaddress from unconfirmed DB + return false, false + } + + // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) + for i := s.txIndex - 1; i > s.parallel.baseStateDB.txIndex; i-- { + db_, ok := s.parallel.unconfirmedDBs.Load(i) + if !ok { + continue + } + db := db_.(*ParallelStateDB) + if exist, ok := db.parallel.addrStateChangesInSlot[addr]; ok { + if _, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + // could not exist, if it is changed but reverted + // fixme: revert should remove the change record + log.Debug("Get addr State from UnconfirmedDB, changed but object not exist, ", + "txIndex", s.txIndex, "referred txIndex", i, "addr", addr) + continue + } + + return exist, true + } + } + return false, false +} + +func (s *ParallelStateDB) getKVFromUnconfirmedDB(addr common.Address, key common.Hash) (common.Hash, bool) { + // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) + for i := s.txIndex - 1; i > s.parallel.baseStateDB.txIndex; i-- { + db_, ok := s.parallel.unconfirmedDBs.Load(i) + if !ok { + continue + } + db := db_.(*ParallelStateDB) + if _, ok := db.parallel.kvChangesInSlot[addr]; ok { + obj := db.parallel.dirtiedStateObjectsInSlot[addr] + if val, exist := obj.dirtyStorage.GetValue(key); exist { + return val, true + } + } + } + return common.Hash{}, false +} + +func (s *ParallelStateDB) getStateObjectFromUnconfirmedDB(addr common.Address) (*StateObject, bool) { + // check the unconfirmed DB with range: baseTxIndex -> txIndex -1(previous tx) + for i := s.txIndex - 1; i > s.parallel.baseStateDB.txIndex; i-- { + db_, ok := s.parallel.unconfirmedDBs.Load(i) + if !ok { + continue + } + db := db_.(*ParallelStateDB) + if obj, ok := db.parallel.dirtiedStateObjectsInSlot[addr]; ok { + return obj, true + } + } + return nil, false +} + +// IsParallelReadsValid If stage2 is true, it is a likely conflict check, +// to detect these potential conflict results in advance and schedule redo ASAP. 
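+// A slot DB passes the full check only if every value it read during execution
+// (nonce, balance, storage KV, code, codeHash, address existence, snapshot
+// destructs) still matches what the main StateDB holds at merge time.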
+func (s *ParallelStateDB) IsParallelReadsValid(isStage2 bool) bool { + parallelKvOnce.Do(func() { + StartKvCheckLoop() + }) + slotDB := s + mainDB := slotDB.parallel.baseStateDB + // for nonce + for addr, nonceSlot := range slotDB.parallel.nonceReadsInSlot { + if isStage2 { // update slotDB's unconfirmed DB list and try + if nonceUnconfirm, ok := slotDB.getNonceFromUnconfirmedDB(addr); ok { + if nonceSlot != nonceUnconfirm { + log.Debug("IsSlotDBReadsValid nonce read is invalid in unconfirmed", "addr", addr, + "nonceSlot", nonceSlot, "nonceUnconfirm", nonceUnconfirm, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + } + nonceMain := mainDB.GetNonce(addr) + if nonceSlot != nonceMain { + log.Debug("IsSlotDBReadsValid nonce read is invalid", "addr", addr, + "nonceSlot", nonceSlot, "nonceMain", nonceMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + // balance + for addr, balanceSlot := range slotDB.parallel.balanceReadsInSlot { + if isStage2 { // update slotDB's unconfirmed DB list and try + if balanceUnconfirm := slotDB.getBalanceFromUnconfirmedDB(addr); balanceUnconfirm != nil { + if balanceSlot.Cmp(balanceUnconfirm) == 0 { + continue + } + if addr == WBNBAddress && slotDB.WBNBMakeUp() { + log.Debug("IsSlotDBReadsValid skip makeup for WBNB in stage 2", + "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) + continue // stage2 will skip WBNB check, no balance makeup + } + return false + } + } + + if addr != slotDB.parallel.systemAddress { // skip balance check for system address + balanceMain := mainDB.GetBalance(addr) + if balanceSlot.Cmp(balanceMain) != 0 { + if addr == WBNBAddress && slotDB.WBNBMakeUp() { // WBNB balance make up + if isStage2 { + log.Debug("IsSlotDBReadsValid skip makeup for WBNB in stage 2", + "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) + continue // stage2 will skip WBNB check, no balance makeup + } + if _, ok := s.parallel.balanceChangesInSlot[addr]; !ok { + // balance unchanged, no need to make up + log.Debug("IsSlotDBReadsValid WBNB balance no makeup since it is not changed ", + "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, + "updated WBNB balance", slotDB.GetBalance(addr)) + continue + } + balanceDelta := new(big.Int).Sub(balanceMain, balanceSlot) + slotDB.wbnbMakeUpBalance = new(big.Int).Add(slotDB.GetBalance(addr), balanceDelta) + /* + if _, exist := slotDB.stateObjectsPending[addr]; !exist { + slotDB.stateObjectsPending[addr] = struct{}{} + } + if _, exist := slotDB.stateObjectsDirty[addr]; !exist { + // only read, but never change WBNB's balance or state + // log.Warn("IsSlotDBReadsValid balance makeup for WBNB, but it is not in dirty", + // "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex) + slotDB.stateObjectsDirty[addr] = struct{}{} + } + */ + log.Debug("IsSlotDBReadsValid balance makeup for WBNB", + "SlotIndex", slotDB.parallel.SlotIndex, "txIndex", slotDB.txIndex, + "updated WBNB balance", slotDB.GetBalance(addr)) + continue + } + + log.Debug("IsSlotDBReadsValid balance read is invalid", "addr", addr, + "balanceSlot", balanceSlot, "balanceMain", balanceMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + } + // check KV + var units []ParallelKvCheckUnit // todo: pre-allocate to make it faster + for addr, read := 
range slotDB.parallel.kvReadsInSlot { + read.Range(func(keySlot, valSlot interface{}) bool { + units = append(units, ParallelKvCheckUnit{addr, keySlot.(common.Hash), valSlot.(common.Hash)}) + return true + }) + } + readLen := len(units) + if readLen < 8 || isStage2 { + for _, unit := range units { + if hasKvConflict(slotDB, unit.addr, unit.key, unit.val, isStage2) { + return false + } + } + } else { + msgHandledNum := 0 + msgSendNum := 0 + for _, unit := range units { + for { // make sure the unit is consumed + consumed := false + select { + case conflict := <-parallelKvCheckResCh: + msgHandledNum++ + if conflict { + // make sure all request are handled or discarded + for { + if msgHandledNum == msgSendNum { + break + } + select { + case <-parallelKvCheckReqCh: + msgHandledNum++ + case <-parallelKvCheckResCh: + msgHandledNum++ + } + } + return false + } + case parallelKvCheckReqCh <- ParallelKvCheckMessage{slotDB, isStage2, unit}: + msgSendNum++ + consumed = true + } + if consumed { + break + } + } + } + for { + if msgHandledNum == readLen { + break + } + conflict := <-parallelKvCheckResCh + msgHandledNum++ + if conflict { + // make sure all request are handled or discarded + for { + if msgHandledNum == msgSendNum { + break + } + select { + case <-parallelKvCheckReqCh: + msgHandledNum++ + case <-parallelKvCheckResCh: + msgHandledNum++ + } + } + return false + } + } + } + if isStage2 { // stage2 skip check code, or state, since they are likely unchanged. + return true + } + + // check code + for addr, codeSlot := range slotDB.parallel.codeReadsInSlot { + codeMain := mainDB.GetCode(addr) + if !bytes.Equal(codeSlot, codeMain) { + log.Debug("IsSlotDBReadsValid code read is invalid", "addr", addr, + "len codeSlot", len(codeSlot), "len codeMain", len(codeMain), "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + // check codeHash + for addr, codeHashSlot := range slotDB.parallel.codeHashReadsInSlot { + codeHashMain := mainDB.GetCodeHash(addr) + if !bytes.Equal(codeHashSlot.Bytes(), codeHashMain.Bytes()) { + log.Debug("IsSlotDBReadsValid codehash read is invalid", "addr", addr, + "codeHashSlot", codeHashSlot, "codeHashMain", codeHashMain, "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + // addr state check + for addr, stateSlot := range slotDB.parallel.addrStateReadsInSlot { + stateMain := false // addr not exist + if mainDB.getStateObject(addr) != nil { + stateMain = true // addr exist in main DB + } + if stateSlot != stateMain { + // skip addr state check for system address + if addr != slotDB.parallel.systemAddress { + log.Debug("IsSlotDBReadsValid addrState read invalid(true: exist, false: not exist)", + "addr", addr, "stateSlot", stateSlot, "stateMain", stateMain, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + } + } + // snapshot destructs check + for addr, destructRead := range slotDB.parallel.addrSnapDestructsReadsInSlot { + mainObj := mainDB.getStateObject(addr) + if mainObj == nil { + log.Debug("IsSlotDBReadsValid snapshot destructs read invalid, address should exist", + "addr", addr, "destruct", destructRead, + "SlotIndex", slotDB.parallel.SlotIndex, + "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex) + return false + } + slotDB.snapParallelLock.RLock() // fixme: this lock is not needed + _, destructMain := 
mainDB.snapDestructs[addr] // destructMain is false when addr is not in snapDestructs
+        slotDB.snapParallelLock.RUnlock()
+        if destructRead != destructMain {
+            log.Debug("IsSlotDBReadsValid snapshot destructs read invalid",
+                "addr", addr, "destructRead", destructRead, "destructMain", destructMain,
+                "SlotIndex", slotDB.parallel.SlotIndex,
+                "txIndex", slotDB.txIndex, "baseTxIndex", slotDB.parallel.baseTxIndex)
+            return false
+        }
+    }
+
+    return true
+}
+
+// SystemAddressRedo
+// For most transactions, systemAddressOpsCount should be 3:
+// - one for SetBalance(0) on NewSlotDB()
+// - the second for AddBalance(GasFee) at the end,
+// - the third for the GetBalance() triggered by that AddBalance()
+// A higher count (the check below uses a threshold of 4) means the transaction itself
+// accesses the system address; it then needs the accurate system address balance, so it
+// should redo and keep its balance on NewSlotDB(), for example:
+// https://bscscan.com/tx/0xe469f1f948de90e9508f96da59a96ed84b818e71432ca11c5176eb60eb66671b
+func (s *ParallelStateDB) SystemAddressRedo() bool {
+    if s.parallel.systemAddressOpsCount > 4 {
+        log.Info("SystemAddressRedo", "SlotIndex", s.parallel.SlotIndex,
+            "txIndex", s.txIndex,
+            "systemAddressOpsCount", s.parallel.systemAddressOpsCount)
+        return true
+    }
+    return false
+}
+
+// NeedsRedo returns true if there is any clear reason that this transaction must be redone
+func (s *ParallelStateDB) NeedsRedo() bool {
+    return s.parallel.needsRedo
+}
+
+// WBNBMakeUp
+// WBNB balance makeup is allowed only when the balance is accessed through a contract call.
+// If it is accessed some other way, e.g. by `address.balance` or `address.transfer(amount)`,
+// we can not do the balance makeup.
+func (s *ParallelStateDB) WBNBMakeUp() bool {
+    return s.wbnbMakeUp
+}
+
+func (s *ParallelStateDB) ParallelMakeUp(addr common.Address, input []byte) {
+    if addr == WBNBAddress {
+        if len(input) < 4 {
+            // should never be less than 4 bytes
+            // log.Warn("ParallelMakeUp for WBNB input size invalid", "input size", len(input), "input", input)
+            s.wbnbMakeUp = false
+            return
+        }
+        // The EVM is big-endian, and so is the method ID
+        wbnbDeposit := []byte{0xd0, 0xe3, 0x0d, 0xb0}      // "0xd0e30db0": Keccak-256("deposit()")
+        wbnbWithdraw := []byte{0x2e, 0x1a, 0x7d, 0x4d}     // "0x2e1a7d4d": Keccak-256("withdraw(uint256)")
+        wbnbApprove := []byte{0x09, 0x5e, 0xa7, 0xb3}      // "0x095ea7b3": Keccak-256("approve(address,uint256)")
+        wbnbTransfer := []byte{0xa9, 0x05, 0x9c, 0xbb}     // "0xa9059cbb": Keccak-256("transfer(address,uint256)")
+        wbnbTransferFrom := []byte{0x23, 0xb8, 0x72, 0xdd} // "0x23b872dd": Keccak-256("transferFrom(address,address,uint256)")
+        // wbnbTotalSupply := []byte{0x18, 0x16, 0x0d, 0xdd} // "0x18160ddd": Keccak-256("totalSupply()")
+        // unknown WBNB interface 1: {0xDD, 0x62, 0xED, 0x3E} in block: 14,248,627
+        // unknown WBNB interface 2: {0x70, 0xa0, 0x82, 0x31} in block: 14,249,300
+
+        methodId := input[:4]
+        if bytes.Equal(methodId, wbnbDeposit) {
+            return
+        }
+        if bytes.Equal(methodId, wbnbWithdraw) {
+            return
+        }
+        if bytes.Equal(methodId, wbnbApprove) {
+            return
+        }
+        if bytes.Equal(methodId, wbnbTransfer) {
+            return
+        }
+        if bytes.Equal(methodId, wbnbTransferFrom) {
+            return
+        }
+        // if bytes.Equal(methodId, wbnbTotalSupply) {
+        //     log.Debug("ParallelMakeUp for WBNB, not for totalSupply", "input size", len(input), "input", input)
+        //     s.wbnbMakeUp = false // can not makeup
+        //     return
+        // }
+
+        // log.Warn("ParallelMakeUp for WBNB unknown method id", "input size", len(input), "input", input)
+        s.wbnbMakeUp = false
+    }
+}
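+
+// Note on the hardcoded selectors above: a 4-byte method ID is the first four
+// bytes of the Keccak-256 hash of the canonical function signature. As an
+// illustrative sketch (not part of this change), it could also be computed at
+// runtime with:
+//
+//    methodID := crypto.Keccak256([]byte("deposit()"))[:4] // 0xd0e30db0
+//
+// The IDs are inlined as byte literals here, presumably to keep hashing off the
+// hot path of every Call.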
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index 4b3a91cde6..1c097a3ae0 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -19,6 +19,7 @@ package state import ( "bytes" "encoding/binary" + "encoding/hex" "fmt" "math" "math/big" @@ -34,6 +35,10 @@ import ( "github.com/ethereum/go-ethereum/core/types" ) +var ( + systemAddress = common.HexToAddress("0xffffFFFfFFffffffffffffffFfFFFfffFFFfFFfE") +) + // Tests that updating a state trie does not leak any database writes prior to // actually committing the state. func TestUpdateLeaks(t *testing.T) { @@ -932,3 +937,397 @@ func TestStateDBAccessList(t *testing.T) { t.Fatalf("expected empty, got %d", got) } } + +func TestSuicide(t *testing.T) { + // Create an initial state with a few accounts + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + state.PrepareForParallel() + + slotDb := NewSlotDB(state, systemAddress, 0, false) + + addr := common.BytesToAddress([]byte("so")) + slotDb.SetBalance(addr, big.NewInt(1)) + + result := slotDb.Suicide(addr) + if !result { + t.Fatalf("expected account suicide, got %v", result) + } + + if _, ok := slotDb.parallel.stateObjectsSuicidedInSlot[addr]; !ok { + t.Fatalf("address should exist in stateObjectsSuicidedInSlot") + } + + if _, ok := slotDb.parallel.addrStateChangesInSlot[addr]; !ok { + t.Fatalf("address should exist in addrStateChangesInSlot") + } + + if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + t.Fatalf("address should exist in dirtiedStateObjectsInSlot") + } + + hasSuicide := slotDb.HasSuicided(addr) + if !hasSuicide { + t.Fatalf("address should be suicided") + } + + if _, ok := slotDb.parallel.addrStateReadsInSlot[addr]; !ok { + t.Fatalf("address should exist in addrStateReadsInSlot") + } +} + +func TestSetAndGetState(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + state.PrepareForParallel() + + slotDb := NewSlotDB(state, systemAddress, 0, false) + + addr := common.BytesToAddress([]byte("so")) + state.SetBalance(addr, big.NewInt(1)) + + slotDb.SetState(addr, common.BytesToHash([]byte("test key")), common.BytesToHash([]byte("test store"))) + + if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + t.Fatalf("address should exist in dirtiedStateObjectsInSlot") + } + + if _, ok := slotDb.parallel.stateChangesInSlot[addr]; !ok { + t.Fatalf("address should exist in stateChangesInSlot") + } + + oldValueRead := state.GetState(addr, common.BytesToHash([]byte("test key"))) + emptyHash := common.Hash{} + if oldValueRead != emptyHash { + t.Fatalf("value read in old state should be empty") + } + + valueRead := slotDb.GetState(addr, common.BytesToHash([]byte("test key"))) + if valueRead != common.BytesToHash([]byte("test store")) { + t.Fatalf("value read should be equal to the stored value") + } + + if _, ok := slotDb.parallel.stateReadsInSlot[addr]; !ok { + t.Fatalf("address should exist in stateReadsInSlot") + } +} + +func TestSetAndGetCode(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + state.PrepareForParallel() + + slotDb := NewSlotDB(state, systemAddress, 0, false) + + addr := common.BytesToAddress([]byte("so")) + state.SetBalance(addr, big.NewInt(1)) + + if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; ok { + t.Fatalf("address should not exist in dirtiedStateObjectsInSlot") + } + + slotDb.SetCode(addr, 
[]byte("test code")) + + if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + t.Fatalf("address should exist in dirtiedStateObjectsInSlot") + } + + if _, ok := slotDb.parallel.codeChangesInSlot[addr]; !ok { + t.Fatalf("address should exist in codeChangesInSlot") + } + + codeRead := slotDb.GetCode(addr) + if string(codeRead) != "test code" { + t.Fatalf("code read should be equal to the code stored") + } + + if _, ok := slotDb.parallel.codeReadsInSlot[addr]; !ok { + t.Fatalf("address should exist in codeReadsInSlot") + } +} + +func TestGetCodeSize(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + state.PrepareForParallel() + + slotDb := NewSlotDB(state, systemAddress, 0, false) + + addr := common.BytesToAddress([]byte("so")) + state.SetBalance(addr, big.NewInt(1)) + + slotDb.SetCode(addr, []byte("test code")) + + codeSize := slotDb.GetCodeSize(addr) + if codeSize != 9 { + t.Fatalf("code size should be 9") + } + + if _, ok := slotDb.parallel.codeReadsInSlot[addr]; !ok { + t.Fatalf("address should exist in codeReadsInSlot") + } +} + +func TestGetCodeHash(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + state.PrepareForParallel() + + slotDb := NewSlotDB(state, systemAddress, 0, false) + + addr := common.BytesToAddress([]byte("so")) + state.SetBalance(addr, big.NewInt(1)) + + slotDb.SetCode(addr, []byte("test code")) + + codeSize := slotDb.GetCodeHash(addr) + print(hex.EncodeToString(codeSize[:])) + if hex.EncodeToString(codeSize[:]) != "6e73fa02f7828b28608b078b007a4023fb40453c3e102b83828a3609a94d8cbb" { + t.Fatalf("code hash should be 6e73fa02f7828b28608b078b007a4023fb40453c3e102b83828a3609a94d8cbb") + } + if _, ok := slotDb.parallel.codeReadsInSlot[addr]; !ok { + t.Fatalf("address should exist in codeReadsInSlot") + } +} + +func TestSetNonce(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + state.PrepareForParallel() + + slotDb := NewSlotDB(state, systemAddress, 0, false) + + addr := common.BytesToAddress([]byte("so")) + state.SetBalance(addr, big.NewInt(1)) + state.SetNonce(addr, 1) + + slotDb.SetNonce(addr, 2) + + oldNonce := state.GetNonce(addr) + if oldNonce != 1 { + t.Fatalf("old nonce should be 1") + } + + newNonce := slotDb.GetNonce(addr) + if newNonce != 2 { + t.Fatalf("new nonce should be 2") + } + if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + t.Fatalf("address should exist in dirtiedStateObjectsInSlot") + } +} + +func TestSetAndGetBalance(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + state.PrepareForParallel() + + slotDb := NewSlotDB(state, systemAddress, 0, true) + + addr := systemAddress + state.SetBalance(addr, big.NewInt(1)) + + slotDb.SetBalance(addr, big.NewInt(2)) + + oldBalance := state.GetBalance(addr) + if oldBalance.Int64() != 1 { + t.Fatalf("old balance should be 1") + } + + if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + t.Fatalf("address should exist in dirtiedStateObjectsInSlot") + } + + if _, ok := slotDb.parallel.balanceChangesInSlot[addr]; !ok { + t.Fatalf("address should exist in balanceChangesInSlot") + } + + if slotDb.parallel.systemAddressOpsCount != 1 { + t.Fatalf("systemAddressOpsCount should be 1") + } + + newBalance := slotDb.GetBalance(addr) + if newBalance.Int64() != 2 { + t.Fatalf("new nonce should be 
2") + } + + if _, ok := slotDb.parallel.balanceReadsInSlot[addr]; !ok { + t.Fatalf("address should exist in balanceReadsInSlot") + } + + if slotDb.parallel.systemAddressOpsCount != 2 { + t.Fatalf("systemAddressOpsCount should be 1") + } +} + +func TestSubBalance(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + state.PrepareForParallel() + + slotDb := NewSlotDB(state, systemAddress, 0, true) + + addr := systemAddress + state.SetBalance(addr, big.NewInt(2)) + + slotDb.SubBalance(addr, big.NewInt(1)) + + oldBalance := state.GetBalance(addr) + if oldBalance.Int64() != 2 { + t.Fatalf("old balance should be 1") + } + + if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + t.Fatalf("address should exist in dirtiedStateObjectsInSlot") + } + + if _, ok := slotDb.parallel.balanceChangesInSlot[addr]; !ok { + t.Fatalf("address should exist in balanceChangesInSlot") + } + + if _, ok := slotDb.parallel.balanceReadsInSlot[addr]; !ok { + t.Fatalf("address should exist in balanceReadsInSlot") + } + + if slotDb.parallel.systemAddressOpsCount != 1 { + t.Fatalf("systemAddressOpsCount should be 1") + } + + newBalance := slotDb.GetBalance(addr) + if newBalance.Int64() != 1 { + t.Fatalf("new nonce should be 2") + } +} + +func TestAddBalance(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + state.PrepareForParallel() + + slotDb := NewSlotDB(state, systemAddress, 0, true) + + addr := systemAddress + state.SetBalance(addr, big.NewInt(2)) + + slotDb.AddBalance(addr, big.NewInt(1)) + + oldBalance := state.GetBalance(addr) + if oldBalance.Int64() != 2 { + t.Fatalf("old balance should be 1") + } + + if _, ok := slotDb.parallel.dirtiedStateObjectsInSlot[addr]; !ok { + t.Fatalf("address should exist in dirtiedStateObjectsInSlot") + } + + if _, ok := slotDb.parallel.balanceChangesInSlot[addr]; !ok { + t.Fatalf("address should exist in balanceChangesInSlot") + } + + if _, ok := slotDb.parallel.balanceReadsInSlot[addr]; !ok { + t.Fatalf("address should exist in balanceReadsInSlot") + } + + if slotDb.parallel.systemAddressOpsCount != 1 { + t.Fatalf("systemAddressOpsCount should be 1") + } + + newBalance := slotDb.GetBalance(addr) + if newBalance.Int64() != 3 { + t.Fatalf("new nonce should be 2") + } +} + +func TestEmpty(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + state.PrepareForParallel() + + slotDb := NewSlotDB(state, systemAddress, 0, true) + + addr := systemAddress + state.SetBalance(addr, big.NewInt(2)) + + empty := slotDb.Empty(addr) + if empty { + t.Fatalf("address should exist") + } + + if _, ok := slotDb.parallel.addrStateReadsInSlot[addr]; !ok { + t.Fatalf("address should exist in addrStateReadsInSlot") + } +} + +func TestExist(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + state.PrepareForParallel() + + slotDb := NewSlotDB(state, systemAddress, 0, true) + + addr := systemAddress + state.SetBalance(addr, big.NewInt(2)) + + exist := slotDb.Exist(addr) + if !exist { + t.Fatalf("address should exist") + } + + if _, ok := slotDb.parallel.addrStateReadsInSlot[addr]; !ok { + t.Fatalf("address should exist in addrStateReadsInSlot") + } +} + +func TestMergeSlotDB(t *testing.T) { + memDb := rawdb.NewMemoryDatabase() + db := NewDatabase(memDb) + state, _ := New(common.Hash{}, db, nil) + 
+    state.PrepareForParallel()
+
+    oldSlotDb := NewSlotDB(state, systemAddress, 0, true)
+
+    newSlotDb := NewSlotDB(state, systemAddress, 0, true)
+
+    addr := systemAddress
+    newSlotDb.SetBalance(addr, big.NewInt(2))
+    newSlotDb.SetState(addr, common.BytesToHash([]byte("test key")), common.BytesToHash([]byte("test store")))
+    newSlotDb.SetCode(addr, []byte("test code"))
+    newSlotDb.Suicide(addr)
+
+    changeList := oldSlotDb.MergeSlotDB(newSlotDb, &types.Receipt{}, 0)
+
+    if _, ok := changeList.StateObjectSuicided[addr]; !ok {
+        t.Fatalf("address should exist in StateObjectSuicided")
+    }
+
+    if _, ok := changeList.StateChangeSet[addr]; !ok {
+        t.Fatalf("address should exist in StateChangeSet")
+    }
+
+    if _, ok := changeList.BalanceChangeSet[addr]; !ok {
+        t.Fatalf("address should exist in BalanceChangeSet")
+    }
+
+    if _, ok := changeList.CodeChangeSet[addr]; !ok {
+        t.Fatalf("address should exist in CodeChangeSet")
+    }
+
+    if _, ok := changeList.AddrStateChangeSet[addr]; !ok {
+        t.Fatalf("address should exist in AddrStateChangeSet")
+    }
+}
diff --git a/core/state_prefetcher.go b/core/state_prefetcher.go
index 4ec9fc3ecd..980c6c0783 100644
--- a/core/state_prefetcher.go
+++ b/core/state_prefetcher.go
@@ -69,7 +69,7 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c
 		case txIndex := <-txChan:
 			tx := transactions[txIndex]
 			// Convert the transaction into an executable message and pre-cache its sender
-			msg, err := tx.AsMessageNoNonceCheck(signer)
+			msg, err := tx.AsMessageNoNonceCheck(signer, header.BaseFee)
 			if err != nil {
 				return // Also invalid block, bail out
 			}
@@ -114,7 +114,7 @@ func (p *statePrefetcher) PrefetchMining(txs *types.TransactionsByPriceAndNonce,
 	select {
 	case tx := <-startCh:
 		// Convert the transaction into an executable message and pre-cache its sender
-		msg, err := tx.AsMessageNoNonceCheck(signer)
+		msg, err := tx.AsMessageNoNonceCheck(signer, header.BaseFee)
 		if err != nil {
 			return // Also invalid block, bail out
 		}
diff --git a/core/state_processor.go b/core/state_processor.go
index b42938adf9..bc798f1ed3 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -22,7 +22,9 @@ import (
 	"fmt"
 	"math/big"
 	"math/rand"
+	"runtime"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -46,6 +48,11 @@ const (
 	recentTime             = 1024 * 3
 	recentDiffLayerTimeout = 5
 	farDiffLayerTimeout    = 2
+
+	parallelPrimarySlot = 0
+	parallelShadowSlot  = 1
+	stage2CheckNumber   = 30 // ConfirmStage2 will check this number of transactions, to avoid an overly busy stage2 check
+	stage2AheadNum      = 3  // enter ConfirmStage2 in advance, to avoid waiting on a fat Tx
 )
 
 // StateProcessor is a basic Processor, which takes care of transitioning
@@ -54,11 +61,10 @@ const (
 // StateProcessor implements Processor.
 type StateProcessor struct {
 	config *params.ChainConfig // Chain configuration options
-	bc     *BlockChain         // Canonical block chain
+	bc     *BlockChain         // Canonical blockchain
 	engine consensus.Engine    // Consensus engine used for block rewards
 }
 
-// NewStateProcessor initialises a new StateProcessor.
func NewStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine) *StateProcessor { return &StateProcessor{ config: config, @@ -67,6 +73,39 @@ func NewStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consen } } +type ParallelStateProcessor struct { + StateProcessor + parallelNum int // leave a CPU to dispatcher + slotState []*SlotState // idle, or pending messages + allTxReqs []*ParallelTxRequest + txResultChan chan *ParallelTxResult // to notify dispatcher that a tx is done + mergedTxIndex int // the latest finalized tx index, fixme: use Atomic + pendingConfirmResults map[int][]*ParallelTxResult // tx could be executed several times, with several result to check + unconfirmedResults *sync.Map // this is for stage2 confirm, since pendingConfirmResults can not be accessed in stage2 loop + unconfirmedDBs *sync.Map + slotDBsToRelease []*state.ParallelStateDB + stopSlotChan chan struct{} + stopConfirmChan chan struct{} + debugConflictRedoNum int + // start for confirm stage2 + confirmStage2Chan chan int + stopConfirmStage2Chan chan struct{} + txReqExecuteRecord map[int]int + txReqExecuteCount int + inConfirmStage2 bool + targetStage2Count int // when executed txNUM reach it, enter stage2 RT confirm + nextStage2TxIndex int +} + +func NewParallelStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine, parallelNum int) *ParallelStateProcessor { + processor := &ParallelStateProcessor{ + StateProcessor: *NewStateProcessor(config, bc, engine), + parallelNum: parallelNum, + } + processor.init() + return processor +} + type LightStateProcessor struct { check int64 StateProcessor @@ -370,6 +409,676 @@ func (p *LightStateProcessor) LightProcess(diffLayer *types.DiffLayer, block *ty return diffLayer.Receipts, allLogs, gasUsed, nil } +type MergedTxInfo struct { + slotDB *state.StateDB // used for SlotDb reuse only, otherwise, it can be discarded + StateObjectSuicided map[common.Address]struct{} + StateChangeSet map[common.Address]state.StateKeys + BalanceChangeSet map[common.Address]struct{} + CodeChangeSet map[common.Address]struct{} + AddrStateChangeSet map[common.Address]struct{} + txIndex int +} + +type SlotState struct { + pendingTxReqList []*ParallelTxRequest + primaryWakeUpChan chan struct{} + shadowWakeUpChan chan struct{} + primaryStopChan chan struct{} + shadowStopChan chan struct{} + activatedType int32 // 0: primary slot, 1: shadow slot +} + +type ParallelTxResult struct { + executedIndex int32 // the TxReq can be executed several time, increase index for each execution + slotIndex int // slot index + txReq *ParallelTxRequest + receipt *types.Receipt + slotDB *state.ParallelStateDB // if updated, it is not equal to txReq.slotDB + gpSlot *GasPool + evm *vm.EVM + result *ExecutionResult + err error +} + +type ParallelTxRequest struct { + txIndex int + baseStateDB *state.StateDB + staticSlotIndex int // static dispatched id + tx *types.Transaction + gasLimit uint64 + msg types.Message + block *types.Block + vmConfig vm.Config + bloomProcessor *AsyncReceiptBloomGenerator + usedGas *uint64 + curTxChan chan int + systemAddrRedo bool + runnable int32 // 0: not runnable, 1: runnable + executedNum int32 +} + +// to create and start the execution slot goroutines +func (p *ParallelStateProcessor) init() { + log.Info("Parallel execution mode is enabled", "Parallel Num", p.parallelNum, + "CPUNum", runtime.NumCPU()) + p.txResultChan = make(chan *ParallelTxResult, 200) + p.stopSlotChan = make(chan struct{}, 1) + p.stopConfirmChan = 
make(chan struct{}, 1) + p.stopConfirmStage2Chan = make(chan struct{}, 1) + + p.slotState = make([]*SlotState, p.parallelNum) + for i := 0; i < p.parallelNum; i++ { + p.slotState[i] = &SlotState{ + primaryWakeUpChan: make(chan struct{}, 1), + shadowWakeUpChan: make(chan struct{}, 1), + primaryStopChan: make(chan struct{}, 1), + shadowStopChan: make(chan struct{}, 1), + } + // start the primary slot's goroutine + go func(slotIndex int) { + p.runSlotLoop(slotIndex, parallelPrimarySlot) // this loop will be permanent live + }(i) + + // start the shadow slot. + // It is back up of the primary slot to make sure transaction can be redone ASAP, + // since the primary slot could be busy at executing another transaction + go func(slotIndex int) { + p.runSlotLoop(slotIndex, parallelShadowSlot) // this loop will be permanent live + }(i) + + } + + p.confirmStage2Chan = make(chan int, 10) + go func() { + p.runConfirmStage2Loop() // this loop will be permanent live + }() +} + +// clear slot state for each block. +func (p *ParallelStateProcessor) resetState(txNum int, statedb *state.StateDB) { + if txNum == 0 { + return + } + p.mergedTxIndex = -1 + p.debugConflictRedoNum = 0 + p.inConfirmStage2 = false + + statedb.PrepareForParallel() + p.allTxReqs = make([]*ParallelTxRequest, 0) + p.slotDBsToRelease = make([]*state.ParallelStateDB, 0, txNum) + + stateDBsToRelease := p.slotDBsToRelease + go func() { + for _, slotDB := range stateDBsToRelease { + slotDB.PutSyncPool() + } + }() + for _, slot := range p.slotState { + slot.pendingTxReqList = make([]*ParallelTxRequest, 0) + slot.activatedType = parallelPrimarySlot + } + p.unconfirmedResults = new(sync.Map) + p.unconfirmedDBs = new(sync.Map) + p.pendingConfirmResults = make(map[int][]*ParallelTxResult, 200) + p.txReqExecuteRecord = make(map[int]int, 200) + p.txReqExecuteCount = 0 + p.nextStage2TxIndex = 0 +} + +// Benefits of StaticDispatch: +// ** try best to make Txs with same From() in same slot +// ** reduce IPC cost by dispatch in Unit +// ** make sure same From in same slot +// ** try to make it balanced, queue to the most hungry slot for new Address +func (p *ParallelStateProcessor) doStaticDispatch(txReqs []*ParallelTxRequest) { + fromSlotMap := make(map[common.Address]int, 100) + toSlotMap := make(map[common.Address]int, 100) + for _, txReq := range txReqs { + var slotIndex = -1 + if i, ok := fromSlotMap[txReq.msg.From()]; ok { + // first: same From are all in same slot + slotIndex = i + } else if txReq.msg.To() != nil { + // To Address, with txIndex sorted, could be in different slot. 
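+			// (txs that call the same To address are likely to touch the same state,
+			// so keeping them in one slot reduces cross-slot conflicts)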
+ if i, ok := toSlotMap[*txReq.msg.To()]; ok { + slotIndex = i + } + } + + // not found, dispatch to most hungry slot + if slotIndex == -1 { + var workload = len(p.slotState[0].pendingTxReqList) + slotIndex = 0 + for i, slot := range p.slotState { // can start from index 1 + if len(slot.pendingTxReqList) < workload { + slotIndex = i + workload = len(slot.pendingTxReqList) + } + } + } + // update + fromSlotMap[txReq.msg.From()] = slotIndex + if txReq.msg.To() != nil { + toSlotMap[*txReq.msg.To()] = slotIndex + } + + slot := p.slotState[slotIndex] + txReq.staticSlotIndex = slotIndex // txReq is better to be executed in this slot + slot.pendingTxReqList = append(slot.pendingTxReqList, txReq) + } +} + +// do conflict detect +func (p *ParallelStateProcessor) hasConflict(txResult *ParallelTxResult, isStage2 bool) bool { + slotDB := txResult.slotDB + if txResult.err != nil { + return true + } else if slotDB.SystemAddressRedo() { + if !isStage2 { + // for system addr redo, it has to wait until it's turn to keep the system address balance + txResult.txReq.systemAddrRedo = true + } + return true + } else if slotDB.NeedsRedo() { + // if this is any reason that indicates this transaction needs to redo, skip the conflict check + return true + } else { + // to check if what the slot db read is correct. + if !slotDB.IsParallelReadsValid(isStage2) { + return true + } + } + return false +} + +func (p *ParallelStateProcessor) switchSlot(slotIndex int) { + slot := p.slotState[slotIndex] + if atomic.CompareAndSwapInt32(&slot.activatedType, parallelPrimarySlot, parallelShadowSlot) { + // switch from normal to shadow slot + if len(slot.shadowWakeUpChan) == 0 { + slot.shadowWakeUpChan <- struct{}{} // only notify when target once + } + } else if atomic.CompareAndSwapInt32(&slot.activatedType, parallelShadowSlot, parallelPrimarySlot) { + // switch from shadow to normal slot + if len(slot.primaryWakeUpChan) == 0 { + slot.primaryWakeUpChan <- struct{}{} // only notify when target once + } + } +} + +func (p *ParallelStateProcessor) executeInSlot(slotIndex int, txReq *ParallelTxRequest) *ParallelTxResult { + atomic.AddInt32(&txReq.executedNum, 1) + slotDB := state.NewSlotDB(txReq.baseStateDB, consensus.SystemAddress, txReq.txIndex, + p.mergedTxIndex, txReq.systemAddrRedo, p.unconfirmedDBs) + + slotDB.Prepare(txReq.tx.Hash(), txReq.txIndex) + blockContext := NewEVMBlockContext(txReq.block.Header(), p.bc, nil) // can share blockContext within a block for efficiency + vmenv := vm.NewEVM(blockContext, vm.TxContext{}, slotDB, p.config, txReq.vmConfig) + // gasLimit not accurate, but it is ok for block import. 
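+	// (txs execute out of order, so the remaining block gas can not be known here;
+	// the real gas accounting happens at confirm time via gp.SubGas)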
+ // each slot would use its own gas pool, and will do gas limit check later + gpSlot := new(GasPool).AddGas(txReq.gasLimit) // block.GasLimit() + + evm, result, err := applyTransactionStageExecution(txReq.msg, gpSlot, slotDB, vmenv) + txResult := ParallelTxResult{ + executedIndex: atomic.LoadInt32(&txReq.executedNum), + slotIndex: slotIndex, + txReq: txReq, + receipt: nil, // receipt is generated in finalize stage + slotDB: slotDB, + err: err, + gpSlot: gpSlot, + evm: evm, + result: result, + } + if err == nil { + if result.Failed() { + // if Tx is reverted, all its state change will be discarded + slotDB.RevertSlotDB(txReq.msg.From()) + } + slotDB.Finalise(true) // Finalise could write s.parallel.addrStateChangesInSlot[addr], keep Read and Write in same routine to avoid crash + p.unconfirmedDBs.Store(txReq.txIndex, slotDB) + } else { + // the transaction failed at check(nonce or balance), actually it has not been executed yet. + atomic.CompareAndSwapInt32(&txReq.runnable, 0, 1) + // the error could be caused by unconfirmed balance reference, + // the balance could insufficient to pay its gas limit, which cause it preCheck.buyGas() failed + // redo could solve it. + log.Debug("In slot execution error", "error", err, + "slotIndex", slotIndex, "txIndex", txReq.txIndex) + } + p.unconfirmedResults.Store(txReq.txIndex, &txResult) + return &txResult +} + +// to confirm a serial TxResults with same txIndex +func (p *ParallelStateProcessor) toConfirmTxIndex(targetTxIndex int, isStage2 bool) *ParallelTxResult { + if isStage2 { + if targetTxIndex <= p.mergedTxIndex+1 { + // `p.mergedTxIndex+1` is the one to be merged, + // in stage2, we do likely conflict check, for these not their turn. + return nil + } + } + + for { + // handle a targetTxIndex in a loop + var targetResult *ParallelTxResult + if isStage2 { + result, ok := p.unconfirmedResults.Load(targetTxIndex) + if !ok { + return nil + } + targetResult = result.(*ParallelTxResult) + // in stage 2, don't schedule a new redo if the TxReq is: + // a.runnable: it will be redone + // b.running: the new result will be more reliable, we skip check right now + if atomic.CompareAndSwapInt32(&targetResult.txReq.runnable, 1, 1) { + return nil + } + if targetResult.executedIndex < atomic.LoadInt32(&targetResult.txReq.executedNum) { + return nil + } + } else { + results := p.pendingConfirmResults[targetTxIndex] + resultsLen := len(results) + if resultsLen == 0 { // there is no pending result can be verified, break and wait for incoming results + return nil + } + targetResult = results[len(results)-1] // last is the freshest, stack based priority + p.pendingConfirmResults[targetTxIndex] = p.pendingConfirmResults[targetTxIndex][:resultsLen-1] // remove from the queue + } + + valid := p.toConfirmTxIndexResult(targetResult, isStage2) + if !valid { + staticSlotIndex := targetResult.txReq.staticSlotIndex // it is better to run the TxReq in its static dispatch slot + if isStage2 { + atomic.CompareAndSwapInt32(&targetResult.txReq.runnable, 0, 1) // needs redo + p.debugConflictRedoNum++ + // interrupt the slot's current routine, and switch to the other routine + p.switchSlot(staticSlotIndex) + return nil + } + if len(p.pendingConfirmResults[targetTxIndex]) == 0 { // this is the last result to check, and it is not valid + atomic.CompareAndSwapInt32(&targetResult.txReq.runnable, 0, 1) // needs redo + p.debugConflictRedoNum++ + // interrupt its current routine, and switch to the other routine + p.switchSlot(staticSlotIndex) + return nil + } + continue + } + if isStage2 
{ + // likely valid, but not sure, can not deliver + return nil + } + return targetResult + } +} + +// to confirm one txResult, return true if the result is valid +// if it is in Stage 2 it is a likely result, not 100% sure +func (p *ParallelStateProcessor) toConfirmTxIndexResult(txResult *ParallelTxResult, isStage2 bool) bool { + txReq := txResult.txReq + if p.hasConflict(txResult, isStage2) { + return false + } + if isStage2 { // not its turn + return true // likely valid, not sure, not finalized right now. + } + + // goroutine unsafe operation will be handled from here for safety + gasConsumed := txReq.gasLimit - txResult.gpSlot.Gas() + if gasConsumed != txResult.result.UsedGas { + log.Error("gasConsumed != result.UsedGas mismatch", + "gasConsumed", gasConsumed, "result.UsedGas", txResult.result.UsedGas) + } + + // ok, time to do finalize, stage2 should not be parallel + header := txReq.block.Header() + txResult.receipt, txResult.err = applyTransactionStageFinalization(txResult.evm, txResult.result, + txReq.msg, p.config, txResult.slotDB, header, + txReq.tx, txReq.usedGas, txReq.bloomProcessor) + return true +} + +func (p *ParallelStateProcessor) runSlotLoop(slotIndex int, slotType int32) { + curSlot := p.slotState[slotIndex] + var wakeupChan chan struct{} + var stopChan chan struct{} + + if slotType == parallelPrimarySlot { + wakeupChan = curSlot.primaryWakeUpChan + stopChan = curSlot.primaryStopChan + } else { + wakeupChan = curSlot.shadowWakeUpChan + stopChan = curSlot.shadowStopChan + } + for { + select { + case <-stopChan: + p.stopSlotChan <- struct{}{} + continue + case <-wakeupChan: + } + + interrupted := false + for _, txReq := range curSlot.pendingTxReqList { + if txReq.txIndex <= p.mergedTxIndex { + continue + } + + if atomic.LoadInt32(&curSlot.activatedType) != slotType { + interrupted = true + break + } + if !atomic.CompareAndSwapInt32(&txReq.runnable, 1, 0) { + // not swapped: txReq.runnable == 0 + continue + } + p.txResultChan <- p.executeInSlot(slotIndex, txReq) + } + // switched to the other slot. + if interrupted { + continue + } + + // txReq in this Slot have all been executed, try steal one from other slot. + // as long as the TxReq is runnable, we steal it, mark it as stolen + for _, stealTxReq := range p.allTxReqs { + if stealTxReq.txIndex <= p.mergedTxIndex { + continue + } + if atomic.LoadInt32(&curSlot.activatedType) != slotType { + interrupted = true + break + } + + if !atomic.CompareAndSwapInt32(&stealTxReq.runnable, 1, 0) { + // not swapped: txReq.runnable == 0 + continue + } + p.txResultChan <- p.executeInSlot(slotIndex, stealTxReq) + } + } +} + +func (p *ParallelStateProcessor) runConfirmStage2Loop() { + for { + // var mergedTxIndex int + select { + case <-p.stopConfirmStage2Chan: + for len(p.confirmStage2Chan) > 0 { + <-p.confirmStage2Chan + } + p.stopSlotChan <- struct{}{} + continue + case <-p.confirmStage2Chan: + for len(p.confirmStage2Chan) > 0 { + <-p.confirmStage2Chan // drain the chan to get the latest merged txIndex + } + } + // stage 2,if all tx have been executed at least once, and its result has been received. + // in Stage 2, we will run check when merge is advanced. + // more aggressive tx result confirm, even for these Txs not in turn + // now we will be more aggressive: + // do conflict check , as long as tx result is generated, + // if lucky, it is the Tx's turn, we will do conflict check with WBNB makeup + // otherwise, do conflict check without WBNB makeup, but we will ignore WBNB's balance conflict. 
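+		// the scan below covers a window of stage2CheckNumber txs starting just
+		// beyond the next tx to be merged;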
+ // throw these likely conflicted tx back to re-execute + startTxIndex := p.mergedTxIndex + 2 // stage 2's will start from the next target merge index + endTxIndex := startTxIndex + stage2CheckNumber + txSize := len(p.allTxReqs) + if endTxIndex > (txSize - 1) { + endTxIndex = txSize - 1 + } + log.Debug("runConfirmStage2Loop", "startTxIndex", startTxIndex, "endTxIndex", endTxIndex) + // conflictNumMark := p.debugConflictRedoNum + for txIndex := startTxIndex; txIndex < endTxIndex; txIndex++ { + p.toConfirmTxIndex(txIndex, true) + } + // make sure all slots are wake up + for i := 0; i < p.parallelNum; i++ { + p.switchSlot(i) + } + } + +} + +func (p *ParallelStateProcessor) handleTxResults() *ParallelTxResult { + confirmedResult := p.toConfirmTxIndex(p.mergedTxIndex+1, false) + if confirmedResult == nil { + return nil + } + // schedule stage 2 when new Tx has been merged, schedule once and ASAP + // stage 2,if all tx have been executed at least once, and its result has been received. + // in Stage 2, we will run check when main DB is advanced, i.e., new Tx result has been merged. + if p.inConfirmStage2 && p.mergedTxIndex >= p.nextStage2TxIndex { + p.nextStage2TxIndex = p.mergedTxIndex + stage2CheckNumber + p.confirmStage2Chan <- p.mergedTxIndex + } + return confirmedResult +} + +// wait until the next Tx is executed and its result is merged to the main stateDB +func (p *ParallelStateProcessor) confirmTxResults(statedb *state.StateDB, gp *GasPool) *ParallelTxResult { + result := p.handleTxResults() + if result == nil { + return nil + } + // ok, the tx result is valid and can be merged + + if err := gp.SubGas(result.receipt.GasUsed); err != nil { + log.Error("gas limit reached", "block", result.txReq.block.Number(), + "txIndex", result.txReq.txIndex, "GasUsed", result.receipt.GasUsed, "gp.Gas", gp.Gas()) + } + resultTxIndex := result.txReq.txIndex + statedb.MergeSlotDB(result.slotDB, result.receipt, resultTxIndex) + + if resultTxIndex != p.mergedTxIndex+1 { + log.Error("ProcessParallel tx result out of order", "resultTxIndex", resultTxIndex, + "p.mergedTxIndex", p.mergedTxIndex) + } + p.mergedTxIndex = resultTxIndex + // log.Debug("confirmTxResults result is merged", "result.slotIndex", result.slotIndex, + // "TxIndex", result.txReq.txIndex, "p.mergedTxIndex", p.mergedTxIndex) + return result +} + +func (p *ParallelStateProcessor) doCleanUp() { + // 1.clean up all slot: primary and shadow, to make sure they are stopped + for _, slot := range p.slotState { + slot.primaryStopChan <- struct{}{} + slot.shadowStopChan <- struct{}{} + <-p.stopSlotChan + <-p.stopSlotChan + } + // 2.discard delayed txResults if any + for { + if len(p.txResultChan) > 0 { // drop prefetch addr? + <-p.txResultChan + continue + } + break + } + // 3.make sure the confirmation routine is stopped + p.stopConfirmStage2Chan <- struct{}{} + <-p.stopSlotChan +} + +// Implement BEP-130: Parallel Transaction Execution. 
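+// Overall flow: wrap each tx into a ParallelTxRequest, statically dispatch the
+// requests to execution slots, let the slot goroutines run them out of order
+// against per-tx slot DBs, confirm results in tx-index order (re-executing any
+// tx whose recorded reads conflict), and merge each confirmed slot DB back into
+// the main StateDB.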
+func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (*state.StateDB, types.Receipts, []*types.Log, uint64, error) { + var ( + usedGas = new(uint64) + header = block.Header() + gp = new(GasPool).AddGas(block.GasLimit()) + ) + var receipts = make([]*types.Receipt, 0) + txNum := len(block.Transactions()) + p.resetState(txNum, statedb) + // Iterate over and process the individual transactions + posa, isPoSA := p.engine.(consensus.PoSA) + commonTxs := make([]*types.Transaction, 0, txNum) + // usually do have two tx, one for validator set contract, another for system reward contract. + systemTxs := make([]*types.Transaction, 0, 2) + + signer, _, bloomProcessor := p.preExecute(block, statedb, cfg, true) + // var txReqs []*ParallelTxRequest + for i, tx := range block.Transactions() { + if isPoSA { + if isSystemTx, err := posa.IsSystemTransaction(tx, block.Header()); err != nil { + bloomProcessor.Close() + return statedb, nil, nil, 0, err + } else if isSystemTx { + systemTxs = append(systemTxs, tx) + continue + } + } + + // can be moved it into slot for efficiency, but signer is not concurrent safe + // Parallel Execution 1.0&2.0 is for full sync mode, Nonce PreCheck is not necessary + // And since we will do out-of-order execution, the Nonce PreCheck could fail. + // We will disable it and leave it to Parallel 3.0 which is for validator mode + msg, err := tx.AsMessageNoNonceCheck(signer, header.BaseFee) + if err != nil { + bloomProcessor.Close() + return statedb, nil, nil, 0, err + } + + // parallel start, wrap an exec message, which will be dispatched to a slot + txReq := &ParallelTxRequest{ + txIndex: i, + baseStateDB: statedb, + staticSlotIndex: -1, + tx: tx, + gasLimit: block.GasLimit(), // gp.Gas(). + msg: msg, + block: block, + vmConfig: cfg, + bloomProcessor: bloomProcessor, + usedGas: usedGas, + curTxChan: make(chan int, 1), + systemAddrRedo: false, // set to true, when systemAddr access is detected. + runnable: 1, // 0: not runnable, 1: runnable + executedNum: 0, + } + p.allTxReqs = append(p.allTxReqs, txReq) + } + // set up stage2 enter criteria + p.targetStage2Count = len(p.allTxReqs) + if p.targetStage2Count > 50 { + // usually, the last Tx could be the bottleneck it could be very slow, + // so it is better for us to enter stage 2 a bit earlier + p.targetStage2Count = p.targetStage2Count - stage2AheadNum + } + + p.doStaticDispatch(p.allTxReqs) // todo: put txReqs in unit? + // after static dispatch, we notify the slot to work. + for _, slot := range p.slotState { + slot.primaryWakeUpChan <- struct{}{} + } + // wait until all Txs have processed. 
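+	// the loop below does two jobs: it collects unconfirmed results from the
+	// slots, and after each arrival merges as many in-order confirmed results
+	// as possible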
+ for { + if len(commonTxs)+len(systemTxs) == txNum { + // put it ahead of chan receive to avoid waiting for empty block + break + } + + unconfirmedResult := <-p.txResultChan + unconfirmedTxIndex := unconfirmedResult.txReq.txIndex + if unconfirmedTxIndex <= p.mergedTxIndex { + // log.Warn("drop merged txReq", "unconfirmedTxIndex", unconfirmedTxIndex, "p.mergedTxIndex", p.mergedTxIndex) + continue + } + p.pendingConfirmResults[unconfirmedTxIndex] = append(p.pendingConfirmResults[unconfirmedTxIndex], unconfirmedResult) + + // schedule prefetch once only when unconfirmedResult is valid + if unconfirmedResult.err == nil { + if _, ok := p.txReqExecuteRecord[unconfirmedTxIndex]; !ok { + p.txReqExecuteRecord[unconfirmedTxIndex] = 0 + p.txReqExecuteCount++ + statedb.AddrPrefetch(unconfirmedResult.slotDB) // todo: prefetch when it is not merged + // enter stage2, RT confirm + if !p.inConfirmStage2 && p.txReqExecuteCount == p.targetStage2Count { + p.inConfirmStage2 = true + } + + } + p.txReqExecuteRecord[unconfirmedTxIndex]++ + } + + for { + result := p.confirmTxResults(statedb, gp) + if result == nil { + break + } + // update tx result + if result.err != nil { + log.Error("ProcessParallel a failed tx", "resultSlotIndex", result.slotIndex, + "resultTxIndex", result.txReq.txIndex, "result.err", result.err) + p.doCleanUp() + bloomProcessor.Close() + return statedb, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", result.txReq.txIndex, result.txReq.tx.Hash().Hex(), result.err) + } + commonTxs = append(commonTxs, result.txReq.tx) + receipts = append(receipts, result.receipt) + } + } + // to do clean up when the block is processed + p.doCleanUp() + + // len(commonTxs) could be 0, such as: https://bscscan.com/block/14580486 + if len(commonTxs) > 0 { + log.Info("ProcessParallel tx all done", "block", header.Number, "usedGas", *usedGas, + "txNum", txNum, + "len(commonTxs)", len(commonTxs), + "conflictNum", p.debugConflictRedoNum, + "redoRate(%)", 100*(p.debugConflictRedoNum)/len(commonTxs)) + } + allLogs, err := p.postExecute(block, statedb, &commonTxs, &receipts, &systemTxs, usedGas, bloomProcessor) + return statedb, receipts, allLogs, *usedGas, err +} + +// Before transactions are executed, do shared preparation for Process() & ProcessParallel() +func (p *StateProcessor) preExecute(block *types.Block, statedb *state.StateDB, cfg vm.Config, parallel bool) (types.Signer, *vm.EVM, *AsyncReceiptBloomGenerator) { + signer := types.MakeSigner(p.bc.chainConfig, block.Number()) + // Mutate the block and state according to any hard-fork specs + if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 { + misc.ApplyDAOHardFork(statedb) + } + // Handle upgrade build-in system contract code + systemcontracts.UpgradeBuildInSystemContract(p.config, block.Number(), statedb) + + // with parallel mode, vmenv will be created inside of slot + var vmenv *vm.EVM + if !parallel { + blockContext := NewEVMBlockContext(block.Header(), p.bc, nil) + vmenv = vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg) + } + + // initialise bloom processors + bloomProcessor := NewAsyncReceiptBloomGenerator(len(block.Transactions())) + statedb.MarkFullProcessed() + + return signer, vmenv, bloomProcessor +} + +func (p *StateProcessor) postExecute(block *types.Block, statedb *state.StateDB, commonTxs *[]*types.Transaction, + receipts *[]*types.Receipt, systemTxs *[]*types.Transaction, usedGas *uint64, bloomProcessor *AsyncReceiptBloomGenerator) ([]*types.Log, error) { + 
allLogs := make([]*types.Log, 0, len(*receipts)) + + bloomProcessor.Close() + + // Finalize the block, applying any consensus engine specific extras (e.g. block rewards) + err := p.engine.Finalize(p.bc, block.Header(), statedb, commonTxs, block.Uncles(), receipts, systemTxs, usedGas) + if err != nil { + return allLogs, err + } + for _, receipt := range *receipts { + allLogs = append(allLogs, receipt.Logs...) + } + return allLogs, nil +} + // Process processes the state changes according to the Ethereum rules by running // the transaction messages using the statedb and applying any rewards to both // the processor (coinbase) and any included uncles. @@ -388,33 +1097,22 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg ) var receipts = make([]*types.Receipt, 0) - // Mutate the block and state according to any hard-fork specs - if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 { - misc.ApplyDAOHardFork(statedb) - } - // Handle upgrade build-in system contract code - systemcontracts.UpgradeBuildInSystemContract(p.config, block.Number(), statedb) - - blockContext := NewEVMBlockContext(header, p.bc, nil) - vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg) - txNum := len(block.Transactions()) + if txNum > 0 { + log.Info("Process", "block", header.Number, "txNum", txNum) + } // Iterate over and process the individual transactions posa, isPoSA := p.engine.(consensus.PoSA) commonTxs := make([]*types.Transaction, 0, txNum) - // initialise bloom processors - bloomProcessors := NewAsyncReceiptBloomGenerator(txNum) - statedb.MarkFullProcessed() - signer := types.MakeSigner(p.config, header.Number) - // usually do have two tx, one for validator set contract, another for system reward contract. systemTxs := make([]*types.Transaction, 0, 2) + signer, vmenv, bloomProcessor := p.preExecute(block, statedb, cfg, false) for i, tx := range block.Transactions() { if isPoSA { if isSystemTx, err := posa.IsSystemTransaction(tx, block.Header()); err != nil { - bloomProcessors.Close() + bloomProcessor.Close() return statedb, nil, nil, 0, err } else if isSystemTx { systemTxs = append(systemTxs, tx) @@ -424,31 +1122,21 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg msg, err := tx.AsMessage(signer, header.BaseFee) if err != nil { - bloomProcessors.Close() + bloomProcessor.Close() return statedb, nil, nil, 0, err } statedb.Prepare(tx.Hash(), i) - - receipt, err := applyTransaction(msg, p.config, p.bc, nil, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv, bloomProcessors) + receipt, err := applyTransaction(msg, p.config, p.bc, nil, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv, bloomProcessor) if err != nil { - bloomProcessors.Close() + bloomProcessor.Close() return statedb, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err) } commonTxs = append(commonTxs, tx) receipts = append(receipts, receipt) } - bloomProcessors.Close() - - // Finalize the block, applying any consensus engine specific extras (e.g. block rewards) - err := p.engine.Finalize(p.bc, header, statedb, &commonTxs, block.Uncles(), &receipts, &systemTxs, usedGas) - if err != nil { - return statedb, receipts, allLogs, *usedGas, err - } - for _, receipt := range receipts { - allLogs = append(allLogs, receipt.Logs...) 
-	}
-	return statedb, receipts, allLogs, *usedGas, nil
+	allLogs, err := p.postExecute(block, statedb, &commonTxs, &receipts, &systemTxs, usedGas, bloomProcessor)
+	return statedb, receipts, allLogs, *usedGas, err
 }
 
 func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM, receiptProcessors ...ReceiptProcessor) (*types.Receipt, error) {
@@ -498,6 +1186,57 @@
 	return receipt, err
 }
 
+func applyTransactionStageExecution(msg types.Message, gp *GasPool, statedb *state.ParallelStateDB, evm *vm.EVM) (*vm.EVM, *ExecutionResult, error) {
+	// Create a new context to be used in the EVM environment.
+	txContext := NewEVMTxContext(msg)
+	evm.Reset(txContext, statedb)
+
+	// Apply the transaction to the current state (included in the env).
+	result, err := ApplyMessage(evm, msg, gp)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return evm, result, err
+}
+
+func applyTransactionStageFinalization(evm *vm.EVM, result *ExecutionResult, msg types.Message, config *params.ChainConfig, statedb *state.ParallelStateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, receiptProcessors ...ReceiptProcessor) (*types.Receipt, error) {
+	// Update the state with pending changes.
+	var root []byte
+	if config.IsByzantium(header.Number) {
+		statedb.Finalise(true)
+	} else {
+		root = statedb.IntermediateRoot(config.IsEIP158(header.Number)).Bytes()
+	}
+	*usedGas += result.UsedGas
+
+	// Create a new receipt for the transaction, storing the intermediate root and gas used
+	// by the tx.
+	receipt := &types.Receipt{Type: tx.Type(), PostState: root, CumulativeGasUsed: *usedGas}
+	if result.Failed() {
+		receipt.Status = types.ReceiptStatusFailed
+	} else {
+		receipt.Status = types.ReceiptStatusSuccessful
+	}
+	receipt.TxHash = tx.Hash()
+	receipt.GasUsed = result.UsedGas
+
+	// If the transaction created a contract, store the creation address in the receipt.
+	if msg.To() == nil {
+		receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce())
+	}
+
+	// Set the receipt logs and create the bloom filter.
+	receipt.Logs = statedb.GetLogs(tx.Hash(), header.Hash())
+	receipt.BlockHash = header.Hash()
+	receipt.BlockNumber = header.Number
+	receipt.TransactionIndex = uint(statedb.TxIndex())
+	for _, receiptProcessor := range receiptProcessors {
+		receiptProcessor.Apply(receipt)
+	}
+	return receipt, nil
+}
+
 // ApplyTransaction attempts to apply a transaction to the given state database
 // and uses the input parameters for its environment. It returns the receipt
 // for the transaction, gas used and an error if the transaction failed,
diff --git a/core/types/transaction.go b/core/types/transaction.go
index 1e6c45cd7f..b9e18bcf40 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -690,8 +690,13 @@ func (tx *Transaction) AsMessage(s Signer, baseFee *big.Int) (Message, error) {
 }
 
 // AsMessageNoNonceCheck returns the transaction with checkNonce field set to be false.
-func (tx *Transaction) AsMessageNoNonceCheck(s Signer) (Message, error) {
+func (tx *Transaction) AsMessageNoNonceCheck(s Signer, baseFee *big.Int) (Message, error) {
 	msg, err := tx.AsMessage(s, nil)
+	// If baseFee is provided, set gasPrice to the effective gas price (EIP-1559).
+	if baseFee != nil {
+		msg.gasPrice = math.BigMin(msg.gasPrice.Add(msg.gasTipCap, baseFee), msg.gasFeeCap)
+	}
+
 	if err == nil {
 		msg.isFake = true
 	}
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 339b8e2993..6fcfd3b89a 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -241,6 +241,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
 		contract := NewContract(caller, AccountRef(addrCopy), value, gas)
 		contract.SetCallCode(&addrCopy, evm.StateDB.GetCodeHash(addrCopy), code)
 		ret, err = evm.interpreter.Run(contract, input, false)
+		evm.StateDB.ParallelMakeUp(addr, input)
 		gas = contract.Gas
 	}
 }
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index 92be3bf259..5bd93dedb4 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -261,7 +261,7 @@ func opAddress(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]
 func opBalance(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
 	slot := scope.Stack.peek()
 	address := common.Address(slot.Bytes20())
-	slot.SetFromBig(interpreter.evm.StateDB.GetBalance(address))
+	slot.SetFromBig(interpreter.evm.StateDB.GetBalanceOpCode(address))
 	return nil, nil
 }
diff --git a/core/vm/interface.go b/core/vm/interface.go
index ad9b05d666..be263002b7 100644
--- a/core/vm/interface.go
+++ b/core/vm/interface.go
@@ -30,6 +30,7 @@ type StateDB interface {
 	SubBalance(common.Address, *big.Int)
 	AddBalance(common.Address, *big.Int)
 	GetBalance(common.Address) *big.Int
+	GetBalanceOpCode(common.Address) *big.Int
 	GetNonce(common.Address) uint64
 	SetNonce(common.Address, uint64)
@@ -74,6 +75,7 @@ type StateDB interface {
 	AddPreimage(common.Hash, []byte)
 	ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) error
+	ParallelMakeUp(addr common.Address, input []byte)
 }
diff --git a/eth/backend.go b/eth/backend.go
index e9c077e6a7..3801bf9e5f 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -214,6 +214,8 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 	bcOps := make([]core.BlockChainOption, 0)
 	if config.DiffSync && !config.PipeCommit && config.TriesVerifyMode == core.LocalVerify {
 		bcOps = append(bcOps, core.EnableLightProcessor)
+	} else if config.ParallelTxMode {
+		bcOps = append(bcOps, core.EnableParallelProcessor(config.ParallelTxNum))
 	}
 	if config.PipeCommit {
 		bcOps = append(bcOps, core.EnablePipelineCommit)
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 03104c6109..2a83210a86 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -140,12 +140,14 @@ type Config struct {
 	NoPruning       bool // Whether to disable pruning and flush everything to disk
 	DirectBroadcast bool
-	DisableSnapProtocol bool //Whether disable snap protocol
-	DisableDiffProtocol bool //Whether disable diff protocol
-	EnableTrustProtocol bool //Whether enable trust protocol
+	DisableSnapProtocol bool // Whether to disable the snap protocol
+	DisableDiffProtocol bool // Whether to disable the diff protocol
+	EnableTrustProtocol bool // Whether to enable the trust protocol
 	DiffSync            bool // Whether support diff sync
 	PipeCommit          bool
 	RangeLimit          bool
+	ParallelTxMode      bool // Whether to execute transactions in parallel during full sync
+	ParallelTxNum       int  // Number of slots for parallel transaction execution
 
 	TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
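Aside: the effective-gas-price rule applied in AsMessageNoNonceCheck above is the standard EIP-1559 one, effectiveGasPrice = min(gasTipCap + baseFee, gasFeeCap). A minimal standalone sketch of that arithmetic follows; the bigMin helper is a stand-in with the same semantics as the math.BigMin used in the diff, and the wei values are made-up examples:

package main

import (
	"fmt"
	"math/big"
)

// bigMin mirrors the semantics of geth's math.BigMin helper:
// it returns the smaller of x and y.
func bigMin(x, y *big.Int) *big.Int {
	if x.Cmp(y) > 0 {
		return y
	}
	return x
}

func main() {
	// Made-up EIP-1559 values, in wei.
	gasTipCap := big.NewInt(2_000_000_000)  // maxPriorityFeePerGas: 2 gwei
	gasFeeCap := big.NewInt(50_000_000_000) // maxFeePerGas: 50 gwei
	baseFee := big.NewInt(30_000_000_000)   // current block base fee: 30 gwei

	// effectiveGasPrice = min(gasTipCap + baseFee, gasFeeCap),
	// the same expression assigned to msg.gasPrice above.
	effective := bigMin(new(big.Int).Add(gasTipCap, baseFee), gasFeeCap)
	fmt.Println("effective gas price:", effective) // 32000000000 (32 gwei)
}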
diff --git a/go.mod b/go.mod
index b0e5845fb7..b75e70923d 100644
--- a/go.mod
+++ b/go.mod
@@ -61,6 +61,7 @@ require (
 	github.com/olekukonko/tablewriter v0.0.5
 	github.com/panjf2000/ants/v2 v2.4.5
 	github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7
+	github.com/prometheus/client_golang v1.0.0
 	github.com/prometheus/tsdb v0.7.1
 	github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
 	github.com/rjeczalik/notify v0.9.1
diff --git a/metrics/exp/exp.go b/metrics/exp/exp.go
index 3ebe8cc68a..563a55bf65 100644
--- a/metrics/exp/exp.go
+++ b/metrics/exp/exp.go
@@ -8,6 +8,8 @@ import (
 	"net/http"
 	"sync"
 
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/metrics/prometheus"
@@ -44,6 +46,7 @@ func Exp(r metrics.Registry) {
 	// http.HandleFunc("/debug/vars", e.expHandler)
 	// haven't found an elegant way, so just use a different endpoint
 	http.Handle("/debug/metrics", h)
+	http.Handle("/debug/metrics/go_prometheus", promhttp.Handler())
 	http.Handle("/debug/metrics/prometheus", prometheus.Handler(r))
 }
 
@@ -58,6 +61,7 @@ func ExpHandler(r metrics.Registry) http.Handler {
 func Setup(address string) {
 	m := http.NewServeMux()
 	m.Handle("/debug/metrics", ExpHandler(metrics.DefaultRegistry))
+	m.Handle("/debug/metrics/go_prometheus", promhttp.Handler())
 	m.Handle("/debug/metrics/prometheus", prometheus.Handler(metrics.DefaultRegistry))
 	log.Info("Starting metrics server", "addr", fmt.Sprintf("http://%s/debug/metrics", address))
 	go func() {
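The /debug/metrics/go_prometheus endpoint added above serves the prometheus/client_golang default registry (Go runtime and process metrics) alongside the geth-native registry already exposed at /debug/metrics/prometheus. A quick client sketch to verify it against a running node; the address 127.0.0.1:6060 is an assumption based on geth's default --metrics.addr/--metrics.port, adjust if configured differently:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Assumes a node started with --metrics; 127.0.0.1:6060 is geth's
	// default metrics listen address.
	resp, err := http.Get("http://127.0.0.1:6060/debug/metrics/go_prometheus")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// promhttp.Handler() emits the Prometheus text exposition format, so the
	// output should include collectors such as go_goroutines and process_*.
	fmt.Printf("%s", body)
}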