Merge pull request #850 from bnb-chain/develop
[R4R] Release v1.1.9
unclezoro authored Apr 8, 2022
2 parents 859186f + 1aeadc1 commit 74ecbf2
Showing 27 changed files with 566 additions and 146 deletions.
4 changes: 2 additions & 2 deletions .github/release.env
@@ -1,2 +1,2 @@
MAINNET_FILE_URL="https://github.com/binance-chain/bsc/releases/download/v1.1.7/mainnet.zip"
TESTNET_FILE_URL="https://github.com/binance-chain/bsc/releases/download/v1.1.7/testnet.zip"
MAINNET_FILE_URL="https://github.com/binance-chain/bsc/releases/download/v1.1.8/mainnet.zip"
TESTNET_FILE_URL="https://github.com/binance-chain/bsc/releases/download/v1.1.8/testnet.zip"
19 changes: 19 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,24 @@
# Changelog

## v1.1.9

IMPROVEMENT
* [\#792](https://github.com/binance-chain/bsc/pull/792) add shared storage for prefetching state data
* [\#795](https://github.com/binance-chain/bsc/pull/795) implement state verification pipeline in pipecommit
* [\#803](https://github.com/binance-chain/bsc/pull/803) prefetch state data during the mining process
* [\#812](https://github.com/bnb-chain/bsc/pull/812) skip verification of the account storage root to tolerate fast nodes when doing diffsync
* [\#818](https://github.com/bnb-chain/bsc/pull/818) add shared storage to the miner's prefetcher
* [\#820](https://github.com/bnb-chain/bsc/pull/820) disable diffsync when pipecommit is enabled
* [\#830](https://github.com/bnb-chain/bsc/pull/830) change the number of prefetch threads

BUGFIX
* [\#797](https://github.com/bnb-chain/bsc/pull/797) fix race condition on preimage in pipecommit
* [\#808](https://github.com/bnb-chain/bsc/pull/808) fix the code of the difflayer not being assigned when a new smart contract is created
* [\#817](https://github.com/bnb-chain/bsc/pull/817) fix bugs in the prune block tool
* [\#834](https://github.com/bnb-chain/bsc/pull/834) fix deadlock when state root verification fails in pipecommit
* [\#835](https://github.com/bnb-chain/bsc/pull/835) fix deadlock in the miner module when trie commit fails
* [\#842](https://github.com/bnb-chain/bsc/pull/842) fix invalid nil check of statedb in diffsync

## v1.1.8
FEATURES
* [\#668](https://github.com/binance-chain/bsc/pull/668) implement State Verification && Snapshot Commit pipeline
3 changes: 2 additions & 1 deletion cmd/geth/snapshot.go
@@ -278,7 +278,8 @@ func pruneBlock(ctx *cli.Context) error {
var newAncientPath string
oldAncientPath := ctx.GlobalString(utils.AncientFlag.Name)
if !filepath.IsAbs(oldAncientPath) {
oldAncientPath = stack.ResolvePath(oldAncientPath)
// require an absolute path; relative paths often fail here because of how they are spliced
return errors.New("datadir.ancient not abs path")
}

path, _ := filepath.Split(oldAncientPath)
2 changes: 1 addition & 1 deletion cmd/utils/flags.go
@@ -129,7 +129,7 @@ var (
}
PipeCommitFlag = cli.BoolFlag{
Name: "pipecommit",
Usage: "Enable MPT pipeline commit, it will improve syncing performance. It is an experimental feature(default is false)",
Usage: "Enable MPT pipeline commit, it will improve syncing performance. It is an experimental feature(default is false), diffsync will be disable if pipeline commit is enabled",
}
RangeLimitFlag = cli.BoolFlag{
Name: "rangelimit",
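Per the updated usage text (and changelog item #820), enabling pipeline commit switches diffsync off. Below is a minimal standalone sketch of that precedence rule; the function and variable names are illustrative, not the client's actual flag-handling code.

```go
package main

import "fmt"

// effectiveModes mirrors the documented precedence: when pipecommit is
// enabled, diffsync is forced off regardless of its own flag.
func effectiveModes(pipeCommit, diffSync bool) (bool, bool) {
	if pipeCommit {
		diffSync = false
	}
	return pipeCommit, diffSync
}

func main() {
	pc, ds := effectiveModes(true, true)
	fmt.Println("pipecommit:", pc, "diffsync:", ds) // pipecommit: true diffsync: false
}
```
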
8 changes: 5 additions & 3 deletions core/block_validator.go
@@ -112,7 +112,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
// transition, such as amount of used gas, the receipt roots and the state root
// itself. ValidateState returns a database batch if the validation was a success
// otherwise nil and an error is returned.
func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, receipts types.Receipts, usedGas uint64, skipHeavyVerify bool) error {
func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, receipts types.Receipts, usedGas uint64) error {
header := block.Header()
if block.GasUsed() != usedGas {
return fmt.Errorf("invalid gas used (remote: %d local: %d)", block.GasUsed(), usedGas)
@@ -135,13 +135,15 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
return nil
},
}
if skipHeavyVerify {
if statedb.IsPipeCommit() {
validateFuns = append(validateFuns, func() error {
if err := statedb.WaitPipeVerification(); err != nil {
return err
}
statedb.CorrectAccountsRoot()
statedb.Finalise(v.config.IsEIP158(header.Number))
statedb.AccountsIntermediateRoot()
// State verification pipeline: the accounts root is not calculated here; just populate the fields needed for processing
statedb.PopulateSnapAccountAndStorage()
return nil
})
} else {
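The hunk above switches the heavy-verification branch from a caller-supplied flag to statedb.IsPipeCommit(), and both branches append closures to validateFuns. Here is a self-contained sketch of that collect-checks-as-closures pattern; the simple sequential runner below is illustrative, not the validator's actual implementation.

```go
package main

import (
	"errors"
	"fmt"
)

// runValidateFuns runs each validation closure and surfaces the first error,
// mirroring how ValidateState gathers its checks before executing them.
func runValidateFuns(funcs []func() error) error {
	for _, f := range funcs {
		if err := f(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	pipeCommit := true

	validateFuns := []func() error{
		func() error { return nil }, // e.g. gas usage / receipt hash checks
	}
	if pipeCommit {
		// Pipecommit path: wait for the async verification pipeline instead of
		// recomputing the full state root here.
		validateFuns = append(validateFuns, func() error { return nil })
	} else {
		// Non-pipecommit path: compare the locally computed root directly.
		validateFuns = append(validateFuns, func() error {
			return errors.New("invalid merkle root")
		})
	}
	fmt.Println(runValidateFuns(validateFuns)) // <nil>, since the pipecommit branch only waits
}
```
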
9 changes: 7 additions & 2 deletions core/blockchain.go
@@ -842,6 +842,11 @@ func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
return state.New(root, bc.stateCache, bc.snaps)
}

// StateAtWithSharedPool returns a new mutable state based on a particular point in time with sharedStorage
func (bc *BlockChain) StateAtWithSharedPool(root common.Hash) (*state.StateDB, error) {
return state.NewWithSharedPool(root, bc.stateCache, bc.snaps)
}

// StateCache returns the caching database underpinning the blockchain instance.
func (bc *BlockChain) StateCache() state.Database {
return bc.stateCache
@@ -2101,7 +2106,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
if parent == nil {
parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
}
statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
statedb, err := state.NewWithSharedPool(parent.Root, bc.stateCache, bc.snaps)
if err != nil {
return it.index, err
}
@@ -2143,7 +2148,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
// Validate the state using the default validator
substart = time.Now()
if !statedb.IsLightProcessed() {
if err := bc.validator.ValidateState(block, statedb, receipts, usedGas, bc.pipeCommit); err != nil {
if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
log.Error("validate state failed", "error", err)
bc.reportBlock(block, receipts, err)
return it.index, err
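The new StateAtWithSharedPool (and the switch to state.NewWithSharedPool in insertChain) lets block import and prefetching share one storage pool. A hedged usage sketch follows, assuming the BSC fork's usual module path github.com/ethereum/go-ethereum; the error handling and downstream consumer are illustrative.

```go
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
)

// importState shows how a caller might build a StateDB backed by the shared
// storage pool before processing a block, as insertChain now does.
func importState(bc *core.BlockChain, parentRoot common.Hash) error {
	statedb, err := bc.StateAtWithSharedPool(parentRoot)
	if err != nil {
		return err
	}
	_ = statedb // hand the state off to the block processor / prefetcher
	return nil
}

func main() {
	log.Println("see importState; wiring up a full BlockChain is out of scope for this sketch")
}
```
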
2 changes: 1 addition & 1 deletion core/blockchain_test.go
@@ -209,7 +209,7 @@ func testBlockChainImport(chain types.Blocks, pipelineCommit bool, blockchain *B
blockchain.reportBlock(block, receipts, err)
return err
}
err = blockchain.validator.ValidateState(block, statedb, receipts, usedGas, pipelineCommit)
err = blockchain.validator.ValidateState(block, statedb, receipts, usedGas)
if err != nil {
blockchain.reportBlock(block, receipts, err)
return err
3 changes: 3 additions & 0 deletions core/headerchain.go
@@ -250,6 +250,9 @@ func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWrit
headHeader = hc.GetHeader(headHash, headNumber)
)
for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash {
if frozen, _ := hc.chainDb.Ancients(); frozen == headNumber {
break
}
rawdb.WriteCanonicalHash(markerBatch, headHash, headNumber)
headHash = headHeader.ParentHash
headNumber = headHeader.Number.Uint64() - 1
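The added guard stops the canonical-hash rewrite loop once the walk reaches the frozen (ancient) boundary. Below is a tiny standalone illustration of that stop condition; the loop body and names are simplified and not the real writeHeaders logic.

```go
package main

import "fmt"

// rewriteCanonical walks head numbers downward, stopping early at the frozen
// boundary so ancient (immutable) entries are never touched.
func rewriteCanonical(headNumber, frozen, stopAt uint64) []uint64 {
	var rewritten []uint64
	for headNumber > stopAt {
		if frozen == headNumber {
			break
		}
		rewritten = append(rewritten, headNumber)
		headNumber--
	}
	return rewritten
}

func main() {
	fmt.Println(rewriteCanonical(105, 103, 100)) // [105 104]
}
```
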
5 changes: 4 additions & 1 deletion core/rawdb/chain_iterator.go
@@ -95,7 +95,10 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool
number uint64
rlp rlp.RawValue
}
if to == from {
if offset := db.AncientOffSet(); offset > from {
from = offset
}
if to <= from {
return nil
}
threads := to - from
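The hunk lifts the iteration start to the ancient offset and widens the early-return check from == to <=. Here is a standalone sketch of that clamp; clampTxRange is an illustrative helper, not part of the rawdb package.

```go
package main

import "fmt"

// clampTxRange lifts `from` to the ancient offset and reports whether any
// blocks remain to iterate, mirroring the guard added to iterateTransactions.
func clampTxRange(from, to, ancientOffset uint64) (uint64, uint64, bool) {
	if ancientOffset > from {
		from = ancientOffset
	}
	if to <= from {
		return from, to, false
	}
	return from, to, true
}

func main() {
	from, to, ok := clampTxRange(100, 5_000, 1_000)
	fmt.Println(from, to, ok) // 1000 5000 true

	_, _, ok = clampTxRange(100, 800, 1_000)
	fmt.Println(ok) // false: the whole range sits below the ancient offset
}
```
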
39 changes: 39 additions & 0 deletions core/state/shared_pool.go
@@ -0,0 +1,39 @@
package state

import (
"sync"

"github.com/ethereum/go-ethereum/common"
)

// StoragePool stores shared maps of originStorage for stateObjects
type StoragePool struct {
sync.RWMutex
sharedMap map[common.Address]*sync.Map
}

func NewStoragePool() *StoragePool {
sharedMap := make(map[common.Address]*sync.Map)
return &StoragePool{
sync.RWMutex{},
sharedMap,
}
}

// getStorage checks whether the storage map exists in the pool and creates a new one if it does not;
// the storage content itself will be fetched in stateObjects.GetCommittedState()
func (s *StoragePool) getStorage(address common.Address) *sync.Map {
s.RLock()
storageMap, ok := s.sharedMap[address]
s.RUnlock()
if !ok {
s.Lock()
defer s.Unlock()
if storageMap, ok = s.sharedMap[address]; !ok {
m := new(sync.Map)
s.sharedMap[address] = m
return m
}
}
return storageMap
}
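getStorage uses a read-lock fast path and re-checks under the write lock before creating the per-address map. The following is a self-contained illustration of that double-checked locking pattern; it is a simplified stand-in, not the state package's API (which only exports NewStoragePool).

```go
package main

import (
	"fmt"
	"sync"
)

// pool mirrors the StoragePool idea: an RWMutex-guarded map of per-key sync.Maps.
type pool struct {
	mu     sync.RWMutex
	shared map[string]*sync.Map
}

// get takes the read lock for the common case and re-checks under the write
// lock before creating, so two concurrent callers always end up sharing the
// same *sync.Map for a key.
func (p *pool) get(key string) *sync.Map {
	p.mu.RLock()
	m, ok := p.shared[key]
	p.mu.RUnlock()
	if ok {
		return m
	}
	p.mu.Lock()
	defer p.mu.Unlock()
	if m, ok = p.shared[key]; !ok {
		m = new(sync.Map)
		p.shared[key] = m
	}
	return m
}

func main() {
	p := &pool{shared: make(map[string]*sync.Map)}
	p.get("0xabc").Store("slot0", "value0")
	v, _ := p.get("0xabc").Load("slot0")
	fmt.Println(v) // value0
}
```
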
40 changes: 38 additions & 2 deletions core/state/snapshot/difflayer.go
@@ -118,8 +118,9 @@ type diffLayer struct {
storageList map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval. one per account (nil means deleted)

verifiedCh chan struct{} // the difflayer is verified when verifiedCh is nil or closed
valid bool // mark the difflayer is valid or not.
verifiedCh chan struct{} // the difflayer is verified when verifiedCh is nil or closed
valid bool // mark the difflayer is valid or not.
accountCorrected bool // mark whether the accountData has been corrected or not

diffed *bloomfilter.Filter // Bloom filter tracking all the diffed items up to the disk layer

@@ -182,6 +183,7 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s
storageList: make(map[common.Hash][]common.Hash),
verifiedCh: verified,
}

switch parent := parent.(type) {
case *diskLayer:
dl.rebloom(parent)
@@ -190,6 +192,7 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s
default:
panic("unknown parent type")
}

// Sanity check that accounts or storage slots are never nil
for accountHash, blob := range accounts {
if blob == nil {
@@ -286,6 +289,21 @@ func (dl *diffLayer) Verified() bool {
}
}

func (dl *diffLayer) CorrectAccounts(accounts map[common.Hash][]byte) {
dl.lock.Lock()
defer dl.lock.Unlock()

dl.accountData = accounts
dl.accountCorrected = true
}

func (dl *diffLayer) AccountsCorrected() bool {
dl.lock.RLock()
defer dl.lock.RUnlock()

return dl.accountCorrected
}

// Parent returns the subsequent layer of a diff layer.
func (dl *diffLayer) Parent() snapshot {
return dl.parent
@@ -314,6 +332,24 @@ func (dl *diffLayer) Account(hash common.Hash) (*Account, error) {
return account, nil
}

// Accounts directly retrieves all accounts in the current snapshot in
// the snapshot slim data format.
func (dl *diffLayer) Accounts() (map[common.Hash]*Account, error) {
dl.lock.RLock()
defer dl.lock.RUnlock()

accounts := make(map[common.Hash]*Account, len(dl.accountData))
for hash, data := range dl.accountData {
account := new(Account)
if err := rlp.DecodeBytes(data, account); err != nil {
return nil, err
}
accounts[hash] = account
}

return accounts, nil
}

// AccountRLP directly retrieves the account RLP associated with a particular
// hash in the snapshot slim data format.
//
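The difflayer comment states the convention: a nil or closed verifiedCh means the layer is verified, and valid records the result. Below is a standalone sketch of waiting on that channel; it is illustrative only (the snapshot package's real method is WaitAndGetVerifyRes, whose body is not shown in this diff).

```go
package main

import (
	"fmt"
	"time"
)

// waitVerified blocks until the verification channel is closed (a nil channel
// means there is nothing to wait for) and then reports the verifier's verdict.
func waitVerified(verifiedCh chan struct{}, valid *bool) bool {
	if verifiedCh == nil {
		return true
	}
	<-verifiedCh
	return *valid
}

func main() {
	verifiedCh := make(chan struct{})
	valid := false

	// The verification pipeline sets the flag and closes the channel when done.
	go func() {
		time.Sleep(10 * time.Millisecond)
		valid = true
		close(verifiedCh)
	}()

	fmt.Println(waitVerified(verifiedCh, &valid)) // true once verification completes
}
```
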
13 changes: 13 additions & 0 deletions core/state/snapshot/disklayer.go
@@ -59,6 +59,13 @@ func (dl *diskLayer) Verified() bool {
return true
}

func (dl *diskLayer) CorrectAccounts(map[common.Hash][]byte) {
}

func (dl *diskLayer) AccountsCorrected() bool {
return true
}

// Parent always returns nil as there's no layer below the disk.
func (dl *diskLayer) Parent() snapshot {
return nil
@@ -73,6 +80,12 @@ func (dl *diskLayer) Stale() bool {
return dl.stale
}

// Accounts directly retrieves all accounts in the current snapshot in
// the snapshot slim data format.
func (dl *diskLayer) Accounts() (map[common.Hash]*Account, error) {
return nil, nil
}

// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
func (dl *diskLayer) Account(hash common.Hash) (*Account, error) {
1 change: 1 addition & 0 deletions core/state/snapshot/journal.go
@@ -288,6 +288,7 @@ func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
if dl.Stale() {
return common.Hash{}, ErrSnapshotStale
}

// Everything below was journalled, persist this layer too
if err := rlp.Encode(buffer, dl.root); err != nil {
return common.Hash{}, err
22 changes: 21 additions & 1 deletion core/state/snapshot/snapshot.go
@@ -107,13 +107,23 @@ type Snapshot interface {
// Verified returns whether the snapshot is verified
Verified() bool

// Store the verification result
// MarkValid stores the verification result
MarkValid()

// CorrectAccounts updates account data for storing the correct data during pipecommit
CorrectAccounts(map[common.Hash][]byte)

// AccountsCorrected checks whether the account data has been corrected during pipecommit
AccountsCorrected() bool

// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
Account(hash common.Hash) (*Account, error)

// Accounts directly retrieves all accounts in the current snapshot in
// the snapshot slim data format.
Accounts() (map[common.Hash]*Account, error)

// AccountRLP directly retrieves the account RLP associated with a particular
// hash in the snapshot slim data format.
AccountRLP(hash common.Hash) ([]byte, error)
@@ -240,6 +250,11 @@ func (t *Tree) waitBuild() {
}
}

// Layers returns the number of layers
func (t *Tree) Layers() int {
return len(t.layers)
}

// Disable interrupts any pending snapshot generator, deletes all the snapshot
// layers in memory and marks snapshots disabled globally. In order to resume
// the snapshot functionality, the caller must invoke Rebuild.
@@ -666,6 +681,11 @@ func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
if snap == nil {
return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
}
// Wait until the snapshot (difflayer) is verified; this means the account data has also been refreshed with the correct data
if !snap.WaitAndGetVerifyRes() {
return common.Hash{}, ErrSnapshotStale
}

// Run the journaling
t.lock.Lock()
defer t.lock.Unlock()