
[R4R] Release v1.1.4 #506

Merged · 12 commits · Nov 2, 2021
5 changes: 1 addition & 4 deletions .github/workflows/unit-test.yml
@@ -43,13 +43,10 @@ jobs:
restore-keys: |
${{ runner.os }}-go-

- name: Test Build
run: |
make geth

- name: Uint Test
env:
ANDROID_HOME: "" # Skip android test
run: |
go clean -testcache
make test

12 changes: 12 additions & 0 deletions CHANGELOG.md
@@ -1,4 +1,16 @@
# Changelog
## v1.1.4
Improvement
* [\#472](https://github.com/binance-chain/bsc/pull/472) add metrics for contract code bitmap cache
* [\#473](https://github.com/binance-chain/bsc/pull/473) fix ci test flow

BUGFIX
* [\#491](https://github.com/binance-chain/bsc/pull/491) fix prefetcher related bugs

FEATURES
* [\#480](https://github.com/binance-chain/bsc/pull/480) implement BEP-95


## v1.1.3
Improvement
* [\#456](https://github.com/binance-chain/bsc/pull/456) git-flow support lint, unit test, and integration test
8 changes: 5 additions & 3 deletions core/block_validator.go
@@ -144,13 +144,15 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
validateRes <- tmpFunc()
}()
}

var err error
for i := 0; i < len(validateFuns); i++ {
r := <-validateRes
if r != nil {
return r
if r != nil && err == nil {
err = r
}
}
return nil
return err
}

// CalcGasLimit computes the gas limit of the next block after parent. It aims
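The block_validator.go change above makes ValidateState drain every result from its validator goroutines instead of returning on the first error, so all of them have finished before the function returns. A minimal sketch of that fan-in pattern, with hypothetical names standing in for the real ones:

```go
package sketch

// runValidators fans out the checks and then drains every result; the first
// error is remembered and reported only after the drain completes, so no
// validator is still running against shared state when the caller resumes.
func runValidators(validateFuns []func() error) error {
	validateRes := make(chan error, len(validateFuns)) // buffered: senders never block
	for _, f := range validateFuns {
		f := f // capture loop variable
		go func() { validateRes <- f() }()
	}
	var err error
	for i := 0; i < len(validateFuns); i++ {
		if r := <-validateRes; r != nil && err == nil {
			err = r // keep the first error, but keep draining
		}
	}
	return err
}
```

Returning early, as the old code did, would leave the remaining validators running after ValidateState had already returned.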
34 changes: 18 additions & 16 deletions core/blockchain.go
@@ -2486,9 +2486,12 @@ func (bc *BlockChain) update() {
}

func (bc *BlockChain) trustedDiffLayerLoop() {
recheck := time.Tick(diffLayerFreezerRecheckInterval)
recheck := time.NewTicker(diffLayerFreezerRecheckInterval)
bc.wg.Add(1)
defer bc.wg.Done()
defer func() {
bc.wg.Done()
recheck.Stop()
}()
for {
select {
case diff := <-bc.diffQueueBuffer:
@@ -2521,29 +2524,28 @@ func (bc *BlockChain) trustedDiffLayerLoop() {
batch.Reset()
}
return
case <-recheck:
case <-recheck.C:
currentHeight := bc.CurrentBlock().NumberU64()
var batch ethdb.Batch
for !bc.diffQueue.Empty() {
diff, prio := bc.diffQueue.Pop()
diffLayer := diff.(*types.DiffLayer)

// if the block old enough
if int64(currentHeight)+prio >= int64(bc.triesInMemory) {
canonicalHash := bc.GetCanonicalHash(uint64(-prio))
// on the canonical chain
if canonicalHash == diffLayer.BlockHash {
if batch == nil {
batch = bc.db.DiffStore().NewBatch()
}
rawdb.WriteDiffLayer(batch, diffLayer.BlockHash, diffLayer)
staleHash := bc.GetCanonicalHash(uint64(-prio) - bc.diffLayerFreezerBlockLimit)
rawdb.DeleteDiffLayer(batch, staleHash)
}
} else {
// if the block is not old enough
if int64(currentHeight)+prio < int64(bc.triesInMemory) {
bc.diffQueue.Push(diffLayer, prio)
break
}
canonicalHash := bc.GetCanonicalHash(uint64(-prio))
// on the canonical chain
if canonicalHash == diffLayer.BlockHash {
if batch == nil {
batch = bc.db.DiffStore().NewBatch()
}
rawdb.WriteDiffLayer(batch, diffLayer.BlockHash, diffLayer)
staleHash := bc.GetCanonicalHash(uint64(-prio) - bc.diffLayerFreezerBlockLimit)
rawdb.DeleteDiffLayer(batch, staleHash)
}
if batch != nil && batch.ValueSize() > ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
panic(fmt.Sprintf("Failed to write diff layer, error %v", err))
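The blockchain.go hunks above replace time.Tick with time.NewTicker in trustedDiffLayerLoop. This matters because the ticker returned by time.Tick can never be stopped and therefore never gets released, which leaks for the lifetime of the process. A small, self-contained sketch of the lifecycle now used (names are illustrative):

```go
package sketch

import "time"

// loop creates its ticker up front, stops it when the loop exits, and
// selects on the tick channel alongside the shutdown signal.
func loop(interval time.Duration, quit <-chan struct{}) {
	recheck := time.NewTicker(interval)
	defer recheck.Stop() // time.Tick offers no way to do this, so it leaks

	for {
		select {
		case <-recheck.C:
			// periodic work, e.g. persisting old diff layers to the freezer
		case <-quit:
			return
		}
	}
}
```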
6 changes: 5 additions & 1 deletion core/blockchain_diff_test.go
@@ -280,10 +280,14 @@ func TestFreezeDiffLayer(t *testing.T) {
blockNum := 1024
fullBackend := newTestBackend(blockNum, true)
defer fullBackend.close()
for len(fullBackend.chain.diffQueueBuffer) > 0 {
// Wait for the buffer to drain.
}
// Minus one empty block.
if fullBackend.chain.diffQueue.Size() != blockNum-1 {
t.Errorf("size of diff queue is wrong, expected: %d, get: %d", blockNum, fullBackend.chain.diffQueue.Size())
t.Errorf("size of diff queue is wrong, expected: %d, get: %d", blockNum-1, fullBackend.chain.diffQueue.Size())
}

time.Sleep(diffLayerFreezerRecheckInterval + 1*time.Second)
if fullBackend.chain.diffQueue.Size() != int(fullBackend.chain.triesInMemory) {
t.Errorf("size of diff queue is wrong, expected: %d, get: %d", blockNum, fullBackend.chain.diffQueue.Size())
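The blockchain_diff_test.go change above spins until the buffered channel feeding the diff queue has drained before asserting the queue size. A hedged sketch of such a wait, here with an added deadline so a stuck buffer fails the test instead of hanging it (the helper and channel type are invented for the sketch):

```go
package sketch

import (
	"testing"
	"time"
)

// waitDrained polls until the buffered channel is empty or the deadline
// passes. The unbounded spin used in the test relies on the producer
// finishing quickly; a deadline keeps a stuck run from hanging forever.
func waitDrained(t *testing.T, buf chan int, timeout time.Duration) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for len(buf) > 0 {
		if time.Now().After(deadline) {
			t.Fatalf("buffer still has %d entries after %v", len(buf), timeout)
		}
		time.Sleep(10 * time.Millisecond)
	}
}
```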
4 changes: 2 additions & 2 deletions core/blockchain_test.go
@@ -1568,8 +1568,8 @@ func TestLargeReorgTrieGC(t *testing.T) {
t.Fatalf("failed to finalize competitor chain: %v", err)
}
for i, block := range competitor[:len(competitor)-TestTriesInMemory] {
if node, _ := chain.stateCache.TrieDB().Node(block.Root()); node != nil {
t.Fatalf("competitor %d: competing chain state missing", i)
if node, err := chain.stateCache.TrieDB().Node(block.Root()); node != nil {
t.Fatalf("competitor %d: competing chain state missing, err: %v", i, err)
}
}
}
14 changes: 13 additions & 1 deletion core/state/snapshot/generate.go
@@ -560,6 +560,12 @@ func (dl *diskLayer) generate(stats *generatorStats) {
default:
}
if batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
if bytes.Compare(currentLocation, dl.genMarker) < 0 {
log.Error("Snapshot generator went backwards",
"currentLocation", fmt.Sprintf("%x", currentLocation),
"genMarker", fmt.Sprintf("%x", dl.genMarker))
}

// Flush out the batch anyway no matter it's empty or not.
// It's possible that all the states are recovered and the
// generation indeed makes progress.
@@ -634,8 +640,14 @@ func (dl *diskLayer) generate(stats *generatorStats) {
stats.storage += common.StorageSize(1 + common.HashLength + dataLen)
stats.accounts++
}
marker := accountHash[:]
// If snapshot generation reaches this point after an interruption, genMarker may go backward
// when the last genMarker consisted of both accountHash and storageHash
if accMarker != nil && bytes.Equal(marker, accMarker) && len(dl.genMarker) > common.HashLength {
marker = dl.genMarker[:]
}
// If we've exceeded our batch allowance or termination was requested, flush to disk
if err := checkAndFlush(accountHash[:]); err != nil {
if err := checkAndFlush(marker); err != nil {
return err
}
// If the iterated account is the contract, create a further loop to
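Both generate.go hunks above guard against the snapshot generator's marker moving backwards. As the surrounding code suggests, the marker is either a 32-byte account hash or a 64-byte account-plus-storage hash; resuming inside an account's storage and then flushing with only the short form would compare as "backwards", which is exactly what the new log.Error check reports. A small standard-library-only sketch of why:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	accountHash := bytes.Repeat([]byte{0xaa}, 32)
	storageHash := bytes.Repeat([]byte{0x01}, 32)

	// Marker left behind when generation was interrupted inside an account's storage.
	genMarker := append(append([]byte{}, accountHash...), storageHash...)

	// Flushing with only the account hash compares "before" the stored marker.
	fmt.Println(bytes.Compare(accountHash, genMarker) < 0) // true: looks like going backwards
	fmt.Println(bytes.Compare(genMarker, genMarker) < 0)   // false: reusing genMarker is safe
}
```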
1 change: 1 addition & 0 deletions core/state/statedb.go
@@ -958,6 +958,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
// goes into transaction receipts.
func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
if s.lightProcessed {
s.StopPrefetcher()
return s.trie.Hash()
}
// Finalise all the dirty storage states and write them into the tries
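The statedb.go change stops the trie prefetcher on the lightProcessed early return of IntermediateRoot, so its background work does not outlive the root computation on that path. A hedged, self-contained sketch of the guard; the types and fields here are stand-ins, not the real StateDB:

```go
package sketch

type prefetcher struct{ quit chan struct{} }

func (p *prefetcher) close() { close(p.quit) }

type stateDB struct {
	lightProcessed bool
	prefetcher     *prefetcher
}

// stopPrefetcher releases the background prefetcher exactly once.
func (s *stateDB) stopPrefetcher() {
	if s.prefetcher != nil {
		s.prefetcher.close()
		s.prefetcher = nil
	}
}

// intermediateRoot mirrors the control flow above: the early return for
// light-processed blocks must also stop the prefetcher, otherwise that path
// skips the shutdown the normal path performs.
func (s *stateDB) intermediateRoot() {
	if s.lightProcessed {
		s.stopPrefetcher()
		return
	}
	// ... finalise dirty objects, feed the prefetcher, compute the root ...
	s.stopPrefetcher()
}
```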
39 changes: 39 additions & 0 deletions core/systemcontracts/upgrade.go

Large diffs are not rendered by default.

10 changes: 9 additions & 1 deletion core/vm/contract.go
@@ -22,12 +22,18 @@ import (
lru "github.com/hashicorp/golang-lru"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/metrics"
"github.com/holiman/uint256"
)

const codeBitmapCacheSize = 2000

var codeBitmapCache, _ = lru.New(codeBitmapCacheSize)
var (
codeBitmapCache, _ = lru.New(codeBitmapCacheSize)

contractCodeBitmapHitMeter = metrics.NewRegisteredMeter("vm/contract/code/bitmap/hit", nil)
contractCodeBitmapMissMeter = metrics.NewRegisteredMeter("vm/contract/code/bitmap/miss", nil)
)

// ContractRef is a reference to the contract's backing object
type ContractRef interface {
@@ -117,12 +123,14 @@ func (c *Contract) isCode(udest uint64) bool {
analysis, exist := c.jumpdests[c.CodeHash]
if !exist {
if cached, ok := codeBitmapCache.Get(c.CodeHash); ok {
contractCodeBitmapHitMeter.Mark(1)
analysis = cached.(bitvec)
} else {
// Do the analysis and save in parent context
// We do not need to store it in c.analysis
analysis = codeBitmap(c.Code)
c.jumpdests[c.CodeHash] = analysis
contractCodeBitmapMissMeter.Mark(1)
codeBitmapCache.Add(c.CodeHash, analysis)
}
}
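The contract.go change wires hit and miss meters around the existing code-bitmap LRU cache, which is what PR #472 in the changelog refers to. A condensed sketch of the same pattern, using the golang-lru and go-ethereum metrics packages already imported in the diff; the metric names and helper below are illustrative:

```go
package sketch

import (
	lru "github.com/hashicorp/golang-lru"

	"github.com/ethereum/go-ethereum/metrics"
)

var (
	bitmapCache, _ = lru.New(2000) // bounded-size LRU, like codeBitmapCache

	hitMeter  = metrics.NewRegisteredMeter("example/code/bitmap/hit", nil)
	missMeter = metrics.NewRegisteredMeter("example/code/bitmap/miss", nil)
)

// cachedAnalysis returns the analysis for codeHash, computing it on a miss.
// Marking the meters on both paths is what makes the cache's hit ratio
// visible on the node's metrics endpoint.
func cachedAnalysis(codeHash [32]byte, analyse func() []byte) []byte {
	if v, ok := bitmapCache.Get(codeHash); ok {
		hitMeter.Mark(1)
		return v.([]byte)
	}
	missMeter.Mark(1)
	result := analyse()
	bitmapCache.Add(codeHash, result)
	return result
}
```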
5 changes: 1 addition & 4 deletions docker/Dockerfile.truffle
@@ -7,10 +7,7 @@ RUN git clone https://github.com/binance-chain/canonical-upgradeable-bep20.git /
WORKDIR /usr/app/canonical-upgradeable-bep20
COPY docker/truffle-config.js /usr/app/canonical-upgradeable-bep20

RUN npm install -g n
RUN n 12.18.3 && node -v

RUN npm install -g truffle@v5.1.14
RUN npm install -g --unsafe-perm truffle@v5.1.14
RUN npm install

ENTRYPOINT [ "/bin/bash" ]
1 change: 1 addition & 0 deletions eth/catalyst/api_test.go
@@ -82,6 +82,7 @@ func generateTestChainWithFork(n int, fork int) (*core.Genesis, []*types.Block,
RamanujanBlock: big.NewInt(0),
NielsBlock: big.NewInt(0),
MirrorSyncBlock: big.NewInt(0),
BrunoBlock: big.NewInt(0),

Ethash: new(params.EthashConfig),
}
4 changes: 2 additions & 2 deletions eth/downloader/downloader.go
@@ -1028,8 +1028,8 @@ func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode,
}
header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists
if header == nil {
p.log.Error("header not found", "number", header.Number, "hash", header.Hash(), "request", check)
return 0, fmt.Errorf("%w: header no found (%d)", errBadPeer, header.Number)
p.log.Error("header not found", "hash", h, "request", check)
return 0, fmt.Errorf("%w: header not found (%s)", errBadPeer, h)
}
if header.Number.Uint64() != check {
p.log.Warn("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
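The downloader.go fix removes a nil-pointer dereference: the old log and error lines read header.Number and header.Hash() inside the very branch that had just established header == nil. A minimal reproduction of the corrected shape; the types and helper names are invented for the sketch:

```go
package sketch

import (
	"errors"
	"fmt"
)

type header struct{ Number uint64 }

var errBadPeer = errors.New("bad peer")

// findHeader only reports values that are still valid once the lookup has
// failed: the requested hash, never fields of the nil header.
func findHeader(h [32]byte, byHash func([32]byte) *header) (uint64, error) {
	hdr := byHash(h)
	if hdr == nil {
		// Old shape: fmt.Errorf("... (%d)", errBadPeer, hdr.Number) — a nil dereference.
		return 0, fmt.Errorf("%w: header not found (%x)", errBadPeer, h)
	}
	return hdr.Number, nil
}
```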
34 changes: 29 additions & 5 deletions params/config.go
@@ -34,7 +34,7 @@ var (

BSCGenesisHash = common.HexToHash("0x0d21840abff46b96c84b2ac9e10e4f5cdaeb5693cb665db62a2f3b02d2d57b5b")
ChapelGenesisHash = common.HexToHash("0x6d3c66c5357ec91d5c43af47e234a939b22557cbb552dc45bebbceeed90fbe34")
RialtoGenesisHash = common.HexToHash("0x005dc005bddd1967de6187c1c23be801eb7abdd80cebcc24f341b727b70311d6")
RialtoGenesisHash = common.HexToHash("0xaabe549bfa85c84f7aee9da7010b97453ad686f2c2d8ce00503d1a00c72cad54")
YoloV3GenesisHash = common.HexToHash("0xf1f2876e8500c77afcc03228757b39477eceffccf645b734967fe3c7e16967b7")
)

@@ -75,6 +75,7 @@ var (
RamanujanBlock: big.NewInt(0),
NielsBlock: big.NewInt(0),
MirrorSyncBlock: big.NewInt(0),
BrunoBlock: big.NewInt(0),
BerlinBlock: big.NewInt(12_244_000),
Ethash: new(EthashConfig),
}
@@ -118,6 +119,7 @@ var (
RamanujanBlock: big.NewInt(0),
NielsBlock: big.NewInt(0),
MirrorSyncBlock: big.NewInt(0),
BrunoBlock: big.NewInt(0),
BerlinBlock: big.NewInt(9_812_189),
Ethash: new(EthashConfig),
}
@@ -161,6 +163,7 @@ var (
RamanujanBlock: big.NewInt(0),
NielsBlock: big.NewInt(0),
MirrorSyncBlock: big.NewInt(0),
BrunoBlock: big.NewInt(0),
BerlinBlock: big.NewInt(8_290_928),
Clique: &CliqueConfig{
Period: 15,
@@ -203,6 +206,7 @@ var (
RamanujanBlock: big.NewInt(0),
NielsBlock: big.NewInt(0),
MirrorSyncBlock: big.NewInt(0),
BrunoBlock: big.NewInt(0),
IstanbulBlock: big.NewInt(1_561_651),
MuirGlacierBlock: nil,
BerlinBlock: big.NewInt(4_460_644),
@@ -246,6 +250,7 @@ var (
RamanujanBlock: big.NewInt(0),
NielsBlock: big.NewInt(0),
MirrorSyncBlock: big.NewInt(5184000),
BrunoBlock: nil,
Parlia: &ParliaConfig{
Period: 3,
Epoch: 200,
@@ -266,6 +271,7 @@ var (
RamanujanBlock: big.NewInt(1010000),
NielsBlock: big.NewInt(1014369),
MirrorSyncBlock: big.NewInt(5582500),
BrunoBlock: big.NewInt(13837000),
Parlia: &ParliaConfig{
Period: 3,
Epoch: 200,
@@ -286,6 +292,7 @@ var (
RamanujanBlock: big.NewInt(400),
NielsBlock: big.NewInt(0),
MirrorSyncBlock: big.NewInt(400),
BrunoBlock: big.NewInt(400),
Parlia: &ParliaConfig{
Period: 3,
Epoch: 200,
@@ -308,6 +315,7 @@ var (
RamanujanBlock: big.NewInt(0),
NielsBlock: big.NewInt(0),
MirrorSyncBlock: big.NewInt(0),
BrunoBlock: big.NewInt(0),
MuirGlacierBlock: nil,
BerlinBlock: nil, // Don't enable Berlin directly, we're YOLOing it
YoloV3Block: big.NewInt(0),
@@ -322,16 +330,16 @@ var (
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, big.NewInt(0), big.NewInt(0), big.NewInt(0), new(EthashConfig), nil, nil}
AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), new(EthashConfig), nil, nil}

// AllCliqueProtocolChanges contains every protocol change (EIPs) introduced
// and accepted by the Ethereum core developers into the Clique consensus.
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, &CliqueConfig{Period: 0, Epoch: 30000}, nil}
AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, &CliqueConfig{Period: 0, Epoch: 30000}, nil}

TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, big.NewInt(0), big.NewInt(0), big.NewInt(0), new(EthashConfig), nil, nil}
TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), new(EthashConfig), nil, nil}

TestRules = TestChainConfig.Rules(new(big.Int))
)
@@ -418,6 +426,7 @@ type ChainConfig struct {
RamanujanBlock *big.Int `json:"ramanujanBlock,omitempty" toml:",omitempty"` // ramanujanBlock switch block (nil = no fork, 0 = already activated)
NielsBlock *big.Int `json:"nielsBlock,omitempty" toml:",omitempty"` // nielsBlock switch block (nil = no fork, 0 = already activated)
MirrorSyncBlock *big.Int `json:"mirrorSyncBlock,omitempty" toml:",omitempty"` // mirrorSyncBlock switch block (nil = no fork, 0 = already activated)
BrunoBlock *big.Int `json:"brunoBlock,omitempty" toml:",omitempty"` // brunoBlock switch block (nil = no fork, 0 = already activated)

// Various consensus engines
Ethash *EthashConfig `json:"ethash,omitempty" toml:",omitempty"`
@@ -468,7 +477,7 @@ func (c *ChainConfig) String() string {
default:
engine = "unknown"
}
return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Berlin: %v, YOLO v3: %v, Engine: %v}",
return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Berlin: %v, YOLO v3: %v, Engine: %v}",
c.ChainID,
c.HomesteadBlock,
c.DAOForkBlock,
@@ -484,6 +493,7 @@ func (c *ChainConfig) String() string {
c.RamanujanBlock,
c.NielsBlock,
c.MirrorSyncBlock,
c.BrunoBlock,
c.BerlinBlock,
c.YoloV3Block,
engine,
@@ -555,6 +565,16 @@ func (c *ChainConfig) IsOnMirrorSync(num *big.Int) bool {
return configNumEqual(c.MirrorSyncBlock, num)
}

// IsBruno returns whether num is either equal to the Bruno fork block or greater.
func (c *ChainConfig) IsBruno(num *big.Int) bool {
return isForked(c.BrunoBlock, num)
}

// IsOnBruno returns whether num is equal to the Bruno fork block.
func (c *ChainConfig) IsOnBruno(num *big.Int) bool {
return configNumEqual(c.BrunoBlock, num)
}

// IsMuirGlacier returns whether num is either equal to the Muir Glacier (EIP-2384) fork block or greater.
func (c *ChainConfig) IsMuirGlacier(num *big.Int) bool {
return isForked(c.MuirGlacierBlock, num)
@@ -616,6 +636,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
var lastFork fork
for _, cur := range []fork{
{name: "mirrorSyncBlock", block: c.MirrorSyncBlock},
{name: "brunoBlock", block: c.BrunoBlock},
{name: "berlinBlock", block: c.BerlinBlock},
} {
if lastFork.name != "" {
@@ -695,6 +716,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *Confi
if isForkIncompatible(c.MirrorSyncBlock, newcfg.MirrorSyncBlock, head) {
return newCompatError("mirrorSync fork block", c.MirrorSyncBlock, newcfg.MirrorSyncBlock)
}
if isForkIncompatible(c.BrunoBlock, newcfg.BrunoBlock, head) {
return newCompatError("bruno fork block", c.BrunoBlock, newcfg.BrunoBlock)
}
return nil
}

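Most of the params/config.go changes thread the new BrunoBlock through the existing fork plumbing: the chain presets, the String output, the fork-order check, the compatibility check, and the IsBruno/IsOnBruno helpers. A hedged sketch of the activation convention those helpers follow (nil = never activates, 0 = active from genesis, N = active from block N); isForked and configNumEqual below mirror the unexported helpers the diff calls into:

```go
package sketch

import "math/big"

// isForked reports whether head has reached the fork block s.
func isForked(s, head *big.Int) bool {
	if s == nil || head == nil {
		return false // a nil fork block means the fork never activates
	}
	return s.Cmp(head) <= 0
}

// configNumEqual reports whether head is exactly the fork block.
func configNumEqual(x, y *big.Int) bool {
	if x == nil {
		return y == nil
	}
	if y == nil {
		return x == nil
	}
	return x.Cmp(y) == 0
}

// isBruno / isOnBruno follow the same shape as the methods added above.
func isBruno(brunoBlock, head *big.Int) bool   { return isForked(brunoBlock, head) }
func isOnBruno(brunoBlock, head *big.Int) bool { return configNumEqual(brunoBlock, head) }
```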