diff --git a/build/ci.go b/build/ci.go
index d9f147ef0e..18172c327a 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -300,7 +300,10 @@ func doTest(cmdline []string) {
gotest.Args = append(gotest.Args, "-v")
}
- packages := []string{"./..."}
+ packages := []string{"./accounts/...", "./common/...", "./consensus/...", "./console/...", "./core/...",
+ "./crypto/...", "./eth/...", "./ethclient/...", "./ethdb/...", "./event/...", "./graphql/...", "./les/...",
+ "./light/...", "./log/...", "./metrics/...", "./miner/...", "./mobile/...", "./node/...",
+ "./p2p/...", "./params/...", "./rlp/...", "./rpc/...", "./tests/...", "./trie/..."}
if len(flag.CommandLine.Args()) > 0 {
packages = flag.CommandLine.Args()
}
diff --git a/cmd/devp2p/internal/ethtest/suite_test.go b/cmd/devp2p/internal/ethtest/suite_test.go
deleted file mode 100644
index 6e3217151a..0000000000
--- a/cmd/devp2p/internal/ethtest/suite_test.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package ethtest
-
-import (
- "os"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/eth"
- "github.com/ethereum/go-ethereum/eth/ethconfig"
- "github.com/ethereum/go-ethereum/internal/utesting"
- "github.com/ethereum/go-ethereum/node"
- "github.com/ethereum/go-ethereum/p2p"
-)
-
-var (
- genesisFile = "./testdata/genesis.json"
- halfchainFile = "./testdata/halfchain.rlp"
- fullchainFile = "./testdata/chain.rlp"
-)
-
-func TestEthSuite(t *testing.T) {
- geth, err := runGeth()
- if err != nil {
- t.Fatalf("could not run geth: %v", err)
- }
- defer geth.Close()
-
- suite, err := NewSuite(geth.Server().Self(), fullchainFile, genesisFile)
- if err != nil {
- t.Fatalf("could not create new test suite: %v", err)
- }
- for _, test := range suite.AllEthTests() {
- t.Run(test.Name, func(t *testing.T) {
- result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
- if result[0].Failed {
- t.Fatal()
- }
- })
- }
-}
-
-// runGeth creates and starts a geth node
-func runGeth() (*node.Node, error) {
- stack, err := node.New(&node.Config{
- P2P: p2p.Config{
- ListenAddr: "127.0.0.1:0",
- NoDiscovery: true,
- MaxPeers: 10, // in case a test requires multiple connections, can be changed in the future
- NoDial: true,
- },
- })
- if err != nil {
- return nil, err
- }
-
- err = setupGeth(stack)
- if err != nil {
- stack.Close()
- return nil, err
- }
- if err = stack.Start(); err != nil {
- stack.Close()
- return nil, err
- }
- return stack, nil
-}
-
-func setupGeth(stack *node.Node) error {
- chain, err := loadChain(halfchainFile, genesisFile)
- if err != nil {
- return err
- }
-
- backend, err := eth.New(stack, &ethconfig.Config{
- Genesis: &chain.genesis,
- NetworkId: chain.genesis.Config.ChainID.Uint64(), // 19763
- DatabaseCache: 10,
- TrieCleanCache: 10,
- TrieCleanCacheJournal: "",
- TrieCleanCacheRejournal: 60 * time.Minute,
- TrieDirtyCache: 16,
- TrieTimeout: 60 * time.Minute,
- SnapshotCache: 10,
- })
- if err != nil {
- return err
- }
-
- _, err = backend.BlockChain().InsertChain(chain.blocks[1:])
- return err
-}
diff --git a/cmd/faucet/faucet_test.go b/cmd/faucet/faucet_test.go
deleted file mode 100644
index 4f3e47084e..0000000000
--- a/cmd/faucet/faucet_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2021 The go-ethereum Authors
-// This file is part of go-ethereum.
-//
-// go-ethereum is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// go-ethereum is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
-
-package main
-
-import (
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
-)
-
-func TestFacebook(t *testing.T) {
- for _, tt := range []struct {
- url string
- want common.Address
- }{
- {
- "https://www.facebook.com/fooz.gazonk/posts/2837228539847129",
- common.HexToAddress("0xDeadDeaDDeaDbEefbEeFbEEfBeeFBeefBeeFbEEF"),
- },
- } {
- _, _, gotAddress, err := authFacebook(tt.url)
- if err != nil {
- t.Fatal(err)
- }
- if gotAddress != tt.want {
- t.Fatalf("address wrong, have %v want %v", gotAddress, tt.want)
- }
- }
-}
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index ed54827c97..d383d99b7e 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -105,6 +105,7 @@ var (
utils.LightNoSyncServeFlag,
utils.WhitelistFlag,
utils.BloomFilterSizeFlag,
+ utils.TriesInMemoryFlag,
utils.CacheFlag,
utils.CacheDatabaseFlag,
utils.CacheTrieFlag,
@@ -112,7 +113,6 @@ var (
utils.CacheTrieRejournalFlag,
utils.CacheGCFlag,
utils.CacheSnapshotFlag,
- utils.CacheNoPrefetchFlag,
utils.CachePreimagesFlag,
utils.ListenPortFlag,
utils.MaxPeersFlag,
diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go
index 1af458af20..2f615a2c18 100644
--- a/cmd/geth/snapshot.go
+++ b/cmd/geth/snapshot.go
@@ -63,6 +63,7 @@ var (
utils.GoerliFlag,
utils.CacheTrieJournalFlag,
utils.BloomFilterSizeFlag,
+ utils.TriesInMemoryFlag,
},
Description: `
geth snapshot prune-state
@@ -153,7 +154,7 @@ func pruneState(ctx *cli.Context) error {
defer stack.Close()
chaindb := utils.MakeChainDatabase(ctx, stack, false)
- pruner, err := pruner.NewPruner(chaindb, stack.ResolvePath(""), stack.ResolvePath(config.Eth.TrieCleanCacheJournal), ctx.GlobalUint64(utils.BloomFilterSizeFlag.Name))
+ pruner, err := pruner.NewPruner(chaindb, stack.ResolvePath(""), stack.ResolvePath(config.Eth.TrieCleanCacheJournal), ctx.GlobalUint64(utils.BloomFilterSizeFlag.Name), ctx.GlobalUint64(utils.TriesInMemoryFlag.Name))
if err != nil {
log.Error("Failed to open snapshot tree", "err", err)
return err
@@ -187,7 +188,7 @@ func verifyState(ctx *cli.Context) error {
log.Error("Failed to load head block")
return errors.New("no head block")
}
- snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, headBlock.Root(), false, false, false)
+ snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, 128, headBlock.Root(), false, false, false)
if err != nil {
log.Error("Failed to open snapshot tree", "err", err)
return err
diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go
index 1cc6693ec0..1450c29e84 100644
--- a/cmd/geth/usage.go
+++ b/cmd/geth/usage.go
@@ -56,6 +56,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.IdentityFlag,
utils.LightKDFFlag,
utils.WhitelistFlag,
+ utils.TriesInMemoryFlag,
},
},
{
@@ -118,7 +119,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.CacheTrieRejournalFlag,
utils.CacheGCFlag,
utils.CacheSnapshotFlag,
- utils.CacheNoPrefetchFlag,
utils.CachePreimagesFlag,
},
},
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index f02f326bcf..8197b8ceab 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -241,6 +241,11 @@ var (
Usage: "Megabytes of memory allocated to bloom-filter for pruning",
Value: 2048,
}
+ TriesInMemoryFlag = cli.Uint64Flag{
+ Name: "triesInMemory",
+ Usage: "The layer of tries trees that keep in memory",
+ Value: 128,
+ }
OverrideBerlinFlag = cli.Uint64Flag{
Name: "override.berlin",
Usage: "Manually specify Berlin fork-block, overriding the bundled setting",
@@ -389,7 +394,7 @@ var (
CacheDatabaseFlag = cli.IntFlag{
Name: "cache.database",
Usage: "Percentage of cache memory allowance to use for database io",
- Value: 50,
+ Value: 40,
}
CacheTrieFlag = cli.IntFlag{
Name: "cache.trie",
@@ -413,12 +418,8 @@ var (
}
CacheSnapshotFlag = cli.IntFlag{
Name: "cache.snapshot",
- Usage: "Percentage of cache memory allowance to use for snapshot caching (default = 10% full mode, 20% archive mode)",
- Value: 10,
- }
- CacheNoPrefetchFlag = cli.BoolFlag{
- Name: "cache.noprefetch",
- Usage: "Disable heuristic state prefetch during block import (less CPU and disk IO, more time waiting for data)",
+ Usage: "Percentage of cache memory allowance to use for snapshot caching (default = 20%)",
+ Value: 20,
}
CachePreimagesFlag = cli.BoolFlag{
Name: "cache.preimages",
@@ -1576,9 +1577,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.GlobalIsSet(RangeLimitFlag.Name) {
cfg.RangeLimit = ctx.GlobalBool(RangeLimitFlag.Name)
}
- if ctx.GlobalIsSet(CacheNoPrefetchFlag.Name) {
- cfg.NoPrefetch = ctx.GlobalBool(CacheNoPrefetchFlag.Name)
- }
// Read the value from the flag no matter if it's set or not.
cfg.Preimages = ctx.GlobalBool(CachePreimagesFlag.Name)
if cfg.NoPruning && !cfg.Preimages {
@@ -1905,13 +1903,13 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
}
cache := &core.CacheConfig{
- TrieCleanLimit: ethconfig.Defaults.TrieCleanCache,
- TrieCleanNoPrefetch: ctx.GlobalBool(CacheNoPrefetchFlag.Name),
- TrieDirtyLimit: ethconfig.Defaults.TrieDirtyCache,
- TrieDirtyDisabled: ctx.GlobalString(GCModeFlag.Name) == "archive",
- TrieTimeLimit: ethconfig.Defaults.TrieTimeout,
- SnapshotLimit: ethconfig.Defaults.SnapshotCache,
- Preimages: ctx.GlobalBool(CachePreimagesFlag.Name),
+ TrieCleanLimit: ethconfig.Defaults.TrieCleanCache,
+ TrieDirtyLimit: ethconfig.Defaults.TrieDirtyCache,
+ TrieDirtyDisabled: ctx.GlobalString(GCModeFlag.Name) == "archive",
+ TrieTimeLimit: ethconfig.Defaults.TrieTimeout,
+ TriesInMemory: ethconfig.Defaults.TriesInMemory,
+ SnapshotLimit: ethconfig.Defaults.SnapshotCache,
+ Preimages: ctx.GlobalBool(CachePreimagesFlag.Name),
}
if cache.TrieDirtyDisabled && !cache.Preimages {
cache.Preimages = true
diff --git a/common/gopool/pool.go b/common/gopool/pool.go
new file mode 100644
index 0000000000..b4fc1c459d
--- /dev/null
+++ b/common/gopool/pool.go
@@ -0,0 +1,48 @@
+package gopool
+
+import (
+ "time"
+
+ "github.com/panjf2000/ants/v2"
+)
+
+var (
+ // Init a instance pool when importing ants.
+ defaultPool, _ = ants.NewPool(ants.DefaultAntsPoolSize, ants.WithExpiryDuration(10*time.Second))
+)
+
+// Logger is used for logging formatted messages.
+type Logger interface {
+ // Printf must have the same semantics as log.Printf.
+ Printf(format string, args ...interface{})
+}
+
+// Submit submits a task to pool.
+func Submit(task func()) error {
+ return defaultPool.Submit(task)
+}
+
+// Running returns the number of the currently running goroutines.
+func Running() int {
+ return defaultPool.Running()
+}
+
+// Cap returns the capacity of this default pool.
+func Cap() int {
+ return defaultPool.Cap()
+}
+
+// Free returns the available goroutines to work.
+func Free() int {
+ return defaultPool.Free()
+}
+
+// Release Closes the default pool.
+func Release() {
+ defaultPool.Release()
+}
+
+// Reboot reboots the default pool.
+func Reboot() {
+ defaultPool.Reboot()
+}
diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go
index 2355950424..dfec81f6ad 100644
--- a/consensus/clique/clique.go
+++ b/consensus/clique/clique.go
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc"
@@ -224,7 +225,7 @@ func (c *Clique) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*typ
abort := make(chan struct{})
results := make(chan error, len(headers))
- go func() {
+ gopool.Submit(func() {
for i, header := range headers {
err := c.verifyHeader(chain, header, headers[:i])
@@ -234,7 +235,7 @@ func (c *Clique) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*typ
case results <- err:
}
}
- }()
+ })
return abort, results
}
@@ -635,7 +636,7 @@ func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, res
copy(header.Extra[len(header.Extra)-extraSeal:], sighash)
// Wait until sealing is terminated or delay timeout.
log.Trace("Waiting for slot to sign and propagate", "delay", common.PrettyDuration(delay))
- go func() {
+ gopool.Submit(func() {
select {
case <-stop:
return
@@ -647,7 +648,7 @@ func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, res
default:
log.Warn("Sealing result is not read by miner", "sealhash", SealHash(header))
}
- }()
+ })
return nil
}
diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go
index 065e60b90b..f38397d5c1 100644
--- a/consensus/ethash/algorithm.go
+++ b/consensus/ethash/algorithm.go
@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/bitutil"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"golang.org/x/crypto/sha3"
@@ -168,7 +169,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
done := make(chan struct{})
defer close(done)
- go func() {
+ gopool.Submit(func() {
for {
select {
case <-done:
@@ -177,7 +178,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
logger.Info("Generating ethash verification cache", "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/(cacheRounds+1), "elapsed", common.PrettyDuration(time.Since(start)))
}
}
- }()
+ })
// Create a hasher to reuse between invocations
keccak512 := makeHasher(sha3.NewLegacyKeccak512())
diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go
index f8a84c3d00..c32e0ec3cf 100644
--- a/consensus/ethash/consensus.go
+++ b/consensus/ethash/consensus.go
@@ -26,6 +26,7 @@ import (
mapset "github.com/deckarep/golang-set"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc"
@@ -133,16 +134,16 @@ func (ethash *Ethash) VerifyHeaders(chain consensus.ChainHeaderReader, headers [
unixNow = time.Now().Unix()
)
for i := 0; i < workers; i++ {
- go func() {
+ gopool.Submit(func() {
for index := range inputs {
errors[index] = ethash.verifyHeaderWorker(chain, headers, seals, index, unixNow)
done <- index
}
- }()
+ })
}
errorsOut := make(chan error, len(headers))
- go func() {
+ gopool.Submit(func() {
defer close(inputs)
var (
in, out = 0, 0
@@ -167,7 +168,7 @@ func (ethash *Ethash) VerifyHeaders(chain consensus.ChainHeaderReader, headers [
return
}
}
- }()
+ })
return abort, errorsOut
}
diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go
index ec06d02a54..9944daa8f5 100644
--- a/consensus/ethash/ethash.go
+++ b/consensus/ethash/ethash.go
@@ -34,6 +34,7 @@ import (
"unsafe"
"github.com/edsrzf/mmap-go"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
@@ -590,14 +591,14 @@ func (ethash *Ethash) dataset(block uint64, async bool) *dataset {
// If async is specified, generate everything in a background thread
if async && !current.generated() {
- go func() {
+ gopool.Submit(func() {
current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
if futureI != nil {
future := futureI.(*dataset)
future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
}
- }()
+ })
} else {
// Either blocking generation was requested, or already done
current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
diff --git a/consensus/ethash/sealer.go b/consensus/ethash/sealer.go
index 1830e672b1..d8954e0f1b 100644
--- a/consensus/ethash/sealer.go
+++ b/consensus/ethash/sealer.go
@@ -31,6 +31,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/types"
@@ -100,7 +101,7 @@ func (ethash *Ethash) Seal(chain consensus.ChainHeaderReader, block *types.Block
}(i, uint64(ethash.rand.Int63()))
}
// Wait until sealing is terminated or a nonce is found
- go func() {
+ gopool.Submit(func() {
var result *types.Block
select {
case <-stop:
@@ -123,7 +124,7 @@ func (ethash *Ethash) Seal(chain consensus.ChainHeaderReader, block *types.Block
}
// Wait for all miners to terminate and return the block
pend.Wait()
- }()
+ })
return nil
}
diff --git a/consensus/parlia/parlia.go b/consensus/parlia/parlia.go
index a8976fd4ad..62d109848f 100644
--- a/consensus/parlia/parlia.go
+++ b/consensus/parlia/parlia.go
@@ -22,6 +22,7 @@ import (
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc"
@@ -301,7 +302,7 @@ func (p *Parlia) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*typ
abort := make(chan struct{})
results := make(chan error, len(headers))
- go func() {
+ gopool.Submit(func() {
for i, header := range headers {
err := p.verifyHeader(chain, header, headers[:i])
@@ -311,7 +312,7 @@ func (p *Parlia) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*typ
case results <- err:
}
}
- }()
+ })
return abort, results
}
@@ -651,7 +652,7 @@ func (p *Parlia) Finalize(chain consensus.ChainHeaderReader, header *types.Heade
number := header.Number.Uint64()
snap, err := p.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
- panic(err)
+ return err
}
nextForkHash := forkid.NextForkHash(p.chainConfig, p.genesisHash, number)
if !snap.isMajorityFork(hex.EncodeToString(nextForkHash[:])) {
@@ -705,13 +706,11 @@ func (p *Parlia) Finalize(chain consensus.ChainHeaderReader, header *types.Heade
val := header.Coinbase
err = p.distributeIncoming(val, state, header, cx, txs, receipts, systemTxs, usedGas, false)
if err != nil {
- panic(err)
+ return err
}
if len(*systemTxs) > 0 {
return errors.New("the length of systemTxs do not match")
}
- header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
- header.UncleHash = types.CalcUncleHash(nil)
return nil
}
@@ -737,7 +736,7 @@ func (p *Parlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *
number := header.Number.Uint64()
snap, err := p.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
- panic(err)
+ return nil, nil, err
}
spoiledVal := snap.supposeValidator()
signedRecently := false
@@ -757,17 +756,29 @@ func (p *Parlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *
}
err := p.distributeIncoming(p.val, state, header, cx, &txs, &receipts, nil, &header.GasUsed, true)
if err != nil {
- panic(err)
+ return nil, nil, err
}
// should not happen. Once happen, stop the node is better than broadcast the block
if header.GasLimit < header.GasUsed {
- panic("Gas consumption of system txs exceed the gas limit")
+ return nil, nil, errors.New("gas consumption of system txs exceed the gas limit")
}
- header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
header.UncleHash = types.CalcUncleHash(nil)
-
+ var blk *types.Block
+ var rootHash common.Hash
+ wg := sync.WaitGroup{}
+ wg.Add(2)
+ go func() {
+ rootHash = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
+ wg.Done()
+ }()
+ go func() {
+ blk = types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil))
+ wg.Done()
+ }()
+ wg.Wait()
+ blk.SetRoot(rootHash)
// Assemble and return the final block for sealing
- return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil)), receipts, nil
+ return blk, receipts, nil
}
// Authorize injects a private key into the consensus engine to mint new blocks
@@ -1106,7 +1117,14 @@ func (p *Parlia) applyTransaction(
}
actualTx := (*receivedTxs)[0]
if !bytes.Equal(p.signer.Hash(actualTx).Bytes(), expectedHash.Bytes()) {
- return fmt.Errorf("expected tx hash %v, get %v", expectedHash.String(), actualTx.Hash().String())
+ return fmt.Errorf("expected tx hash %v, get %v, nonce %d, to %s, value %s, gas %d, gasPrice %s, data %s", expectedHash.String(), actualTx.Hash().String(),
+ expectedTx.Nonce(),
+ expectedTx.To().String(),
+ expectedTx.Value().String(),
+ expectedTx.Gas(),
+ expectedTx.GasPrice().String(),
+ hex.EncodeToString(expectedTx.Data()),
+ )
}
expectedTx = actualTx
// move to next
diff --git a/core/asm/lexer.go b/core/asm/lexer.go
index 9eb8f914ac..efea204d2d 100644
--- a/core/asm/lexer.go
+++ b/core/asm/lexer.go
@@ -22,6 +22,8 @@ import (
"strings"
"unicode"
"unicode/utf8"
+
+ "github.com/ethereum/go-ethereum/common/gopool"
)
// stateFn is used through the lifetime of the
@@ -103,14 +105,14 @@ func Lex(source []byte, debug bool) <-chan token {
state: lexLine,
debug: debug,
}
- go func() {
+ gopool.Submit(func() {
l.emit(lineStart)
for l.state != nil {
l.state = l.state(l)
}
l.emit(eof)
close(l.tokens)
- }()
+ })
return ch
}
diff --git a/core/block_validator.go b/core/block_validator.go
index 6f349b0d04..92be755199 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -64,14 +64,42 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
return fmt.Errorf("uncle root hash mismatch: have %x, want %x", hash, header.UncleHash)
}
- if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash {
- return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
+
+ validateFuns := []func() error{
+ func() error {
+ if v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) {
+ return ErrKnownBlock
+ }
+ return nil
+ },
+ func() error {
+ if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash {
+ return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
+ }
+ return nil
+ },
+ func() error {
+ if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
+ if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
+ return consensus.ErrUnknownAncestor
+ }
+ return consensus.ErrPrunedAncestor
+ }
+ return nil
+ },
+ }
+ validateRes := make(chan error, len(validateFuns))
+ for _, f := range validateFuns {
+ tmpFunc := f
+ go func() {
+ validateRes <- tmpFunc()
+ }()
}
- if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
- if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
- return consensus.ErrUnknownAncestor
+ for i := 0; i < len(validateFuns); i++ {
+ r := <-validateRes
+ if r != nil {
+ return r
}
- return consensus.ErrPrunedAncestor
}
return nil
}
@@ -87,20 +115,43 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
}
// Validate the received block's bloom with the one derived from the generated receipts.
// For valid blocks this should always validate to true.
- rbloom := types.CreateBloom(receipts)
- if rbloom != header.Bloom {
- return fmt.Errorf("invalid bloom (remote: %x local: %x)", header.Bloom, rbloom)
+ validateFuns := []func() error{
+ func() error {
+ rbloom := types.CreateBloom(receipts)
+ if rbloom != header.Bloom {
+ return fmt.Errorf("invalid bloom (remote: %x local: %x)", header.Bloom, rbloom)
+ }
+ return nil
+ },
+ func() error {
+ receiptSha := types.DeriveSha(receipts, trie.NewStackTrie(nil))
+ if receiptSha != header.ReceiptHash {
+ return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha)
+ } else {
+ return nil
+ }
+ },
+ func() error {
+ if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
+ statedb.IterativeDump(true, true, true, json.NewEncoder(os.Stdout))
+ return fmt.Errorf("invalid merkle root (remote: %x local: %x)", header.Root, root)
+ } else {
+ return nil
+ }
+ },
}
- // Tre receipt Trie's root (R = (Tr [[H1, R1], ... [Hn, Rn]]))
- receiptSha := types.DeriveSha(receipts, trie.NewStackTrie(nil))
- if receiptSha != header.ReceiptHash {
- return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha)
+ validateRes := make(chan error, len(validateFuns))
+ for _, f := range validateFuns {
+ tmpFunc := f
+ go func() {
+ validateRes <- tmpFunc()
+ }()
}
- // Validate the state root against the received state root and throw
- // an error if they don't match.
- if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
- statedb.IterativeDump(true, true, true, json.NewEncoder(os.Stdout))
- return fmt.Errorf("invalid merkle root (remote: %x local: %x)", header.Root, root)
+ for i := 0; i < len(validateFuns); i++ {
+ r := <-validateRes
+ if r != nil {
+ return r
+ }
}
return nil
}
diff --git a/core/blockchain.go b/core/blockchain.go
index f8fffc091b..3cc44fc4e5 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -76,9 +76,6 @@ var (
blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
blockReorgInvalidatedTx = metrics.NewRegisteredMeter("chain/reorg/invalidTx", nil)
- blockPrefetchExecuteTimer = metrics.NewRegisteredTimer("chain/prefetch/executes", nil)
- blockPrefetchInterruptMeter = metrics.NewRegisteredMeter("chain/prefetch/interrupts", nil)
-
errInsertionInterrupted = errors.New("insertion is interrupted")
)
@@ -90,7 +87,7 @@ const (
maxFutureBlocks = 256
maxTimeFutureBlocks = 30
badBlockLimit = 10
- TriesInMemory = 128
+ maxBeyondBlocks = 2048
// BlockChainVersion ensures that an incompatible database forces a resync from scratch.
//
@@ -121,15 +118,15 @@ const (
// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
- TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
- TrieCleanJournal string // Disk journal for saving clean cache entries.
- TrieCleanRejournal time.Duration // Time interval to dump clean cache to disk periodically
- TrieCleanNoPrefetch bool // Whether to disable heuristic state prefetching for followup blocks
- TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk
- TrieDirtyDisabled bool // Whether to disable trie write caching and GC altogether (archive node)
- TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
- SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory
- Preimages bool // Whether to store preimage of trie key to the disk
+ TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
+ TrieCleanJournal string // Disk journal for saving clean cache entries.
+ TrieCleanRejournal time.Duration // Time interval to dump clean cache to disk periodically
+ TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk
+ TrieDirtyDisabled bool // Whether to disable trie write caching and GC altogether (archive node)
+ TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
+ SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory
+ Preimages bool // Whether to store preimage of trie key to the disk
+ TriesInMemory uint64 // How many tries keeps in memory
SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
}
@@ -141,6 +138,7 @@ var defaultCacheConfig = &CacheConfig{
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 256,
+ TriesInMemory: 128,
SnapshotWait: true,
}
@@ -173,6 +171,7 @@ type BlockChain struct {
// * N: means N block limit [HEAD-N+1, HEAD] and delete extra indexes
// * nil: disable tx reindexer/deleter, but still index new blocks
txLookupLimit uint64
+ triesInMemory uint64
hc *HeaderChain
rmLogsFeed event.Feed
@@ -202,11 +201,10 @@ type BlockChain struct {
running int32 // 0 if chain is running, 1 when stopped
procInterrupt int32 // interrupt signaler for block processing
- engine consensus.Engine
- validator Validator // Block and state validator interface
- prefetcher Prefetcher
- processor Processor // Block transaction processor interface
- vmConfig vm.Config
+ engine consensus.Engine
+ validator Validator // Block and state validator interface
+ processor Processor // Block transaction processor interface
+ vmConfig vm.Config
shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block.
terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
@@ -231,11 +229,12 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
cacheConfig: cacheConfig,
db: db,
triegc: prque.New(nil),
- stateCache: state.NewDatabaseWithConfig(db, &trie.Config{
+ stateCache: state.NewDatabaseWithConfigAndCache(db, &trie.Config{
Cache: cacheConfig.TrieCleanLimit,
Journal: cacheConfig.TrieCleanJournal,
Preimages: cacheConfig.Preimages,
}),
+ triesInMemory: cacheConfig.TriesInMemory,
quit: make(chan struct{}),
shouldPreserve: shouldPreserve,
bodyCache: bodyCache,
@@ -248,7 +247,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
vmConfig: vmConfig,
}
bc.validator = NewBlockValidator(chainConfig, bc, engine)
- bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
bc.processor = NewStateProcessor(chainConfig, bc, engine)
var err error
@@ -372,7 +370,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer)
recover = true
}
- bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover)
+ bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, int(bc.cacheConfig.TriesInMemory), head.Root(), !bc.cacheConfig.SnapshotWait, true, recover)
}
// Take ownership of this particular state
go bc.update()
@@ -407,6 +405,10 @@ func (bc *BlockChain) CacheReceipts(hash common.Hash, receipts types.Receipts) {
bc.receiptsCache.Add(hash, receipts)
}
+func (bc *BlockChain) CacheBlock(hash common.Hash, block *types.Block) {
+ bc.blockCache.Add(hash, block)
+}
+
// empty returns an indicator whether the blockchain is empty.
// Note, it's a special case that we connect a non-empty ancient
// database with an empty node, so that we can plugin the ancient
@@ -511,6 +513,7 @@ func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64,
// chain reparation mechanism without deleting any data!
if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.NumberU64() {
newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
+ lastBlockNum := header.Number.Uint64()
if newHeadBlock == nil {
log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
newHeadBlock = bc.genesisBlock
@@ -519,12 +522,17 @@ func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64,
// keeping rewinding until we exceed the optional threshold
// root hash
beyondRoot := (root == common.Hash{}) // Flag whether we're beyond the requested root (no root, always true)
-
+ enoughBeyondCount := false
+ beyondCount := 0
for {
+ beyondCount++
// If a root threshold was requested but not yet crossed, check
if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root {
beyondRoot, rootNumber = true, newHeadBlock.NumberU64()
}
+
+ enoughBeyondCount = beyondCount > maxBeyondBlocks
+
if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
if pivot == nil || newHeadBlock.NumberU64() > *pivot {
@@ -540,7 +548,20 @@ func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64,
newHeadBlock = bc.genesisBlock
}
}
- if beyondRoot || newHeadBlock.NumberU64() == 0 {
+ if beyondRoot || (enoughBeyondCount && root != common.Hash{}) || newHeadBlock.NumberU64() == 0 {
+ if enoughBeyondCount && (root != common.Hash{}) && rootNumber == 0 {
+ for {
+ lastBlockNum++
+ block := bc.GetBlockByNumber(lastBlockNum)
+ if block == nil {
+ break
+ }
+ if block.Root() == root {
+ rootNumber = block.NumberU64()
+ break
+ }
+ }
+ }
log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
break
}
@@ -1019,7 +1040,7 @@ func (bc *BlockChain) Stop() {
if !bc.cacheConfig.TrieDirtyDisabled {
triedb := bc.stateCache.TrieDB()
- for _, offset := range []uint64{0, 1, TriesInMemory - 1} {
+ for _, offset := range []uint64{0, 1, bc.triesInMemory - 1} {
if number := bc.CurrentBlock().NumberU64(); number > offset {
recent := bc.GetBlockByNumber(number - offset)
@@ -1036,7 +1057,7 @@ func (bc *BlockChain) Stop() {
}
}
for !bc.triegc.Empty() {
- triedb.Dereference(bc.triegc.PopItem().(common.Hash))
+ go triedb.Dereference(bc.triegc.PopItem().(common.Hash))
}
if size, _ := triedb.Size(); size != 0 {
log.Error("Dangling trie nodes after full cleanup")
@@ -1468,14 +1489,19 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
//
// Note all the components of block(td, hash->number map, header, body, receipts)
// should be written atomically. BlockBatch is used for containing all components.
- blockBatch := bc.db.NewBatch()
- rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
- rawdb.WriteBlock(blockBatch, block)
- rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
- rawdb.WritePreimages(blockBatch, state.Preimages())
- if err := blockBatch.Write(); err != nil {
- log.Crit("Failed to write block into disk", "err", err)
- }
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ blockBatch := bc.db.NewBatch()
+ rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
+ rawdb.WriteBlock(blockBatch, block)
+ rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
+ rawdb.WritePreimages(blockBatch, state.Preimages())
+ if err := blockBatch.Write(); err != nil {
+ log.Crit("Failed to write block into disk", "err", err)
+ }
+ wg.Done()
+ }()
// Commit all cached state changes into underlying memory database.
root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
if err != nil {
@@ -1493,7 +1519,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
bc.triegc.Push(root, -int64(block.NumberU64()))
- if current := block.NumberU64(); current > TriesInMemory {
+ if current := block.NumberU64(); current > bc.triesInMemory {
// If we exceeded our memory allowance, flush matured singleton nodes to disk
var (
nodes, imgs = triedb.Size()
@@ -1503,7 +1529,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
triedb.Cap(limit - ethdb.IdealBatchSize)
}
// Find the next state trie we need to commit
- chosen := current - TriesInMemory
+ chosen := current - bc.triesInMemory
// If we exceeded out time allowance, flush an entire trie to disk
if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
@@ -1522,8 +1548,8 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
} else {
// If we're exceeding limits but haven't reached a large enough memory gap,
// warn the user that the system is becoming unstable.
- if chosen < lastWrite+TriesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
- log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/TriesInMemory)
+ if chosen < lastWrite+bc.triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
+ log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/float64(bc.triesInMemory))
}
// Flush an entire trie and restart the counters
triedb.Commit(header.Root, true, nil)
@@ -1539,10 +1565,11 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
bc.triegc.Push(root, number)
break
}
- triedb.Dereference(root.(common.Hash))
+ go triedb.Dereference(root.(common.Hash))
}
}
}
+ wg.Wait()
// If the total difficulty is higher than our known, add it to the canonical chain
// Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
@@ -1681,7 +1708,8 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
return 0, nil
}
// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
- senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
+ signer := types.MakeSigner(bc.chainConfig, chain[0].Number())
+ go senderCacher.recoverFromBlocks(signer, chain)
var (
stats = insertStats{startTime: mclock.Now()}
@@ -1853,33 +1881,17 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
// Enable prefetching to pull in trie node paths while processing transactions
statedb.StartPrefetcher("chain")
activeState = statedb
+ statedb.TryPreload(block, signer)
- // If we have a followup block, run that against the current state to pre-cache
- // transactions and probabilistically some of the account/storage trie nodes.
- var followupInterrupt uint32
- if !bc.cacheConfig.TrieCleanNoPrefetch {
- if followup, err := it.peek(); followup != nil && err == nil {
- throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps)
-
- go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) {
- bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt)
-
- blockPrefetchExecuteTimer.Update(time.Since(start))
- if atomic.LoadUint32(interrupt) == 1 {
- blockPrefetchInterruptMeter.Mark(1)
- }
- }(time.Now(), followup, throwaway, &followupInterrupt)
- }
- }
- // Process block using the parent state as reference point
+ // Process block using the parent state as reference point
substart := time.Now()
receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
if err != nil {
bc.reportBlock(block, receipts, err)
- atomic.StoreUint32(&followupInterrupt, 1)
return it.index, err
}
bc.CacheReceipts(block.Hash(), receipts)
+ bc.CacheBlock(block.Hash(), block)
// Update the metrics touched during block processing
accountReadTimer.Update(statedb.AccountReads) // Account reads are complete, we can mark them
storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete, we can mark them
@@ -1887,17 +1899,16 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them
snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete, we can mark them
snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete, we can mark them
- triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation
trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates
trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates
- blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash)
+ blockExecutionTimer.Update(time.Since(substart))
// Validate the state using the default validator
substart = time.Now()
if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
bc.reportBlock(block, receipts, err)
- atomic.StoreUint32(&followupInterrupt, 1)
+ log.Error("validate state failed", "error", err)
return it.index, err
}
proctime := time.Since(start)
@@ -1906,12 +1917,11 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them
storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them
- blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash))
+ blockValidationTimer.Update(time.Since(substart))
// Write the block to the chain and get the status.
substart = time.Now()
status, err := bc.writeBlockWithState(block, receipts, logs, statedb, false)
- atomic.StoreUint32(&followupInterrupt, 1)
if err != nil {
return it.index, err
}
@@ -1920,7 +1930,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them
snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them
- blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits)
+ blockWriteTimer.Update(time.Since(substart))
blockInsertTimer.UpdateSince(start)
switch status {
@@ -2488,6 +2498,8 @@ func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLook
return lookup
}
+func (bc *BlockChain) TriesInMemory() uint64 { return bc.triesInMemory }
+
// Config retrieves the chain's fork configuration.
func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
index 8bb39d2607..5ddb6d2e07 100644
--- a/core/blockchain_repair_test.go
+++ b/core/blockchain_repair_test.go
@@ -1783,6 +1783,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
config.SnapshotLimit = 256
config.SnapshotWait = true
}
+ config.TriesInMemory = 128
chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
@@ -1812,6 +1813,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
}
}
}
+
if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil {
t.Fatalf("Failed to import canonical chain tail: %v", err)
}
diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go
index e99b09cf8c..6caec36eab 100644
--- a/core/blockchain_sethead_test.go
+++ b/core/blockchain_sethead_test.go
@@ -1982,6 +1982,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
config.SnapshotLimit = 256
config.SnapshotWait = true
}
+ config.TriesInMemory = 128
chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
@@ -2021,6 +2022,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
for _, block := range canonblocks {
chain.stateCache.TrieDB().Dereference(block.Root())
}
+ chain.stateCache.Purge()
// Force run a freeze cycle
type freezer interface {
Freeze(threshold uint64) error
diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go
index 75c09b421d..b61eb741f0 100644
--- a/core/blockchain_snapshot_test.go
+++ b/core/blockchain_snapshot_test.go
@@ -298,6 +298,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) {
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
+ TriesInMemory: 128,
SnapshotLimit: 0,
}
newchain, err := NewBlockChain(snaptest.db, cacheConfig, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
@@ -418,6 +419,7 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) {
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 0,
+ TriesInMemory: 128,
}
newchain, err := NewBlockChain(snaptest.db, config, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
@@ -434,6 +436,7 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) {
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 256,
SnapshotWait: false, // Don't wait rebuild
+ TriesInMemory: 128,
}
newchain, err = NewBlockChain(snaptest.db, config, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 5004abd1c7..9395a379f5 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -44,6 +44,8 @@ import (
var (
canonicalSeed = 1
forkSeed = 2
+
+ TestTriesInMemory = 128
)
// newCanonical creates a chain database, and injects a deterministic canonical
@@ -1481,7 +1483,7 @@ func TestTrieForkGC(t *testing.T) {
db := rawdb.NewMemoryDatabase()
genesis := new(Genesis).MustCommit(db)
- blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
+ blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*TestTriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
// Generate a bunch of fork blocks, each side forking from the canonical chain
forks := make([]*types.Block, len(blocks))
@@ -1510,7 +1512,7 @@ func TestTrieForkGC(t *testing.T) {
}
}
// Dereference all the recent tries and ensure no past trie is left in
- for i := 0; i < TriesInMemory; i++ {
+ for i := 0; i < TestTriesInMemory; i++ {
chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root())
chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root())
}
@@ -1529,8 +1531,8 @@ func TestLargeReorgTrieGC(t *testing.T) {
genesis := new(Genesis).MustCommit(db)
shared, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
- original, _ := GenerateChain(params.TestChainConfig, shared[len(shared)-1], engine, db, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) })
- competitor, _ := GenerateChain(params.TestChainConfig, shared[len(shared)-1], engine, db, 2*TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) })
+ original, _ := GenerateChain(params.TestChainConfig, shared[len(shared)-1], engine, db, 2*TestTriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) })
+ competitor, _ := GenerateChain(params.TestChainConfig, shared[len(shared)-1], engine, db, 2*TestTriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) })
// Import the shared chain and the original canonical one
diskdb := rawdb.NewMemoryDatabase()
@@ -1565,7 +1567,7 @@ func TestLargeReorgTrieGC(t *testing.T) {
if _, err := chain.InsertChain(competitor[len(competitor)-2:]); err != nil {
t.Fatalf("failed to finalize competitor chain: %v", err)
}
- for i, block := range competitor[:len(competitor)-TriesInMemory] {
+ for i, block := range competitor[:len(competitor)-TestTriesInMemory] {
if node, _ := chain.stateCache.TrieDB().Node(block.Root()); node != nil {
t.Fatalf("competitor %d: competing chain state missing", i)
}
@@ -1702,7 +1704,7 @@ func TestLowDiffLongChain(t *testing.T) {
// We must use a pretty long chain to ensure that the fork doesn't overtake us
// until after at least 128 blocks post tip
- blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 6*TriesInMemory, func(i int, b *BlockGen) {
+ blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 6*TestTriesInMemory, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{1})
b.OffsetTime(-9)
})
@@ -1720,7 +1722,7 @@ func TestLowDiffLongChain(t *testing.T) {
}
// Generate fork chain, starting from an early block
parent := blocks[10]
- fork, _ := GenerateChain(params.TestChainConfig, parent, engine, db, 8*TriesInMemory, func(i int, b *BlockGen) {
+ fork, _ := GenerateChain(params.TestChainConfig, parent, engine, db, 8*TestTriesInMemory, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{2})
})
@@ -1755,7 +1757,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
genesis := new(Genesis).MustCommit(db)
// Generate and import the canonical chain
- blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*TriesInMemory, nil)
+ blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*TestTriesInMemory, nil)
diskdb := rawdb.NewMemoryDatabase()
new(Genesis).MustCommit(diskdb)
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
@@ -1766,9 +1768,9 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
}
- lastPrunedIndex := len(blocks) - TriesInMemory - 1
+ lastPrunedIndex := len(blocks) - TestTriesInMemory - 1
lastPrunedBlock := blocks[lastPrunedIndex]
- firstNonPrunedBlock := blocks[len(blocks)-TriesInMemory]
+ firstNonPrunedBlock := blocks[len(blocks)-TestTriesInMemory]
// Verify pruning of lastPrunedBlock
if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) {
@@ -1785,7 +1787,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
// Generate fork chain, make it longer than canon
parentIndex := lastPrunedIndex + blocksBetweenCommonAncestorAndPruneblock
parent := blocks[parentIndex]
- fork, _ := GenerateChain(params.TestChainConfig, parent, engine, db, 2*TriesInMemory, func(i int, b *BlockGen) {
+ fork, _ := GenerateChain(params.TestChainConfig, parent, engine, db, 2*TestTriesInMemory, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{2})
})
// Prepend the parent(s)
@@ -2406,7 +2408,7 @@ func TestSideImportPrunedBlocks(t *testing.T) {
genesis := new(Genesis).MustCommit(db)
// Generate and import the canonical chain
- blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*TriesInMemory, nil)
+ blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*TestTriesInMemory, nil)
diskdb := rawdb.NewMemoryDatabase()
new(Genesis).MustCommit(diskdb)
chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
@@ -2417,14 +2419,14 @@ func TestSideImportPrunedBlocks(t *testing.T) {
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
}
- lastPrunedIndex := len(blocks) - TriesInMemory - 1
+ lastPrunedIndex := len(blocks) - TestTriesInMemory - 1
lastPrunedBlock := blocks[lastPrunedIndex]
// Verify pruning of lastPrunedBlock
if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) {
t.Errorf("Block %d not pruned", lastPrunedBlock.NumberU64())
}
- firstNonPrunedBlock := blocks[len(blocks)-TriesInMemory]
+ firstNonPrunedBlock := blocks[len(blocks)-TestTriesInMemory]
// Verify firstNonPrunedBlock is not pruned
if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) {
t.Errorf("Block %d pruned", firstNonPrunedBlock.NumberU64())
diff --git a/core/bloombits/matcher.go b/core/bloombits/matcher.go
index 927232be01..4e9df8a368 100644
--- a/core/bloombits/matcher.go
+++ b/core/bloombits/matcher.go
@@ -27,6 +27,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common/bitutil"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/crypto"
)
@@ -164,7 +165,7 @@ func (m *Matcher) Start(ctx context.Context, begin, end uint64, results chan uin
// Read the output from the result sink and deliver to the user
session.pend.Add(1)
- go func() {
+ gopool.Submit(func() {
defer session.pend.Done()
defer close(results)
@@ -210,7 +211,7 @@ func (m *Matcher) Start(ctx context.Context, begin, end uint64, results chan uin
}
}
}
- }()
+ })
return session, nil
}
@@ -226,7 +227,7 @@ func (m *Matcher) run(begin, end uint64, buffer int, session *MatcherSession) ch
source := make(chan *partialMatches, buffer)
session.pend.Add(1)
- go func() {
+ gopool.Submit(func() {
defer session.pend.Done()
defer close(source)
@@ -237,7 +238,7 @@ func (m *Matcher) run(begin, end uint64, buffer int, session *MatcherSession) ch
case source <- &partialMatches{i, bytes.Repeat([]byte{0xff}, int(m.sectionSize/8))}:
}
}
- }()
+ })
// Assemble the daisy-chained filtering pipeline
next := source
dist := make(chan *request, buffer)
@@ -247,7 +248,9 @@ func (m *Matcher) run(begin, end uint64, buffer int, session *MatcherSession) ch
}
// Start the request distribution
session.pend.Add(1)
- go m.distributor(dist, session)
+ gopool.Submit(func() {
+ m.distributor(dist, session)
+ })
return next
}
@@ -273,7 +276,7 @@ func (m *Matcher) subMatch(source chan *partialMatches, dist chan *request, bloo
results := make(chan *partialMatches, cap(source))
session.pend.Add(2)
- go func() {
+ gopool.Submit(func() {
// Tear down the goroutine and terminate all source channels
defer session.pend.Done()
defer close(process)
@@ -314,9 +317,9 @@ func (m *Matcher) subMatch(source chan *partialMatches, dist chan *request, bloo
}
}
}
- }()
+ })
- go func() {
+ gopool.Submit(func() {
// Tear down the goroutine and terminate the final sink channel
defer session.pend.Done()
defer close(results)
@@ -372,7 +375,7 @@ func (m *Matcher) subMatch(source chan *partialMatches, dist chan *request, bloo
}
}
}
- }()
+ })
return results
}
diff --git a/core/bloombits/scheduler.go b/core/bloombits/scheduler.go
index 6449c7465a..5fa6248110 100644
--- a/core/bloombits/scheduler.go
+++ b/core/bloombits/scheduler.go
@@ -18,6 +18,8 @@ package bloombits
import (
"sync"
+
+ "github.com/ethereum/go-ethereum/common/gopool"
)
// request represents a bloom retrieval task to prioritize and pull from the local
@@ -63,8 +65,12 @@ func (s *scheduler) run(sections chan uint64, dist chan *request, done chan []by
// Start the pipeline schedulers to forward between user -> distributor -> user
wg.Add(2)
- go s.scheduleRequests(sections, dist, pend, quit, wg)
- go s.scheduleDeliveries(pend, done, quit, wg)
+ gopool.Submit(func() {
+ s.scheduleRequests(sections, dist, pend, quit, wg)
+ })
+ gopool.Submit(func() {
+ s.scheduleDeliveries(pend, done, quit, wg)
+ })
}
// reset cleans up any leftovers from previous runs. This is required before a
diff --git a/core/genesis.go b/core/genesis.go
index 75052a19b5..9303522947 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -180,7 +180,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
// We have the genesis block in database(perhaps in ancient database)
// but the corresponding state is missing.
header := rawdb.ReadHeader(db, stored, 0)
- if _, err := state.New(header.Root, state.NewDatabaseWithConfig(db, nil), nil); err != nil {
+ if _, err := state.New(header.Root, state.NewDatabaseWithConfigAndCache(db, nil), nil); err != nil {
if genesis == nil {
genesis = DefaultGenesisBlock()
}
diff --git a/core/genesis_test.go b/core/genesis_test.go
index 44c1ef253a..e8efdbffb9 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -76,15 +76,6 @@ func TestSetupGenesis(t *testing.T) {
wantHash: params.MainnetGenesisHash,
wantConfig: params.MainnetChainConfig,
},
- {
- name: "mainnet block in DB, genesis == nil",
- fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- DefaultGenesisBlock().MustCommit(db)
- return SetupGenesisBlock(db, nil)
- },
- wantHash: params.MainnetGenesisHash,
- wantConfig: params.MainnetChainConfig,
- },
{
name: "custom block in DB, genesis == nil",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go
index ad222005be..4dca06765e 100644
--- a/core/rawdb/chain_iterator.go
+++ b/core/rawdb/chain_iterator.go
@@ -22,6 +22,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/common/prque"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
@@ -159,7 +160,9 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool
}
go lookup() // start the sequential db accessor
for i := 0; i < int(threads); i++ {
- go process()
+ gopool.Submit(func() {
+ process()
+ })
}
return hashesCh
}
diff --git a/core/state/database.go b/core/state/database.go
index 1a06e33409..ce37e73837 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -19,6 +19,7 @@ package state
import (
"errors"
"fmt"
+ "time"
"github.com/VictoriaMetrics/fastcache"
"github.com/ethereum/go-ethereum/common"
@@ -32,8 +33,18 @@ const (
// Number of codehash->size associations to keep.
codeSizeCacheSize = 100000
+ // Number of account (state) tries to keep in cache
+ accountTrieCacheSize = 32
+
+ // Number of storage tries to keep in cache
+ storageTrieCacheSize = 2000
+
// Cache size granted for caching clean code.
codeCacheSize = 64 * 1024 * 1024
+
+ purgeInterval = 600
+
+ maxAccountTrieSize = 1024 * 1024
)
// Database wraps access to tries and contract code.
@@ -55,6 +66,15 @@ type Database interface {
// TrieDB retrieves the low level trie database used for data storage.
TrieDB() *trie.Database
+
+ // CacheAccount caches the account trie at the given state root.
+ CacheAccount(root common.Hash, t Trie)
+
+ // CacheStorage caches the storage trie of the account addrHash at the given storage root.
+ CacheStorage(addrHash common.Hash, root common.Hash, t Trie)
+
+ // Purge drops all cached account and storage tries.
+ Purge()
}
// Trie is a Ethereum Merkle Patricia trie.
@@ -121,14 +141,56 @@ func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database {
}
}
+func NewDatabaseWithConfigAndCache(db ethdb.Database, config *trie.Config) Database {
+ csc, _ := lru.New(codeSizeCacheSize)
+ atc, _ := lru.New(accountTrieCacheSize)
+ stc, _ := lru.New(storageTrieCacheSize)
+
+ database := &cachingDB{
+ db: trie.NewDatabaseWithConfig(db, config),
+ codeSizeCache: csc,
+ codeCache: fastcache.New(codeCacheSize),
+ accountTrieCache: atc,
+ storageTrieCache: stc,
+ }
+ go database.purgeLoop()
+ return database
+}
+
type cachingDB struct {
- db *trie.Database
- codeSizeCache *lru.Cache
- codeCache *fastcache.Cache
+ db *trie.Database
+ codeSizeCache *lru.Cache
+ codeCache *fastcache.Cache
+ accountTrieCache *lru.Cache
+ storageTrieCache *lru.Cache
+}
+
+type triePair struct {
+ root common.Hash
+ trie Trie
+}
+
+func (db *cachingDB) purgeLoop() {
+ for {
+ time.Sleep(purgeInterval * time.Second)
+ _, accounts, ok := db.accountTrieCache.GetOldest()
+ if !ok {
+ continue
+ }
+ tr := accounts.(*trie.SecureTrie).GetRawTrie()
+ if tr.Size() > maxAccountTrieSize {
+ db.Purge()
+ }
+ }
}
// OpenTrie opens the main account trie at a specific root hash.
func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
+ if db.accountTrieCache != nil {
+ if tr, exist := db.accountTrieCache.Get(root); exist {
+ return tr.(Trie).(*trie.SecureTrie).Copy(), nil
+ }
+ }
tr, err := trie.NewSecure(root, db.db)
if err != nil {
return nil, err
@@ -138,6 +200,17 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
// OpenStorageTrie opens the storage trie of an account.
func (db *cachingDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) {
+ if db.storageTrieCache != nil {
+ if tries, exist := db.storageTrieCache.Get(addrHash); exist {
+ triesPairs := tries.([3]*triePair)
+ for _, triePair := range triesPairs {
+ if triePair != nil && triePair.root == root {
+ return triePair.trie.(*trie.SecureTrie).Copy(), nil
+ }
+ }
+ }
+ }
+
tr, err := trie.NewSecure(root, db.db)
if err != nil {
return nil, err
@@ -145,6 +218,43 @@ func (db *cachingDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) {
return tr, nil
}
+func (db *cachingDB) CacheAccount(root common.Hash, t Trie) {
+ if db.accountTrieCache == nil {
+ return
+ }
+ tr := t.(*trie.SecureTrie)
+ db.accountTrieCache.Add(root, tr.ResetCopy())
+}
+
+func (db *cachingDB) CacheStorage(addrHash common.Hash, root common.Hash, t Trie) {
+ if db.storageTrieCache == nil {
+ return
+ }
+ tr := t.(*trie.SecureTrie)
+ if tries, exist := db.storageTrieCache.Get(addrHash); exist {
+ triesArray := tries.([3]*triePair)
+ newTriesArray := [3]*triePair{
+ {root: root, trie: tr.ResetCopy()},
+ triesArray[0],
+ triesArray[1],
+ }
+ db.storageTrieCache.Add(addrHash, newTriesArray)
+ } else {
+ triesArray := [3]*triePair{{root: root, trie: tr.ResetCopy()}, nil, nil}
+ db.storageTrieCache.Add(addrHash, triesArray)
+ }
+ return
+}
+
+func (db *cachingDB) Purge() {
+ if db.storageTrieCache != nil {
+ db.storageTrieCache.Purge()
+ }
+ if db.accountTrieCache != nil {
+ db.accountTrieCache.Purge()
+ }
+}
+
// CopyTrie returns an independent copy of the given trie.
func (db *cachingDB) CopyTrie(t Trie) Trie {
switch t := t.(type) {
diff --git a/core/state/journal.go b/core/state/journal.go
index 2070f30875..366e0c9c26 100644
--- a/core/state/journal.go
+++ b/core/state/journal.go
@@ -43,7 +43,8 @@ type journal struct {
// newJournal create a new initialized journal.
func newJournal() *journal {
return &journal{
- dirties: make(map[common.Address]int),
+ dirties: make(map[common.Address]int, defaultNumOfSlots),
+ entries: make([]journalEntry, 0, defaultNumOfSlots),
}
}
@@ -90,7 +91,7 @@ type (
account *common.Address
}
resetObjectChange struct {
- prev *stateObject
+ prev *StateObject
prevdestruct bool
}
suicideChange struct {
@@ -150,7 +151,7 @@ func (ch createObjectChange) dirtied() *common.Address {
}
func (ch resetObjectChange) revert(s *StateDB) {
- s.setStateObject(ch.prev)
+ s.SetStateObject(ch.prev)
if !ch.prevdestruct && s.snap != nil {
delete(s.snapDestructs, ch.prev.addrHash)
}
@@ -253,7 +254,9 @@ func (ch accessListAddAccountChange) revert(s *StateDB) {
(addr) at this point, since no storage adds can remain when come upon
a single (addr) change.
*/
- s.accessList.DeleteAddress(*ch.address)
+ if s.accessList != nil {
+ s.accessList.DeleteAddress(*ch.address)
+ }
}
func (ch accessListAddAccountChange) dirtied() *common.Address {
@@ -261,7 +264,9 @@ func (ch accessListAddAccountChange) dirtied() *common.Address {
}
func (ch accessListAddSlotChange) revert(s *StateDB) {
- s.accessList.DeleteSlot(*ch.address, *ch.slot)
+ if s.accessList != nil {
+ s.accessList.DeleteSlot(*ch.address, *ch.slot)
+ }
}
func (ch accessListAddSlotChange) dirtied() *common.Address {
diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go
index 9e3c531707..bb599fcd87 100644
--- a/core/state/pruner/pruner.go
+++ b/core/state/pruner/pruner.go
@@ -82,15 +82,16 @@ type Pruner struct {
trieCachePath string
headHeader *types.Header
snaptree *snapshot.Tree
+ triesInMemory uint64
}
// NewPruner creates the pruner instance.
-func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize uint64) (*Pruner, error) {
+func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize, triesInMemory uint64) (*Pruner, error) {
headBlock := rawdb.ReadHeadBlock(db)
if headBlock == nil {
return nil, errors.New("Failed to load head block")
}
- snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, false)
+ snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, int(triesInMemory), headBlock.Root(), false, false, false)
if err != nil {
return nil, err // The relevant snapshot(s) might not exist
}
@@ -108,6 +109,7 @@ func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize uint6
stateBloom: stateBloom,
datadir: datadir,
trieCachePath: trieCachePath,
+ triesInMemory: triesInMemory,
headHeader: headBlock.Header(),
snaptree: snaptree,
}, nil
@@ -244,23 +246,23 @@ func (p *Pruner) Prune(root common.Hash) error {
return err
}
if stateBloomRoot != (common.Hash{}) {
- return RecoverPruning(p.datadir, p.db, p.trieCachePath)
+ return RecoverPruning(p.datadir, p.db, p.trieCachePath, p.triesInMemory)
}
- // If the target state root is not specified, use the HEAD-127 as the
+ // If the target state root is not specified, use the HEAD-(n-1) as the
// target. The reason for picking it is:
// - in most of the normal cases, the related state is available
// - the probability of this layer being reorg is very low
var layers []snapshot.Snapshot
if root == (common.Hash{}) {
// Retrieve all snapshot layers from the current HEAD.
- // In theory there are 128 difflayers + 1 disk layer present,
- // so 128 diff layers are expected to be returned.
- layers = p.snaptree.Snapshots(p.headHeader.Root, 128, true)
- if len(layers) != 128 {
- // Reject if the accumulated diff layers are less than 128. It
+ // In theory there are n difflayers + 1 disk layer present,
+ // so n diff layers are expected to be returned.
+ layers = p.snaptree.Snapshots(p.headHeader.Root, int(p.triesInMemory), true)
+ if len(layers) != int(p.triesInMemory) {
+ // Reject if the accumulated diff layers are less than n. It
// means in most of normal cases, there is no associated state
// with bottom-most diff layer.
- return fmt.Errorf("snapshot not old enough yet: need %d more blocks", 128-len(layers))
+ return fmt.Errorf("snapshot not old enough yet: need %d more blocks", int(p.triesInMemory)-len(layers))
}
// Use the bottom-most diff layer as the target
root = layers[len(layers)-1].Root()
@@ -272,8 +274,8 @@ func (p *Pruner) Prune(root common.Hash) error {
// The special case is for clique based networks(rinkeby, goerli
// and some other private networks), it's possible that two
// consecutive blocks will have same root. In this case snapshot
- // difflayer won't be created. So HEAD-127 may not paired with
- // head-127 layer. Instead the paired layer is higher than the
+ // difflayer won't be created. So HEAD-(n-1) may not be paired with
+ // head-(n-1) layer. Instead the paired layer is higher than the
// bottom-most diff layer. Try to find the bottom-most snapshot
// layer with state available.
//
@@ -352,7 +354,7 @@ func (p *Pruner) Prune(root common.Hash) error {
// pruning can be resumed. What's more if the bloom filter is constructed, the
// pruning **has to be resumed**. Otherwise a lot of dangling nodes may be left
// in the disk.
-func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) error {
+func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string, triesInMemory uint64) error {
stateBloomPath, stateBloomRoot, err := findBloomFilter(datadir)
if err != nil {
return err
@@ -372,7 +374,7 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err
// - The state HEAD is rewound already because of multiple incomplete `prune-state`
// In this case, even the state HEAD is not exactly matched with snapshot, it
// still feasible to recover the pruning correctly.
- snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, true)
+ snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, int(triesInMemory), headBlock.Root(), false, false, true)
if err != nil {
return err // The relevant snapshot(s) might not exist
}
@@ -392,7 +394,7 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err
// otherwise the dangling state will be left.
var (
found bool
- layers = snaptree.Snapshots(headBlock.Root(), 128, true)
+ layers = snaptree.Snapshots(headBlock.Root(), int(triesInMemory), true)
middleRoots = make(map[common.Hash]struct{})
)
for _, layer := range layers {
diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go
index f70cbf1e68..250692422d 100644
--- a/core/state/snapshot/conversion.go
+++ b/core/state/snapshot/conversion.go
@@ -26,6 +26,8 @@ import (
"sync"
"time"
+ "github.com/ethereum/go-ethereum/common/gopool"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
@@ -315,7 +317,8 @@ func generateTrieRoot(db ethdb.KeyValueWriter, it Iterator, account common.Hash,
if err != nil {
return stop(err)
}
- go func(hash common.Hash) {
+ hash := it.Hash()
+ gopool.Submit(func() {
subroot, err := leafCallback(db, hash, common.BytesToHash(account.CodeHash), stats)
if err != nil {
results <- err
@@ -326,7 +329,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, it Iterator, account common.Hash,
return
}
results <- nil
- }(it.Hash())
+ })
fullData, err = rlp.EncodeToBytes(account)
if err != nil {
return stop(err)
diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go
index ee88938b77..c0f0dab568 100644
--- a/core/state/snapshot/difflayer.go
+++ b/core/state/snapshot/difflayer.go
@@ -394,7 +394,7 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([
if storage, ok := dl.storageData[accountHash]; ok {
if data, ok := storage[storageHash]; ok {
snapshotDirtyStorageHitMeter.Mark(1)
- snapshotDirtyStorageHitDepthHist.Update(int64(depth))
+ //snapshotDirtyStorageHitDepthHist.Update(int64(depth))
if n := len(data); n > 0 {
snapshotDirtyStorageReadMeter.Mark(int64(n))
} else {
@@ -407,7 +407,7 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([
// If the account is known locally, but deleted, return an empty slot
if _, ok := dl.destructSet[accountHash]; ok {
snapshotDirtyStorageHitMeter.Mark(1)
- snapshotDirtyStorageHitDepthHist.Update(int64(depth))
+ //snapshotDirtyStorageHitDepthHist.Update(int64(depth))
snapshotDirtyStorageInexMeter.Mark(1)
snapshotBloomStorageTrueHitMeter.Mark(1)
return nil, nil
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index 3ed534461b..1b0d883439 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -158,11 +158,12 @@ type snapshot interface {
// storage data to avoid expensive multi-level trie lookups; and to allow sorted,
// cheap iteration of the account/storage tries for sync aid.
type Tree struct {
- diskdb ethdb.KeyValueStore // Persistent database to store the snapshot
- triedb *trie.Database // In-memory cache to access the trie through
- cache int // Megabytes permitted to use for read caches
- layers map[common.Hash]snapshot // Collection of all known layers
- lock sync.RWMutex
+ diskdb ethdb.KeyValueStore // Persistent database to store the snapshot
+ triedb *trie.Database // In-memory cache to access the trie through
+ cache int // Megabytes permitted to use for read caches
+ layers map[common.Hash]snapshot // Collection of all known layers
+ lock sync.RWMutex
+ capLimit int
}
// New attempts to load an already existing snapshot from a persistent key-value
@@ -174,13 +175,14 @@ type Tree struct {
// store, on a background thread. If the memory layers from the journal is not
// continuous with disk layer or the journal is missing, all diffs will be discarded
// iff it's in "recovery" mode, otherwise rebuild is mandatory.
-func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, rebuild bool, recovery bool) (*Tree, error) {
+func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache, capLimit int, root common.Hash, async bool, rebuild bool, recovery bool) (*Tree, error) {
// Create a new, empty snapshot tree
snap := &Tree{
- diskdb: diskdb,
- triedb: triedb,
- cache: cache,
- layers: make(map[common.Hash]snapshot),
+ diskdb: diskdb,
+ triedb: triedb,
+ cache: cache,
+ capLimit: capLimit,
+ layers: make(map[common.Hash]snapshot),
}
if !async {
defer snap.waitBuild()
@@ -348,6 +350,10 @@ func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs m
return nil
}
+func (t *Tree) CapLimit() int {
+ return t.capLimit
+}
+
// Cap traverses downwards the snapshot tree from a head block hash until the
// number of allowed layers are crossed. All layers beyond the permitted number
// are flattened downwards.
diff --git a/core/state/state_object.go b/core/state/state_object.go
index f93f47d5f5..623d07ac13 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -56,13 +56,13 @@ func (s Storage) Copy() Storage {
return cpy
}
-// stateObject represents an Ethereum account which is being modified.
+// StateObject represents an Ethereum account which is being modified.
//
// The usage pattern is as follows:
// First you need to obtain a state object.
// Account values can be accessed and modified through the object.
// Finally, call CommitTrie to write the modified storage trie into a database.
-type stateObject struct {
+type StateObject struct {
address common.Address
addrHash common.Hash // hash of ethereum address of the account
data Account
@@ -90,10 +90,13 @@ type stateObject struct {
dirtyCode bool // true if the code was updated
suicided bool
deleted bool
+
+ // encodeData caches the RLP-encoded account data; consumed by updateStateObject when non-nil.
+ encodeData []byte
}
// empty returns whether the account is considered empty.
-func (s *stateObject) empty() bool {
+func (s *StateObject) empty() bool {
return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash)
}
@@ -107,7 +110,7 @@ type Account struct {
}
// newObject creates a state object.
-func newObject(db *StateDB, address common.Address, data Account) *stateObject {
+func newObject(db *StateDB, address common.Address, data Account) *StateObject {
if data.Balance == nil {
data.Balance = new(big.Int)
}
@@ -117,7 +120,7 @@ func newObject(db *StateDB, address common.Address, data Account) *stateObject {
if data.Root == (common.Hash{}) {
data.Root = emptyRoot
}
- return &stateObject{
+ return &StateObject{
db: db,
address: address,
addrHash: crypto.Keccak256Hash(address[:]),
@@ -129,22 +132,22 @@ func newObject(db *StateDB, address common.Address, data Account) *stateObject {
}
// EncodeRLP implements rlp.Encoder.
-func (s *stateObject) EncodeRLP(w io.Writer) error {
+func (s *StateObject) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, s.data)
}
// setError remembers the first non-nil error it is called with.
-func (s *stateObject) setError(err error) {
+func (s *StateObject) setError(err error) {
if s.dbErr == nil {
s.dbErr = err
}
}
-func (s *stateObject) markSuicided() {
+func (s *StateObject) markSuicided() {
s.suicided = true
}
-func (s *stateObject) touch() {
+func (s *StateObject) touch() {
s.db.journal.append(touchChange{
account: &s.address,
})
@@ -155,7 +158,7 @@ func (s *stateObject) touch() {
}
}
-func (s *stateObject) getTrie(db Database) Trie {
+func (s *StateObject) getTrie(db Database) Trie {
if s.trie == nil {
// Try fetching from prefetcher first
// We don't prefetch empty tries
@@ -177,7 +180,7 @@ func (s *stateObject) getTrie(db Database) Trie {
}
// GetState retrieves a value from the account storage trie.
-func (s *stateObject) GetState(db Database, key common.Hash) common.Hash {
+func (s *StateObject) GetState(db Database, key common.Hash) common.Hash {
// If the fake storage is set, only lookup the state here(in the debugging mode)
if s.fakeStorage != nil {
return s.fakeStorage[key]
@@ -192,7 +195,7 @@ func (s *stateObject) GetState(db Database, key common.Hash) common.Hash {
}
// GetCommittedState retrieves a value from the committed account storage trie.
-func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Hash {
+func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Hash {
// If the fake storage is set, only lookup the state here(in the debugging mode)
if s.fakeStorage != nil {
return s.fakeStorage[key]
@@ -265,7 +268,7 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
}
// SetState updates a value in account storage.
-func (s *stateObject) SetState(db Database, key, value common.Hash) {
+func (s *StateObject) SetState(db Database, key, value common.Hash) {
// If the fake storage is set, put the temporary state update here.
if s.fakeStorage != nil {
s.fakeStorage[key] = value
@@ -291,7 +294,7 @@ func (s *stateObject) SetState(db Database, key, value common.Hash) {
// lookup only happens in the fake state storage.
//
// Note this function should only be used for debugging purpose.
-func (s *stateObject) SetStorage(storage map[common.Hash]common.Hash) {
+func (s *StateObject) SetStorage(storage map[common.Hash]common.Hash) {
// Allocate fake storage if it's nil.
if s.fakeStorage == nil {
s.fakeStorage = make(Storage)
@@ -303,13 +306,13 @@ func (s *stateObject) SetStorage(storage map[common.Hash]common.Hash) {
// debugging and the `fake` storage won't be committed to database.
}
-func (s *stateObject) setState(key, value common.Hash) {
+func (s *StateObject) setState(key, value common.Hash) {
s.dirtyStorage[key] = value
}
// finalise moves all dirty storage slots into the pending area to be hashed or
// committed later. It is invoked at the end of every transaction.
-func (s *stateObject) finalise(prefetch bool) {
+func (s *StateObject) finalise(prefetch bool) {
slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage))
for key, value := range s.dirtyStorage {
s.pendingStorage[key] = value
@@ -318,7 +321,7 @@ func (s *stateObject) finalise(prefetch bool) {
}
}
if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != emptyRoot {
- s.db.prefetcher.prefetch(s.data.Root, slotsToPrefetch)
+ s.db.prefetcher.prefetch(s.data.Root, slotsToPrefetch, s.addrHash)
}
if len(s.dirtyStorage) > 0 {
s.dirtyStorage = make(Storage)
@@ -327,7 +330,7 @@ func (s *stateObject) finalise(prefetch bool) {
// updateTrie writes cached storage modifications into the object's storage trie.
// It will return nil if the trie has not been loaded and no changes have been made
-func (s *stateObject) updateTrie(db Database) Trie {
+func (s *StateObject) updateTrie(db Database) Trie {
// Make sure all dirty slots are finalized into the pending storage area
s.finalise(false) // Don't prefetch any more, pull directly if need be
if len(s.pendingStorage) == 0 {
@@ -335,7 +338,11 @@ func (s *stateObject) updateTrie(db Database) Trie {
}
// Track the amount of time wasted on updating the storage trie
if metrics.EnabledExpensive {
- defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now())
+ defer func(start time.Time) {
+ s.db.MetricsMux.Lock()
+ s.db.StorageUpdates += time.Since(start)
+ s.db.MetricsMux.Unlock()
+ }(time.Now())
}
// The snapshot storage map for the object
var storage map[common.Hash][]byte
@@ -361,6 +368,7 @@ func (s *stateObject) updateTrie(db Database) Trie {
}
// If state snapshotting is active, cache the data til commit
if s.db.snap != nil {
+ s.db.snapMux.Lock()
if storage == nil {
// Retrieve the old storage map, if available, create a new one otherwise
if storage = s.db.snapStorage[s.addrHash]; storage == nil {
@@ -369,6 +377,7 @@ func (s *stateObject) updateTrie(db Database) Trie {
}
}
storage[crypto.HashData(hasher, key[:])] = v // v will be nil if value is 0x00
+ s.db.snapMux.Unlock()
}
usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure
}
@@ -382,23 +391,30 @@ func (s *stateObject) updateTrie(db Database) Trie {
}
// UpdateRoot sets the trie root to the current root hash of
-func (s *stateObject) updateRoot(db Database) {
+func (s *StateObject) updateRoot(db Database) {
// If nothing changed, don't bother with hashing anything
if s.updateTrie(db) == nil {
return
}
// Track the amount of time wasted on hashing the storage trie
if metrics.EnabledExpensive {
- defer func(start time.Time) { s.db.StorageHashes += time.Since(start) }(time.Now())
+ defer func(start time.Time) {
+ s.db.MetricsMux.Lock()
+ s.db.StorageHashes += time.Since(start)
+ s.db.MetricsMux.Unlock()
+ }(time.Now())
}
s.data.Root = s.trie.Hash()
}
// CommitTrie the storage trie of the object to db.
// This updates the trie root.
-func (s *stateObject) CommitTrie(db Database) error {
+func (s *StateObject) CommitTrie(db Database) error {
// If nothing changed, don't bother with hashing anything
if s.updateTrie(db) == nil {
+ if s.trie != nil && s.data.Root != emptyRoot {
+ db.CacheStorage(s.addrHash, s.data.Root, s.trie)
+ }
return nil
}
if s.dbErr != nil {
@@ -412,12 +428,15 @@ func (s *stateObject) CommitTrie(db Database) error {
if err == nil {
s.data.Root = root
}
+ if s.data.Root != emptyRoot {
+ db.CacheStorage(s.addrHash, s.data.Root, s.trie)
+ }
return err
}
// AddBalance adds amount to s's balance.
// It is used to add funds to the destination account of a transfer.
-func (s *stateObject) AddBalance(amount *big.Int) {
+func (s *StateObject) AddBalance(amount *big.Int) {
// EIP161: We must check emptiness for the objects such that the account
// clearing (0,0,0 objects) can take effect.
if amount.Sign() == 0 {
@@ -431,14 +450,14 @@ func (s *stateObject) AddBalance(amount *big.Int) {
// SubBalance removes amount from s's balance.
// It is used to remove funds from the origin account of a transfer.
-func (s *stateObject) SubBalance(amount *big.Int) {
+func (s *StateObject) SubBalance(amount *big.Int) {
if amount.Sign() == 0 {
return
}
s.SetBalance(new(big.Int).Sub(s.Balance(), amount))
}
-func (s *stateObject) SetBalance(amount *big.Int) {
+func (s *StateObject) SetBalance(amount *big.Int) {
s.db.journal.append(balanceChange{
account: &s.address,
prev: new(big.Int).Set(s.data.Balance),
@@ -446,14 +465,14 @@ func (s *stateObject) SetBalance(amount *big.Int) {
s.setBalance(amount)
}
-func (s *stateObject) setBalance(amount *big.Int) {
+func (s *StateObject) setBalance(amount *big.Int) {
s.data.Balance = amount
}
// Return the gas back to the origin. Used by the Virtual machine or Closures
-func (s *stateObject) ReturnGas(gas *big.Int) {}
+func (s *StateObject) ReturnGas(gas *big.Int) {}
-func (s *stateObject) deepCopy(db *StateDB) *stateObject {
+func (s *StateObject) deepCopy(db *StateDB) *StateObject {
stateObject := newObject(db, s.address, s.data)
if s.trie != nil {
stateObject.trie = db.db.CopyTrie(s.trie)
@@ -473,12 +492,12 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject {
//
// Returns the address of the contract/account
-func (s *stateObject) Address() common.Address {
+func (s *StateObject) Address() common.Address {
return s.address
}
// Code returns the contract code associated with this object, if any.
-func (s *stateObject) Code(db Database) []byte {
+func (s *StateObject) Code(db Database) []byte {
if s.code != nil {
return s.code
}
@@ -496,7 +515,7 @@ func (s *stateObject) Code(db Database) []byte {
// CodeSize returns the size of the contract code associated with this object,
// or zero if none. This method is an almost mirror of Code, but uses a cache
// inside the database to avoid loading codes seen recently.
-func (s *stateObject) CodeSize(db Database) int {
+func (s *StateObject) CodeSize(db Database) int {
if s.code != nil {
return len(s.code)
}
@@ -510,7 +529,7 @@ func (s *stateObject) CodeSize(db Database) int {
return size
}
-func (s *stateObject) SetCode(codeHash common.Hash, code []byte) {
+func (s *StateObject) SetCode(codeHash common.Hash, code []byte) {
prevcode := s.Code(s.db.db)
s.db.journal.append(codeChange{
account: &s.address,
@@ -520,13 +539,13 @@ func (s *stateObject) SetCode(codeHash common.Hash, code []byte) {
s.setCode(codeHash, code)
}
-func (s *stateObject) setCode(codeHash common.Hash, code []byte) {
+func (s *StateObject) setCode(codeHash common.Hash, code []byte) {
s.code = code
s.data.CodeHash = codeHash[:]
s.dirtyCode = true
}
-func (s *stateObject) SetNonce(nonce uint64) {
+func (s *StateObject) SetNonce(nonce uint64) {
s.db.journal.append(nonceChange{
account: &s.address,
prev: s.data.Nonce,
@@ -534,25 +553,25 @@ func (s *stateObject) SetNonce(nonce uint64) {
s.setNonce(nonce)
}
-func (s *stateObject) setNonce(nonce uint64) {
+func (s *StateObject) setNonce(nonce uint64) {
s.data.Nonce = nonce
}
-func (s *stateObject) CodeHash() []byte {
+func (s *StateObject) CodeHash() []byte {
return s.data.CodeHash
}
-func (s *stateObject) Balance() *big.Int {
+func (s *StateObject) Balance() *big.Int {
return s.data.Balance
}
-func (s *stateObject) Nonce() uint64 {
+func (s *StateObject) Nonce() uint64 {
return s.data.Nonce
}
-// Never called, but must be present to allow stateObject to be used
+// Never called, but must be present to allow StateObject to be used
// as a vm.Account interface that also satisfies the vm.ContractRef
// interface. Interfaces are awesome.
-func (s *stateObject) Value() *big.Int {
- panic("Value on stateObject should never be called")
+func (s *StateObject) Value() *big.Int {
+ panic("Value on StateObject should never be called")
}
diff --git a/core/state/state_test.go b/core/state/state_test.go
index 9566531466..9f003fefb5 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -165,7 +165,7 @@ func TestSnapshot2(t *testing.T) {
so0.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e'}), []byte{'c', 'a', 'f', 'e'})
so0.suicided = false
so0.deleted = false
- state.setStateObject(so0)
+ state.SetStateObject(so0)
root, _ := state.Commit(false)
state, _ = New(root, state.db, state.snaps)
@@ -177,7 +177,7 @@ func TestSnapshot2(t *testing.T) {
so1.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e', '2'}), []byte{'c', 'a', 'f', 'e', '2'})
so1.suicided = true
so1.deleted = true
- state.setStateObject(so1)
+ state.SetStateObject(so1)
so1 = state.getStateObject(stateobjaddr1)
if so1 != nil {
@@ -201,7 +201,7 @@ func TestSnapshot2(t *testing.T) {
}
}
-func compareStateObjects(so0, so1 *stateObject, t *testing.T) {
+func compareStateObjects(so0, so1 *StateObject, t *testing.T) {
if so0.Address() != so1.Address() {
t.Fatalf("Address mismatch: have %v, want %v", so0.address, so1.address)
}
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 90f4709bfc..7940613cd6 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -21,7 +21,9 @@ import (
"errors"
"fmt"
"math/big"
+ "runtime"
"sort"
+ "sync"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -29,12 +31,18 @@ import (
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
+const (
+ preLoadLimit = 64
+ defaultNumOfSlots = 100
+)
+
type revision struct {
id int
journalIndex int
@@ -43,6 +51,8 @@ type revision struct {
var (
// emptyRoot is the known root hash of an empty trie.
emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+
+ emptyAddr = crypto.Keccak256Hash(common.Address{}.Bytes())
)
type proofList [][]byte
@@ -68,6 +78,7 @@ type StateDB struct {
trie Trie
hasher crypto.KeccakState
+ snapMux sync.Mutex
snaps *snapshot.Tree
snap snapshot.Snapshot
snapDestructs map[common.Hash]struct{}
@@ -75,7 +86,7 @@ type StateDB struct {
snapStorage map[common.Hash]map[common.Hash][]byte
// This map holds 'live' objects, which will get modified while processing a state transition.
- stateObjects map[common.Address]*stateObject
+ stateObjects map[common.Address]*StateObject
stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie
stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution
@@ -106,6 +117,7 @@ type StateDB struct {
nextRevisionId int
// Measurements gathered during execution for debugging purposes
+ MetricsMux sync.Mutex
AccountReads time.Duration
AccountHashes time.Duration
AccountUpdates time.Duration
@@ -121,24 +133,27 @@ type StateDB struct {
// New creates a new state from a given trie.
func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) {
- tr, err := db.OpenTrie(root)
- if err != nil {
- return nil, err
- }
+ return newStateDB(root, db, snaps)
+}
+
+func newStateDB(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) {
sdb := &StateDB{
db: db,
- trie: tr,
originalRoot: root,
snaps: snaps,
- stateObjects: make(map[common.Address]*stateObject),
- stateObjectsPending: make(map[common.Address]struct{}),
- stateObjectsDirty: make(map[common.Address]struct{}),
- logs: make(map[common.Hash][]*types.Log),
+ stateObjects: make(map[common.Address]*StateObject, defaultNumOfSlots),
+ stateObjectsPending: make(map[common.Address]struct{}, defaultNumOfSlots),
+ stateObjectsDirty: make(map[common.Address]struct{}, defaultNumOfSlots),
+ logs: make(map[common.Hash][]*types.Log, defaultNumOfSlots),
preimages: make(map[common.Hash][]byte),
journal: newJournal(),
- accessList: newAccessList(),
hasher: crypto.NewKeccakState(),
}
+ tr, err := db.OpenTrie(root)
+ if err != nil {
+ return nil, err
+ }
+ sdb.trie = tr
if sdb.snaps != nil {
if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil {
sdb.snapDestructs = make(map[common.Hash]struct{})
@@ -461,33 +476,28 @@ func (s *StateDB) Suicide(addr common.Address) bool {
//
// updateStateObject writes the given object to the trie.
-func (s *StateDB) updateStateObject(obj *stateObject) {
+func (s *StateDB) updateStateObject(obj *StateObject) {
// Track the amount of time wasted on updating the account from the trie
if metrics.EnabledExpensive {
defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
}
// Encode the account and update the account trie
addr := obj.Address()
-
- data, err := rlp.EncodeToBytes(obj)
- if err != nil {
- panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err))
+ data := obj.encodeData
+ var err error
+ if data == nil {
+ data, err = rlp.EncodeToBytes(obj)
+ if err != nil {
+ panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err))
+ }
}
if err = s.trie.TryUpdate(addr[:], data); err != nil {
s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
}
-
- // If state snapshotting is active, cache the data til commit. Note, this
- // update mechanism is not symmetric to the deletion, because whereas it is
- // enough to track account updates at commit time, deletions need tracking
- // at transaction boundary level to ensure we capture state clearing.
- if s.snap != nil {
- s.snapAccounts[obj.addrHash] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash)
- }
}
// deleteStateObject removes the given object from the state trie.
-func (s *StateDB) deleteStateObject(obj *stateObject) {
+func (s *StateDB) deleteStateObject(obj *StateObject) {
// Track the amount of time wasted on deleting the account from the trie
if metrics.EnabledExpensive {
defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
@@ -502,18 +512,92 @@ func (s *StateDB) deleteStateObject(obj *stateObject) {
// getStateObject retrieves a state object given by the address, returning nil if
// the object is not found or was deleted in this execution context. If you need
// to differentiate between non-existent/just-deleted, use getDeletedStateObject.
-func (s *StateDB) getStateObject(addr common.Address) *stateObject {
+func (s *StateDB) getStateObject(addr common.Address) *StateObject {
if obj := s.getDeletedStateObject(addr); obj != nil && !obj.deleted {
return obj
}
return nil
}
+func (s *StateDB) TryPreload(block *types.Block, signer types.Signer) {
+ accounts := make(map[common.Address]bool, block.Transactions().Len())
+ accountsSlice := make([]common.Address, 0, block.Transactions().Len())
+ for _, tx := range block.Transactions() {
+ from, err := types.Sender(signer, tx)
+ if err != nil {
+ break
+ }
+ accounts[from] = true
+ if tx.To() != nil {
+ accounts[*tx.To()] = true
+ }
+ }
+ for account := range accounts {
+ accountsSlice = append(accountsSlice, account)
+ }
+ if len(accountsSlice) >= preLoadLimit && len(accountsSlice) > runtime.NumCPU() {
+ objsChan := make(chan []*StateObject, runtime.NumCPU())
+ for i := 0; i < runtime.NumCPU(); i++ {
+ start := i * len(accountsSlice) / runtime.NumCPU()
+ end := (i + 1) * len(accountsSlice) / runtime.NumCPU()
+ if i+1 == runtime.NumCPU() {
+ end = len(accountsSlice)
+ }
+ go func(start, end int) {
+ objs := s.preloadStateObject(accountsSlice[start:end])
+ objsChan <- objs
+ }(start, end)
+ }
+ for i := 0; i < runtime.NumCPU(); i++ {
+ objs := <-objsChan
+ if objs != nil {
+ for _, obj := range objs {
+ s.SetStateObject(obj)
+ }
+ }
+ }
+ }
+}
+
+func (s *StateDB) preloadStateObject(address []common.Address) []*StateObject {
+ // Prefer live objects if any is available
+ if s.snap == nil {
+ return nil
+ }
+ hasher := crypto.NewKeccakState()
+ objs := make([]*StateObject, 0, len(address))
+ for _, addr := range address {
+ // If no live objects are available, attempt to use snapshots
+ if acc, err := s.snap.Account(crypto.HashData(hasher, addr.Bytes())); err == nil {
+ if acc == nil {
+ continue
+ }
+ data := &Account{
+ Nonce: acc.Nonce,
+ Balance: acc.Balance,
+ CodeHash: acc.CodeHash,
+ Root: common.BytesToHash(acc.Root),
+ }
+ if len(data.CodeHash) == 0 {
+ data.CodeHash = emptyCodeHash
+ }
+ if data.Root == (common.Hash{}) {
+ data.Root = emptyRoot
+ }
+ // Insert into the live set
+ obj := newObject(s, addr, *data)
+ objs = append(objs, obj)
+ }
+ // Do not enable this feature when snapshot is not enabled.
+ }
+ return objs
+}
+
// getDeletedStateObject is similar to getStateObject, but instead of returning
// nil for a deleted state object, it returns the actual object with the deleted
// flag set. This is needed by the state journal to revert to the correct s-
// destructed object instead of wiping all knowledge about the state object.
-func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
+func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject {
// Prefer live objects if any is available
if obj := s.stateObjects[addr]; obj != nil {
return obj
@@ -548,6 +632,14 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
}
// If snapshot unavailable or reading from it failed, load from the database
if s.snap == nil || err != nil {
+ if s.trie == nil {
+ tr, err := s.db.OpenTrie(s.originalRoot)
+ if err != nil {
+ s.setError(fmt.Errorf("failed to open state trie: %v", err))
+ return nil
+ }
+ s.trie = tr
+ }
if metrics.EnabledExpensive {
defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now())
}
@@ -567,16 +659,16 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
}
// Insert into the live set
obj := newObject(s, addr, *data)
- s.setStateObject(obj)
+ s.SetStateObject(obj)
return obj
}
-func (s *StateDB) setStateObject(object *stateObject) {
+func (s *StateDB) SetStateObject(object *StateObject) {
s.stateObjects[object.Address()] = object
}
// GetOrNewStateObject retrieves a state object or create a new state object if nil.
-func (s *StateDB) GetOrNewStateObject(addr common.Address) *stateObject {
+func (s *StateDB) GetOrNewStateObject(addr common.Address) *StateObject {
stateObject := s.getStateObject(addr)
if stateObject == nil {
stateObject, _ = s.createObject(addr)
@@ -586,7 +678,7 @@ func (s *StateDB) GetOrNewStateObject(addr common.Address) *stateObject {
// createObject creates a new state object. If there is an existing account with
// the given address, it is overwritten and returned as the second return value.
-func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) {
+func (s *StateDB) createObject(addr common.Address) (newobj, prev *StateObject) {
prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!
var prevdestruct bool
@@ -603,7 +695,7 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject)
} else {
s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct})
}
- s.setStateObject(newobj)
+ s.SetStateObject(newobj)
if prev != nil && !prev.deleted {
return newobj, prev
}
@@ -663,7 +755,7 @@ func (s *StateDB) Copy() *StateDB {
state := &StateDB{
db: s.db,
trie: s.db.CopyTrie(s.trie),
- stateObjects: make(map[common.Address]*stateObject, len(s.journal.dirties)),
+ stateObjects: make(map[common.Address]*StateObject, len(s.journal.dirties)),
stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)),
stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)),
refund: s.refund,
@@ -720,7 +812,9 @@ func (s *StateDB) Copy() *StateDB {
// _between_ transactions/blocks, never in the middle of a transaction.
// However, it doesn't cost us much to copy an empty list, so we do it anyway
// to not blow up if we ever decide copy it in the middle of a transaction
- state.accessList = s.accessList.Copy()
+ if s.accessList != nil {
+ state.accessList = s.accessList.Copy()
+ }
// If there's a prefetcher running, make an inactive copy of it that can
// only access data but does not actively preload (since the user will not
@@ -816,16 +910,19 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
} else {
obj.finalise(true) // Prefetch slots in the background
}
- s.stateObjectsPending[addr] = struct{}{}
- s.stateObjectsDirty[addr] = struct{}{}
-
- // At this point, also ship the address off to the precacher. The precacher
- // will start loading tries, and when the change is eventually committed,
- // the commit-phase will be a lot faster
- addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure
+ if _, exist := s.stateObjectsPending[addr]; !exist {
+ s.stateObjectsPending[addr] = struct{}{}
+ }
+ if _, exist := s.stateObjectsDirty[addr]; !exist {
+ s.stateObjectsDirty[addr] = struct{}{}
+ // At this point, also ship the address off to the precacher. The precacher
+ // will start loading tries, and when the change is eventually committed,
+ // the commit-phase will be a lot faster
+ addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure
+ }
}
if s.prefetcher != nil && len(addressesToPrefetch) > 0 {
- s.prefetcher.prefetch(s.originalRoot, addressesToPrefetch)
+ s.prefetcher.prefetch(s.originalRoot, addressesToPrefetch, emptyAddr)
}
// Invalidate journal because reverting across transactions is not allowed.
s.clearJournalAndRefund()
@@ -852,6 +949,23 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
s.prefetcher = nil
}()
}
+
+ tasks := make(chan func())
+ finishCh := make(chan struct{})
+ defer close(finishCh)
+ wg := sync.WaitGroup{}
+ for i := 0; i < runtime.NumCPU(); i++ {
+ go func() {
+ for {
+ select {
+ case task := <-tasks:
+ task()
+ case <-finishCh:
+ return
+ }
+ }
+ }()
+ }
// Although naively it makes sense to retrieve the account trie and then do
// the contract storage and account updates sequentially, that short circuits
// the account prefetcher. Instead, let's process all the storage updates
@@ -859,9 +973,29 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// to pull useful data from disk.
for addr := range s.stateObjectsPending {
if obj := s.stateObjects[addr]; !obj.deleted {
- obj.updateRoot(s.db)
+ wg.Add(1)
+ tasks <- func() {
+ obj.updateRoot(s.db)
+
+ // If state snapshotting is active, cache the data until commit. Note, this
+ // update mechanism is not symmetric to the deletion, because whereas it is
+ // enough to track account updates at commit time, deletions need tracking
+ // at transaction boundary level to ensure we capture state clearing.
+ if s.snap != nil && !obj.deleted {
+ s.snapMux.Lock()
+ s.snapAccounts[obj.addrHash] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash)
+ s.snapMux.Unlock()
+ }
+ data, err := rlp.EncodeToBytes(obj)
+ if err != nil {
+ panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err))
+ }
+ obj.encodeData = data
+ wg.Done()
+ }
}
}
+ wg.Wait()
// Now we're about to start to write changes to the trie. The trie is so far
// _untouched_. We can check with the prefetcher, if it can give us a trie
// which has the same root, but also has some content loaded into it.
@@ -870,6 +1004,13 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
s.trie = trie
}
}
+ if s.trie == nil {
+ tr, err := s.db.OpenTrie(s.originalRoot)
+ if err != nil {
+ panic(fmt.Sprintf("Failed to open trie tree"))
+ }
+ s.trie = tr
+ }
usedAddrs := make([][]byte, 0, len(s.stateObjectsPending))
for addr := range s.stateObjectsPending {
if obj := s.stateObjects[addr]; obj.deleted {
@@ -889,7 +1030,8 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
if metrics.EnabledExpensive {
defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now())
}
- return s.trie.Hash()
+ root := s.trie.Hash()
+ return root
}
// Prepare sets the current transaction hash and index and block hash which is
@@ -898,7 +1040,7 @@ func (s *StateDB) Prepare(thash, bhash common.Hash, ti int) {
s.thash = thash
s.bhash = bhash
s.txIndex = ti
- s.accessList = newAccessList()
+ s.accessList = nil
}
func (s *StateDB) clearJournalAndRefund() {
@@ -915,72 +1057,130 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
}
// Finalize any pending changes and merge everything into the tries
- s.IntermediateRoot(deleteEmptyObjects)
+ root := s.IntermediateRoot(deleteEmptyObjects)
+
+ commitFuncs := []func() error{
+ func() error {
+ // Commit objects to the trie, measuring the elapsed time
+ tasks := make(chan func(batch ethdb.KeyValueWriter))
+ taskResults := make(chan error, len(s.stateObjectsDirty))
+ tasksNum := 0
+ finishCh := make(chan struct{})
+ defer close(finishCh)
+ for i := 0; i < runtime.NumCPU(); i++ {
+ go func() {
+ codeWriter := s.db.TrieDB().DiskDB().NewBatch()
+ for {
+ select {
+ case task := <-tasks:
+ task(codeWriter)
+ case <-finishCh:
+ if codeWriter.ValueSize() > 0 {
+ if err := codeWriter.Write(); err != nil {
+ log.Crit("Failed to commit dirty codes", "error", err)
+ }
+ }
+ return
+ }
+ }
+ }()
+ }
- // Commit objects to the trie, measuring the elapsed time
- codeWriter := s.db.TrieDB().DiskDB().NewBatch()
- for addr := range s.stateObjectsDirty {
- if obj := s.stateObjects[addr]; !obj.deleted {
- // Write any contract code associated with the state object
- if obj.code != nil && obj.dirtyCode {
- rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
- obj.dirtyCode = false
+ for addr := range s.stateObjectsDirty {
+ if obj := s.stateObjects[addr]; !obj.deleted {
+ // Write any contract code associated with the state object
+ tasks <- func(codeWriter ethdb.KeyValueWriter) {
+ if obj.code != nil && obj.dirtyCode {
+ rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
+ obj.dirtyCode = false
+ }
+ // Write any storage changes in the state object to its storage trie
+ if err := obj.CommitTrie(s.db); err != nil {
+ taskResults <- err
+ }
+ taskResults <- nil
+ }
+ tasksNum++
+ }
}
- // Write any storage changes in the state object to its storage trie
- if err := obj.CommitTrie(s.db); err != nil {
- return common.Hash{}, err
+
+ for i := 0; i < tasksNum; i++ {
+ err := <-taskResults
+ if err != nil {
+ return err
+ }
}
- }
- }
- if len(s.stateObjectsDirty) > 0 {
- s.stateObjectsDirty = make(map[common.Address]struct{})
- }
- if codeWriter.ValueSize() > 0 {
- if err := codeWriter.Write(); err != nil {
- log.Crit("Failed to commit dirty codes", "error", err)
- }
- }
- // Write the account trie changes, measuing the amount of wasted time
- var start time.Time
- if metrics.EnabledExpensive {
- start = time.Now()
- }
- // The onleaf func is called _serially_, so we can reuse the same account
- // for unmarshalling every time.
- var account Account
- root, err := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash) error {
- if err := rlp.DecodeBytes(leaf, &account); err != nil {
- return nil
- }
- if account.Root != emptyRoot {
- s.db.TrieDB().Reference(account.Root, parent)
- }
- return nil
- })
- if metrics.EnabledExpensive {
- s.AccountCommits += time.Since(start)
- }
- // If snapshotting is enabled, update the snapshot tree with this new version
- if s.snap != nil {
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now())
- }
- // Only update if there's a state transition (skip empty Clique blocks)
- if parent := s.snap.Root(); parent != root {
- if err := s.snaps.Update(root, parent, s.snapDestructs, s.snapAccounts, s.snapStorage); err != nil {
- log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err)
+
+ if len(s.stateObjectsDirty) > 0 {
+ s.stateObjectsDirty = make(map[common.Address]struct{}, len(s.stateObjectsDirty)/2)
+ }
+ // Write the account trie changes, measuring the amount of wasted time
+ var start time.Time
+ if metrics.EnabledExpensive {
+ start = time.Now()
+ }
+ // The onleaf func is called _serially_, so we can reuse the same account
+ // for unmarshalling every time.
+ var account Account
+ root, err := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash) error {
+ if err := rlp.DecodeBytes(leaf, &account); err != nil {
+ return nil
+ }
+ if account.Root != emptyRoot {
+ s.db.TrieDB().Reference(account.Root, parent)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ if metrics.EnabledExpensive {
+ s.AccountCommits += time.Since(start)
}
- // Keep 128 diff layers in the memory, persistent layer is 129th.
- // - head layer is paired with HEAD state
- // - head-1 layer is paired with HEAD-1 state
- // - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state
- if err := s.snaps.Cap(root, 128); err != nil {
- log.Warn("Failed to cap snapshot tree", "root", root, "layers", 128, "err", err)
+ if root != emptyRoot {
+ s.db.CacheAccount(root, s.trie)
}
+ return nil
+ },
+ func() error {
+ // If snapshotting is enabled, update the snapshot tree with this new version
+ if s.snap != nil {
+ if metrics.EnabledExpensive {
+ defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now())
+ }
+ // Only update if there's a state transition (skip empty Clique blocks)
+ if parent := s.snap.Root(); parent != root {
+ if err := s.snaps.Update(root, parent, s.snapDestructs, s.snapAccounts, s.snapStorage); err != nil {
+ log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err)
+ }
+ // Keep n diff layers in memory
+ // - head layer is paired with HEAD state
+ // - head-1 layer is paired with HEAD-1 state
+ // - head-(n-1) layer (bottom-most diff layer) is paired with HEAD-(n-1) state
+ if err := s.snaps.Cap(root, s.snaps.CapLimit()); err != nil {
+ log.Warn("Failed to cap snapshot tree", "root", root, "layers", s.snaps.CapLimit(), "err", err)
+ }
+ }
+ s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil
+ }
+ return nil
+ },
+ }
+ commitRes := make(chan error, len(commitFuncs))
+ for _, f := range commitFuncs {
+ tmpFunc := f
+ go func() {
+ commitRes <- tmpFunc()
+ }()
+ }
+ for i := 0; i < len(commitFuncs); i++ {
+ r := <-commitRes
+ if r != nil {
+ return common.Hash{}, r
}
- s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil
}
- return root, err
+
+ return root, nil
}
// PrepareAccessList handles the preparatory steps for executing a state transition with
@@ -1011,6 +1211,9 @@ func (s *StateDB) PrepareAccessList(sender common.Address, dst *common.Address,
// AddAddressToAccessList adds the given address to the access list
func (s *StateDB) AddAddressToAccessList(addr common.Address) {
+ if s.accessList == nil {
+ s.accessList = newAccessList()
+ }
if s.accessList.AddAddress(addr) {
s.journal.append(accessListAddAccountChange{&addr})
}
@@ -1018,6 +1221,9 @@ func (s *StateDB) AddAddressToAccessList(addr common.Address) {
// AddSlotToAccessList adds the given (address, slot)-tuple to the access list
func (s *StateDB) AddSlotToAccessList(addr common.Address, slot common.Hash) {
+ if s.accessList == nil {
+ s.accessList = newAccessList()
+ }
addrMod, slotMod := s.accessList.AddSlot(addr, slot)
if addrMod {
// In practice, this should not happen, since there is no way to enter the
@@ -1036,10 +1242,16 @@ func (s *StateDB) AddSlotToAccessList(addr common.Address, slot common.Hash) {
// AddressInAccessList returns true if the given address is in the access list.
func (s *StateDB) AddressInAccessList(addr common.Address) bool {
+ if s.accessList == nil {
+ return false
+ }
return s.accessList.ContainsAddress(addr)
}
// SlotInAccessList returns true if the given (address, slot)-tuple is in the access list.
func (s *StateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addressPresent bool, slotPresent bool) {
+ if s.accessList == nil {
+ return false, false
+ }
return s.accessList.Contains(addr, slot)
}
diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go
index ac5e95c5c2..c8667deae3 100644
--- a/core/state/trie_prefetcher.go
+++ b/core/state/trie_prefetcher.go
@@ -20,6 +20,7 @@ import (
"sync"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
)
@@ -139,7 +140,7 @@ func (p *triePrefetcher) copy() *triePrefetcher {
}
// prefetch schedules a batch of trie items to prefetch.
-func (p *triePrefetcher) prefetch(root common.Hash, keys [][]byte) {
+func (p *triePrefetcher) prefetch(root common.Hash, keys [][]byte, accountHash common.Hash) {
// If the prefetcher is an inactive one, bail out
if p.fetches != nil {
return
@@ -147,7 +148,7 @@ func (p *triePrefetcher) prefetch(root common.Hash, keys [][]byte) {
// Active fetcher, schedule the retrievals
fetcher := p.fetchers[root]
if fetcher == nil {
- fetcher = newSubfetcher(p.db, root)
+ fetcher = newSubfetcher(p.db, root, accountHash)
p.fetchers[root] = fetcher
}
fetcher.schedule(keys)
@@ -211,21 +212,26 @@ type subfetcher struct {
seen map[string]struct{} // Tracks the entries already loaded
dups int // Number of duplicate preload tasks
used [][]byte // Tracks the entries used in the end
+
+ accountHash common.Hash
}
// newSubfetcher creates a goroutine to prefetch state items belonging to a
// particular root hash.
-func newSubfetcher(db Database, root common.Hash) *subfetcher {
+func newSubfetcher(db Database, root common.Hash, accountHash common.Hash) *subfetcher {
sf := &subfetcher{
- db: db,
- root: root,
- wake: make(chan struct{}, 1),
- stop: make(chan struct{}),
- term: make(chan struct{}),
- copy: make(chan chan Trie),
- seen: make(map[string]struct{}),
+ db: db,
+ root: root,
+ wake: make(chan struct{}, 1),
+ stop: make(chan struct{}),
+ term: make(chan struct{}),
+ copy: make(chan chan Trie),
+ seen: make(map[string]struct{}),
+ accountHash: accountHash,
}
- go sf.loop()
+ gopool.Submit(func() {
+ sf.loop()
+ })
return sf
}
@@ -279,7 +285,14 @@ func (sf *subfetcher) loop() {
defer close(sf.term)
// Start by opening the trie and stop processing if it fails
- trie, err := sf.db.OpenTrie(sf.root)
+ var trie Trie
+ var err error
+ if sf.accountHash == emptyAddr {
+ trie, err = sf.db.OpenTrie(sf.root)
+ } else {
+ // The storage trie is keyed by the account hash; the account address itself is not needed here.
+ trie, err = sf.db.OpenStorageTrie(sf.accountHash, sf.root)
+ }
if err != nil {
log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
return
diff --git a/core/state_processor.go b/core/state_processor.go
index 84372082d2..858796b67a 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -79,6 +79,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
commonTxs := make([]*types.Transaction, 0, len(block.Transactions()))
// usually do have two tx, one for validator set contract, another for system reward contract.
systemTxs := make([]*types.Transaction, 0, 2)
+ signer := types.MakeSigner(p.config, header.Number)
for i, tx := range block.Transactions() {
if isPoSA {
if isSystemTx, err := posa.IsSystemTransaction(tx, block.Header()); err != nil {
@@ -89,7 +90,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
}
}
- msg, err := tx.AsMessage(types.MakeSigner(p.config, header.Number))
+ msg, err := tx.AsMessage(signer)
if err != nil {
return nil, nil, 0, err
}
@@ -102,6 +103,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
commonTxs = append(commonTxs, tx)
receipts = append(receipts, receipt)
}
+
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
err := p.engine.Finalize(p.bc, header, statedb, &commonTxs, block.Uncles(), &receipts, &systemTxs, usedGas)
if err != nil {
@@ -171,5 +173,10 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
// Create a new context to be used in the EVM environment
blockContext := NewEVMBlockContext(header, bc, author)
vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg)
+ defer func() {
+ ite := vmenv.Interpreter()
+ vm.EVMInterpreterPool.Put(ite)
+ vm.EvmPool.Put(vmenv)
+ }()
return applyTransaction(msg, config, bc, author, gp, statedb, header, tx, usedGas, vmenv)
}
diff --git a/core/tx_list.go b/core/tx_list.go
index 894640d570..ec122a7384 100644
--- a/core/tx_list.go
+++ b/core/tx_list.go
@@ -21,11 +21,18 @@ import (
"math"
"math/big"
"sort"
+ "sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
+var txSortedMapPool = sync.Pool{
+ New: func() interface{} {
+ return make(types.Transactions, 0, 10)
+ },
+}
+
// nonceHeap is a heap.Interface implementation over 64bit unsigned integers for
// retrieving sorted transactions from the possibly gapped future queue.
type nonceHeap []uint64
@@ -74,6 +81,9 @@ func (m *txSortedMap) Put(tx *types.Transaction) {
if m.items[nonce] == nil {
heap.Push(m.index, nonce)
}
+ if m.cache != nil {
+ txSortedMapPool.Put(m.cache)
+ }
m.items[nonce], m.cache = tx, nil
}
@@ -132,7 +142,10 @@ func (m *txSortedMap) filter(filter func(*types.Transaction) bool) types.Transac
}
}
if len(removed) > 0 {
- m.cache = nil
+ if m.cache != nil {
+ txSortedMapPool.Put(m.cache)
+ m.cache = nil
+ }
}
return removed
}
@@ -178,7 +191,10 @@ func (m *txSortedMap) Remove(nonce uint64) bool {
}
}
delete(m.items, nonce)
- m.cache = nil
+ if m.cache != nil {
+ txSortedMapPool.Put(m.cache)
+ m.cache = nil
+ }
return true
}
@@ -202,7 +218,10 @@ func (m *txSortedMap) Ready(start uint64) types.Transactions {
delete(m.items, next)
heap.Pop(m.index)
}
- m.cache = nil
+ if m.cache != nil {
+ txSortedMapPool.Put(m.cache)
+ m.cache = nil
+ }
return ready
}
@@ -215,7 +234,13 @@ func (m *txSortedMap) Len() int {
func (m *txSortedMap) flatten() types.Transactions {
// If the sorting was not cached yet, create and cache it
if m.cache == nil {
- m.cache = make(types.Transactions, 0, len(m.items))
+ cache := txSortedMapPool.Get()
+ if cache != nil {
+ m.cache = cache.(types.Transactions)
+ m.cache = m.cache[:0]
+ } else {
+ m.cache = make(types.Transactions, 0, len(m.items))
+ }
for _, tx := range m.items {
m.cache = append(m.cache, tx)
}
@@ -384,7 +409,7 @@ func (l *txList) Ready(start uint64) types.Transactions {
// Len returns the length of the transaction list.
func (l *txList) Len() int {
- return l.txs.Len()
+ return len(l.txs.items)
}
// Empty returns whether the list of transactions is empty or not.
diff --git a/core/tx_pool.go b/core/tx_pool.go
index 5db1d3df32..d0304857c3 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -456,11 +456,11 @@ func (pool *TxPool) Stats() (int, int) {
func (pool *TxPool) stats() (int, int) {
pending := 0
for _, list := range pool.pending {
- pending += list.Len()
+ pending += len(list.txs.items)
}
queued := 0
for _, list := range pool.queue {
- queued += list.Len()
+ queued += len(list.txs.items)
}
return pending, queued
}
@@ -580,7 +580,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
// If the transaction is already known, discard it
hash := tx.Hash()
if pool.all.Get(hash) != nil {
- log.Trace("Discarding already known transaction", "hash", hash)
+ //log.Trace("Discarding already known transaction", "hash", hash)
knownTxMeter.Mark(1)
return false, ErrAlreadyKnown
}
@@ -590,7 +590,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
// If the transaction fails basic validation, discard it
if err := pool.validateTx(tx, isLocal); err != nil {
- log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
+ //log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
invalidTxMeter.Mark(1)
return false, err
}
@@ -598,7 +598,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
if uint64(pool.all.Count()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
// If the new transaction is underpriced, don't accept it
if !isLocal && pool.priced.Underpriced(tx) {
- log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice())
+ //log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice())
underpricedTxMeter.Mark(1)
return false, ErrUnderpriced
}
@@ -609,13 +609,13 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
// Special case, we still can't make the room for the new remote one.
if !isLocal && !success {
- log.Trace("Discarding overflown transaction", "hash", hash)
+ //log.Trace("Discarding overflown transaction", "hash", hash)
overflowedTxMeter.Mark(1)
return false, ErrTxPoolOverflow
}
// Kick out the underpriced remote transactions.
for _, tx := range drop {
- log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice())
+ //log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice())
underpricedTxMeter.Mark(1)
pool.removeTx(tx.Hash(), false)
}
@@ -639,7 +639,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
pool.priced.Put(tx, isLocal)
pool.journalTx(from, tx)
pool.queueTxEvent(tx)
- log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
+ //log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
// Successful promotion, bump the heartbeat
pool.beats[from] = time.Now()
@@ -652,7 +652,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
}
// Mark local addresses and journal local transactions
if local && !pool.locals.contains(from) {
- log.Info("Setting new local account", "address", from)
+ //log.Info("Setting new local account", "address", from)
pool.locals.add(from)
pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
}
@@ -661,7 +661,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
}
pool.journalTx(from, tx)
- log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
+ //log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
return replaced, nil
}
@@ -1070,7 +1070,7 @@ func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirt
// Nonces were reset, discard any events that became stale
for addr := range events {
events[addr].Forward(pool.pendingNonces.get(addr))
- if events[addr].Len() == 0 {
+ if len(events[addr].items) == 0 {
delete(events, addr)
}
}
@@ -1279,7 +1279,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Trans
func (pool *TxPool) truncatePending() {
pending := uint64(0)
for _, list := range pool.pending {
- pending += uint64(list.Len())
+ pending += uint64(len(list.txs.items))
}
if pending <= pool.config.GlobalSlots {
return
@@ -1290,8 +1290,8 @@ func (pool *TxPool) truncatePending() {
spammers := prque.New(nil)
for addr, list := range pool.pending {
// Only evict transactions from high rollers
- if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
- spammers.Push(addr, int64(list.Len()))
+ if !pool.locals.contains(addr) && uint64(len(list.txs.items)) > pool.config.AccountSlots {
+ spammers.Push(addr, int64(len(list.txs.items)))
}
}
// Gradually drop transactions from offenders
@@ -1304,14 +1304,14 @@ func (pool *TxPool) truncatePending() {
// Equalize balances until all the same or below threshold
if len(offenders) > 1 {
// Calculate the equalization threshold for all current offenders
- threshold := pool.pending[offender.(common.Address)].Len()
+ threshold := len(pool.pending[offender.(common.Address)].txs.items)
// Iteratively reduce all offenders until below limit or threshold reached
- for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
+ for pending > pool.config.GlobalSlots && len(pool.pending[offenders[len(offenders)-2]].txs.items) > threshold {
for i := 0; i < len(offenders)-1; i++ {
list := pool.pending[offenders[i]]
- caps := list.Cap(list.Len() - 1)
+ caps := list.Cap(len(list.txs.items) - 1)
for _, tx := range caps {
// Drop the transaction from the global pools too
hash := tx.Hash()
@@ -1334,11 +1334,11 @@ func (pool *TxPool) truncatePending() {
// If still above threshold, reduce to limit or min allowance
if pending > pool.config.GlobalSlots && len(offenders) > 0 {
- for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
+ for pending > pool.config.GlobalSlots && uint64(len(pool.pending[offenders[len(offenders)-1]].txs.items)) > pool.config.AccountSlots {
for _, addr := range offenders {
list := pool.pending[addr]
- caps := list.Cap(list.Len() - 1)
+ caps := list.Cap(len(list.txs.items) - 1)
for _, tx := range caps {
// Drop the transaction from the global pools too
hash := tx.Hash()
@@ -1364,7 +1364,7 @@ func (pool *TxPool) truncatePending() {
func (pool *TxPool) truncateQueue() {
queued := uint64(0)
for _, list := range pool.queue {
- queued += uint64(list.Len())
+ queued += uint64(len(list.txs.items))
}
if queued <= pool.config.GlobalQueue {
return
@@ -1387,7 +1387,7 @@ func (pool *TxPool) truncateQueue() {
addresses = addresses[:len(addresses)-1]
// Drop all transactions if they are less than the overflow
- if size := uint64(list.Len()); size <= drop {
+ if size := uint64(len(list.txs.items)); size <= drop {
for _, tx := range list.Flatten() {
pool.removeTx(tx.Hash(), true)
}
@@ -1442,7 +1442,7 @@ func (pool *TxPool) demoteUnexecutables() {
localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
}
// If there's a gap in front, alert (should never happen) and postpone all transactions
- if list.Len() > 0 && list.txs.Get(nonce) == nil {
+ if len(list.txs.items) > 0 && list.txs.Get(nonce) == nil {
gapped := list.Cap(0)
for _, tx := range gapped {
hash := tx.Hash()
diff --git a/core/types/block.go b/core/types/block.go
index a3318f8779..b33493ef7d 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -306,6 +306,8 @@ func (b *Block) Size() common.StorageSize {
return common.StorageSize(c)
}
+func (b *Block) SetRoot(root common.Hash) { b.header.Root = root }
+
// SanityCheck can be used to prevent that unbounded fields are
// stuffed with junk data to add processing overhead
func (b *Block) SanityCheck() error {
diff --git a/core/types/transaction.go b/core/types/transaction.go
index a35e07a5a3..1bb43d805f 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -260,6 +260,9 @@ func (tx *Transaction) Gas() uint64 { return tx.inner.gas() }
// GasPrice returns the gas price of the transaction.
func (tx *Transaction) GasPrice() *big.Int { return new(big.Int).Set(tx.inner.gasPrice()) }
+// The return value of ImmutableGasPrice must not be modified by the caller.
+func (tx *Transaction) ImmutableGasPrice() *big.Int { return tx.inner.gasPrice() }
+
// Value returns the ether amount of the transaction.
func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.inner.value()) }
@@ -394,7 +397,7 @@ func (s TxByPriceAndTime) Len() int { return len(s) }
func (s TxByPriceAndTime) Less(i, j int) bool {
// If the prices are equal, use the time the transaction was first seen for
// deterministic sorting
- cmp := s[i].GasPrice().Cmp(s[j].GasPrice())
+ cmp := s[i].ImmutableGasPrice().Cmp(s[j].ImmutableGasPrice())
if cmp == 0 {
return s[i].time.Before(s[j].time)
}
diff --git a/core/vm/evm.go b/core/vm/evm.go
index bd54e855c6..26a5fa7c45 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -19,6 +19,7 @@ package vm
import (
"errors"
"math/big"
+ "sync"
"sync/atomic"
"time"
@@ -32,6 +33,12 @@ import (
// deployed contract addresses (relevant after the account abstraction).
var emptyCodeHash = crypto.Keccak256Hash(nil)
+var EvmPool = sync.Pool{
+ New: func() interface{} {
+ return &EVM{}
+ },
+}
+
type (
// CanTransferFunc is the signature of a transfer guard function
CanTransferFunc func(StateDB, common.Address, *big.Int) bool
@@ -144,15 +151,17 @@ type EVM struct {
// NewEVM returns a new EVM. The returned EVM is not thread safe and should
// only ever be used *once*.
func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig *params.ChainConfig, vmConfig Config) *EVM {
- evm := &EVM{
- Context: blockCtx,
- TxContext: txCtx,
- StateDB: statedb,
- vmConfig: vmConfig,
- chainConfig: chainConfig,
- chainRules: chainConfig.Rules(blockCtx.BlockNumber),
- interpreters: make([]Interpreter, 0, 1),
- }
+ evm := EvmPool.Get().(*EVM)
+ evm.Context = blockCtx
+ evm.TxContext = txCtx
+ evm.StateDB = statedb
+ evm.vmConfig = vmConfig
+ evm.chainConfig = chainConfig
+ evm.chainRules = chainConfig.Rules(blockCtx.BlockNumber)
+ evm.interpreters = make([]Interpreter, 0, 1)
+ evm.abort = 0
+ evm.callGasTemp = 0
+ evm.depth = 0
if chainConfig.IsEWASM(blockCtx.BlockNumber) {
// to be implemented by EVM-C and Wagon PRs.
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index 08da68ea2a..c004672cae 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -18,6 +18,7 @@ package vm
import (
"hash"
+ "sync"
"sync/atomic"
"github.com/ethereum/go-ethereum/common"
@@ -25,6 +26,12 @@ import (
"github.com/ethereum/go-ethereum/log"
)
+var EVMInterpreterPool = sync.Pool{
+ New: func() interface{} {
+ return &EVMInterpreter{}
+ },
+}
+
// Config are the configuration options for the Interpreter
type Config struct {
Debug bool // Enables debugging
@@ -124,11 +131,12 @@ func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter {
}
cfg.JumpTable = jt
}
-
- return &EVMInterpreter{
- evm: evm,
- cfg: cfg,
- }
+ evmInterpreter := EVMInterpreterPool.Get().(*EVMInterpreter)
+ evmInterpreter.evm = evm
+ evmInterpreter.cfg = cfg
+ evmInterpreter.readOnly = false
+ evmInterpreter.returnData = nil
+ return evmInterpreter
}
// Run loops and evaluates the contract's code with the given input data and returns
diff --git a/crypto/crypto.go b/crypto/crypto.go
index 40969a2895..88c44d0e22 100644
--- a/crypto/crypto.go
+++ b/crypto/crypto.go
@@ -29,6 +29,9 @@ import (
"io/ioutil"
"math/big"
"os"
+ "sync"
+
+ "github.com/VictoriaMetrics/fastcache"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
@@ -48,10 +51,17 @@ const DigestLength = 32
var (
secp256k1N, _ = new(big.Int).SetString("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", 16)
secp256k1halfN = new(big.Int).Div(secp256k1N, big.NewInt(2))
+
+ keccakState256Cache = fastcache.New(100 * 1024 * 1024)
)
var errInvalidPubkey = errors.New("invalid secp256k1 public key")
+var keccakState256Pool = sync.Pool{
+ New: func() interface{} {
+ return sha3.NewLegacyKeccak256().(KeccakState)
+ }}
+
// KeccakState wraps sha3.state. In addition to the usual hash methods, it also supports
// Read to get a variable amount of data from the hash state. Read is faster than Sum
// because it doesn't copy the internal state, but also modifies the internal state.
@@ -67,31 +77,55 @@ func NewKeccakState() KeccakState {
// HashData hashes the provided data using the KeccakState and returns a 32 byte hash
func HashData(kh KeccakState, data []byte) (h common.Hash) {
+ if hash, ok := keccakState256Cache.HasGet(nil, data); ok {
+ return common.BytesToHash(hash)
+ }
kh.Reset()
kh.Write(data)
kh.Read(h[:])
+ keccakState256Cache.Set(data, h.Bytes())
return h
}
// Keccak256 calculates and returns the Keccak256 hash of the input data.
func Keccak256(data ...[]byte) []byte {
+ if len(data) == 1 {
+ if hash, ok := keccakState256Cache.HasGet(nil, data[0]); ok {
+ return hash
+ }
+ }
b := make([]byte, 32)
- d := NewKeccakState()
+ d := keccakState256Pool.Get().(KeccakState)
+ defer keccakState256Pool.Put(d)
+ d.Reset()
for _, b := range data {
d.Write(b)
}
d.Read(b)
+ if len(data) == 1 {
+ keccakState256Cache.Set(data[0], b)
+ }
return b
}
// Keccak256Hash calculates and returns the Keccak256 hash of the input data,
// converting it to an internal Hash data structure.
func Keccak256Hash(data ...[]byte) (h common.Hash) {
- d := NewKeccakState()
+ if len(data) == 1 {
+ if hash, ok := keccakState256Cache.HasGet(nil, data[0]); ok {
+ return common.BytesToHash(hash)
+ }
+ }
+ d := keccakState256Pool.Get().(KeccakState)
+ defer keccakState256Pool.Put(d)
+ d.Reset()
for _, b := range data {
d.Write(b)
}
d.Read(h[:])
+ if len(data) == 1 {
+ keccakState256Cache.Set(data[0], h.Bytes())
+ }
return h
}
diff --git a/eth/backend.go b/eth/backend.go
index 2247b94b5d..b52591fd71 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -138,7 +138,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
}
log.Info("Initialised chain configuration", "config", chainConfig)
- if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb, stack.ResolvePath(config.TrieCleanCacheJournal)); err != nil {
+ if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb, stack.ResolvePath(config.TrieCleanCacheJournal), config.TriesInMemory); err != nil {
log.Error("Failed to recover state", "error", err)
}
eth := &Ethereum{
@@ -186,15 +186,15 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
EVMInterpreter: config.EVMInterpreter,
}
cacheConfig = &core.CacheConfig{
- TrieCleanLimit: config.TrieCleanCache,
- TrieCleanJournal: stack.ResolvePath(config.TrieCleanCacheJournal),
- TrieCleanRejournal: config.TrieCleanCacheRejournal,
- TrieCleanNoPrefetch: config.NoPrefetch,
- TrieDirtyLimit: config.TrieDirtyCache,
- TrieDirtyDisabled: config.NoPruning,
- TrieTimeLimit: config.TrieTimeout,
- SnapshotLimit: config.SnapshotCache,
- Preimages: config.Preimages,
+ TrieCleanLimit: config.TrieCleanCache,
+ TrieCleanJournal: stack.ResolvePath(config.TrieCleanCacheJournal),
+ TrieCleanRejournal: config.TrieCleanCacheRejournal,
+ TrieDirtyLimit: config.TrieDirtyCache,
+ TrieDirtyDisabled: config.NoPruning,
+ TrieTimeLimit: config.TrieTimeout,
+ SnapshotLimit: config.SnapshotCache,
+ TriesInMemory: config.TriesInMemory,
+ Preimages: config.Preimages,
}
)
eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, chainConfig, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit)
diff --git a/eth/bloombits.go b/eth/bloombits.go
index 0cb7050d23..314317ae4f 100644
--- a/eth/bloombits.go
+++ b/eth/bloombits.go
@@ -20,6 +20,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common/bitutil"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/core/rawdb"
)
@@ -45,7 +46,7 @@ const (
// retrievals from possibly a range of filters and serving the data to satisfy.
func (eth *Ethereum) startBloomHandlers(sectionSize uint64) {
for i := 0; i < bloomServiceThreads; i++ {
- go func() {
+ gopool.Submit(func() {
for {
select {
case <-eth.closeBloomHandler:
@@ -69,6 +70,6 @@ func (eth *Ethereum) startBloomHandlers(sectionSize uint64) {
request <- task
}
}
- }()
+ })
}
}
diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go
index b8a6e43fcd..001b27147f 100644
--- a/eth/catalyst/api_test.go
+++ b/eth/catalyst/api_test.go
@@ -79,7 +79,11 @@ func generateTestChainWithFork(n int, fork int) (*core.Genesis, []*types.Block,
MuirGlacierBlock: big.NewInt(0),
BerlinBlock: big.NewInt(0),
CatalystBlock: big.NewInt(0),
- Ethash: new(params.EthashConfig),
+ RamanujanBlock: big.NewInt(0),
+ NielsBlock: big.NewInt(0),
+ MirrorSyncBlock: big.NewInt(0),
+
+ Ethash: new(params.EthashConfig),
}
genesis := &core.Genesis{
Config: config,
diff --git a/eth/downloader/api.go b/eth/downloader/api.go
index 2024d23dea..0fea49f7bc 100644
--- a/eth/downloader/api.go
+++ b/eth/downloader/api.go
@@ -21,6 +21,7 @@ import (
"sync"
"github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/rpc"
)
@@ -98,7 +99,7 @@ func (api *PublicDownloaderAPI) Syncing(ctx context.Context) (*rpc.Subscription,
rpcSub := notifier.CreateSubscription()
- go func() {
+ gopool.Submit(func() {
statuses := make(chan interface{})
sub := api.SubscribeSyncStatus(statuses)
@@ -114,7 +115,7 @@ func (api *PublicDownloaderAPI) Syncing(ctx context.Context) (*rpc.Subscription,
return
}
}
- }()
+ })
return rpcSub, nil
}
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index b3b6cc95a0..4d76988f71 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -150,8 +150,8 @@ func (p *peerConnection) FetchHeaders(from uint64, count int) error {
p.headerStarted = time.Now()
// Issue the header retrieval request (absolute upwards without gaps)
- go p.peer.RequestHeadersByNumber(from, count, 0, false)
+ go p.peer.RequestHeadersByNumber(from, count, 0, false)
return nil
}
@@ -202,7 +202,6 @@ func (p *peerConnection) FetchNodeData(hashes []common.Hash) error {
return errAlreadyFetching
}
p.stateStarted = time.Now()
-
go p.peer.RequestNodeData(hashes)
return nil
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 1c4b796e9f..40dece429a 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -18,8 +18,6 @@
package ethconfig
import (
- "github.com/ethereum/go-ethereum/consensus/parlia"
- "github.com/ethereum/go-ethereum/internal/ethapi"
"math/big"
"os"
"os/user"
@@ -31,10 +29,12 @@ import (
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash"
+ "github.com/ethereum/go-ethereum/consensus/parlia"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/node"
@@ -78,6 +78,7 @@ var Defaults = Config{
TrieCleanCacheRejournal: 60 * time.Minute,
TrieDirtyCache: 256,
TrieTimeout: 60 * time.Minute,
+ TriesInMemory: 128,
SnapshotCache: 102,
Miner: miner.Config{
GasFloor: 8000000,
@@ -131,7 +132,6 @@ type Config struct {
SnapDiscoveryURLs []string
NoPruning bool // Whether to disable pruning and flush everything to disk
- NoPrefetch bool // Whether to disable prefetching and only load state on demand
DirectBroadcast bool
RangeLimit bool
@@ -166,6 +166,7 @@ type Config struct {
TrieDirtyCache int
TrieTimeout time.Duration
SnapshotCache int
+ TriesInMemory uint64
Preimages bool
// Mining options
diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go
index ca93b2ad00..fa31b78335 100644
--- a/eth/ethconfig/gen_config.go
+++ b/eth/ethconfig/gen_config.go
@@ -45,6 +45,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
TrieCleanCacheRejournal time.Duration `toml:",omitempty"`
TrieDirtyCache int
TrieTimeout time.Duration
+ TriesInMemory uint64 `toml:",omitempty"`
SnapshotCache int
Preimages bool
Miner miner.Config
@@ -67,7 +68,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.EthDiscoveryURLs = c.EthDiscoveryURLs
enc.SnapDiscoveryURLs = c.SnapDiscoveryURLs
enc.NoPruning = c.NoPruning
- enc.NoPrefetch = c.NoPrefetch
enc.TxLookupLimit = c.TxLookupLimit
enc.Whitelist = c.Whitelist
enc.LightServ = c.LightServ
@@ -89,6 +89,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.TrieCleanCacheRejournal = c.TrieCleanCacheRejournal
enc.TrieDirtyCache = c.TrieDirtyCache
enc.TrieTimeout = c.TrieTimeout
+ enc.TriesInMemory = c.TriesInMemory
enc.SnapshotCache = c.SnapshotCache
enc.Preimages = c.Preimages
enc.Miner = c.Miner
@@ -137,6 +138,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
TrieCleanCacheRejournal *time.Duration `toml:",omitempty"`
TrieDirtyCache *int
TrieTimeout *time.Duration
+ TriesInMemory *uint64 `toml:",omitempty"`
SnapshotCache *int
Preimages *bool
Miner *miner.Config
@@ -174,9 +176,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.NoPruning != nil {
c.NoPruning = *dec.NoPruning
}
- if dec.NoPrefetch != nil {
- c.NoPrefetch = *dec.NoPrefetch
- }
if dec.TxLookupLimit != nil {
c.TxLookupLimit = *dec.TxLookupLimit
}
@@ -240,6 +239,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.TrieTimeout != nil {
c.TrieTimeout = *dec.TrieTimeout
}
+ if dec.TriesInMemory != nil {
+ c.TriesInMemory = *dec.TriesInMemory
+ }
if dec.SnapshotCache != nil {
c.SnapshotCache = *dec.SnapshotCache
}
diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go
index fc75dd4310..05e5833ce2 100644
--- a/eth/fetcher/block_fetcher.go
+++ b/eth/fetcher/block_fetcher.go
@@ -23,6 +23,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/common/prque"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/types"
@@ -460,7 +461,7 @@ func (f *BlockFetcher) loop() {
// Create a closure of the fetch and schedule in on a new thread
fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
- go func() {
+ gopool.Submit(func() {
if f.fetchingHook != nil {
f.fetchingHook(hashes)
}
@@ -468,7 +469,7 @@ func (f *BlockFetcher) loop() {
headerFetchMeter.Mark(1)
fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
}
- }()
+ })
}
// Schedule the next fetch if blocks are still pending
f.rescheduleFetch(fetchTimer)
diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go
index 3ba7753916..d0c1348014 100644
--- a/eth/fetcher/tx_fetcher.go
+++ b/eth/fetcher/tx_fetcher.go
@@ -25,6 +25,7 @@ import (
mapset "github.com/deckarep/golang-set"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
@@ -794,15 +795,15 @@ func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{},
if len(hashes) > 0 {
f.requests[peer] = &txRequest{hashes: hashes, time: f.clock.Now()}
txRequestOutMeter.Mark(int64(len(hashes)))
-
- go func(peer string, hashes []common.Hash) {
+ p := peer
+ gopool.Submit(func() {
// Try to fetch the transactions, but in case of a request
// failure (e.g. peer disconnected), reschedule the hashes.
- if err := f.fetchTxs(peer, hashes); err != nil {
+ if err := f.fetchTxs(p, hashes); err != nil {
txRequestFailMeter.Mark(int64(len(hashes)))
- f.Drop(peer)
+ f.Drop(p)
}
- }(peer, hashes)
+ })
}
})
// If a new request was fired, schedule a timeout timer
diff --git a/eth/filters/api.go b/eth/filters/api.go
index 00e28d8955..91477a9170 100644
--- a/eth/filters/api.go
+++ b/eth/filters/api.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
@@ -121,7 +122,7 @@ func (api *PublicFilterAPI) NewPendingTransactionFilter() rpc.ID {
api.filters[pendingTxSub.ID] = f
api.filtersMu.Unlock()
- go func() {
+ gopool.Submit(func() {
for {
select {
case ph := <-pendingTxs:
@@ -133,7 +134,7 @@ func (api *PublicFilterAPI) NewPendingTransactionFilter() rpc.ID {
return
}
}
- }()
+ })
return pendingTxSub.ID
}
@@ -148,7 +149,7 @@ func (api *PublicFilterAPI) NewPendingTransactions(ctx context.Context) (*rpc.Su
rpcSub := notifier.CreateSubscription()
- go func() {
+ gopool.Submit(func() {
txHashes := make(chan []common.Hash, 128)
pendingTxSub := api.events.SubscribePendingTxs(txHashes)
@@ -168,7 +169,7 @@ func (api *PublicFilterAPI) NewPendingTransactions(ctx context.Context) (*rpc.Su
return
}
}
- }()
+ })
return rpcSub, nil
}
@@ -187,7 +188,7 @@ func (api *PublicFilterAPI) NewBlockFilter() rpc.ID {
api.filters[headerSub.ID] = &filter{typ: BlocksSubscription, deadline: time.NewTimer(api.timeout), hashes: make([]common.Hash, 0), s: headerSub}
api.filtersMu.Unlock()
- go func() {
+ gopool.Submit(func() {
for {
select {
case h := <-headers:
@@ -203,7 +204,7 @@ func (api *PublicFilterAPI) NewBlockFilter() rpc.ID {
return
}
}
- }()
+ })
return headerSub.ID
}
@@ -217,7 +218,7 @@ func (api *PublicFilterAPI) NewHeads(ctx context.Context) (*rpc.Subscription, er
rpcSub := notifier.CreateSubscription()
- go func() {
+ gopool.Submit(func() {
headers := make(chan *types.Header)
headersSub := api.events.SubscribeNewHeads(headers)
@@ -233,7 +234,7 @@ func (api *PublicFilterAPI) NewHeads(ctx context.Context) (*rpc.Subscription, er
return
}
}
- }()
+ })
return rpcSub, nil
}
@@ -255,7 +256,7 @@ func (api *PublicFilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc
return nil, err
}
- go func() {
+ gopool.Submit(func() {
for {
select {
@@ -271,7 +272,7 @@ func (api *PublicFilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc
return
}
}
- }()
+ })
return rpcSub, nil
}
@@ -304,7 +305,7 @@ func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) {
api.filters[logsSub.ID] = &filter{typ: LogsSubscription, crit: crit, deadline: time.NewTimer(api.timeout), logs: make([]*types.Log, 0), s: logsSub}
api.filtersMu.Unlock()
- go func() {
+ gopool.Submit(func() {
for {
select {
case l := <-logs:
@@ -320,7 +321,7 @@ func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) {
return
}
}
- }()
+ })
return logsSub.ID, nil
}
diff --git a/eth/protocols/eth/broadcast.go b/eth/protocols/eth/broadcast.go
index 328396d510..e0ee2a1cfa 100644
--- a/eth/protocols/eth/broadcast.go
+++ b/eth/protocols/eth/broadcast.go
@@ -20,6 +20,7 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/core/types"
)
@@ -158,14 +159,14 @@ func (p *Peer) announceTransactions() {
// If there's anything available to transfer, fire up an async writer
if len(pending) > 0 {
done = make(chan struct{})
- go func() {
+ gopool.Submit(func() {
if err := p.sendPooledTransactionHashes(pending); err != nil {
fail <- err
return
}
close(done)
- p.Log().Trace("Sent transaction announcements", "count", len(pending))
- }()
+ //p.Log().Trace("Sent transaction announcements", "count", len(pending))
+ })
}
}
// Transfer goroutine may or may not have been started, listen for events
diff --git a/eth/protocols/eth/handshake.go b/eth/protocols/eth/handshake.go
index 57a4e0bc34..b634f18e00 100644
--- a/eth/protocols/eth/handshake.go
+++ b/eth/protocols/eth/handshake.go
@@ -22,6 +22,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/p2p"
)
@@ -40,7 +41,7 @@ func (p *Peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis
var status StatusPacket // safe to read after two values have been received from errc
- go func() {
+ gopool.Submit(func() {
errc <- p2p.Send(p.rw, StatusMsg, &StatusPacket{
ProtocolVersion: uint32(p.version),
NetworkID: network,
@@ -49,10 +50,10 @@ func (p *Peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis
Genesis: genesis,
ForkID: forkID,
})
- }()
- go func() {
+ })
+ gopool.Submit(func() {
errc <- p.readStatus(network, &status, genesis, forkFilter)
- }()
+ })
timeout := time.NewTimer(handshakeTimeout)
defer timeout.Stop()
for i := 0; i < 2; i++ {
diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go
index 62c018ef8e..de1b0ed1ee 100644
--- a/eth/protocols/eth/protocol.go
+++ b/eth/protocols/eth/protocol.go
@@ -155,19 +155,19 @@ func (hn *HashOrNumber) EncodeRLP(w io.Writer) error {
// DecodeRLP is a specialized decoder for HashOrNumber to decode the contents
// into either a block hash or a block number.
func (hn *HashOrNumber) DecodeRLP(s *rlp.Stream) error {
- _, size, _ := s.Kind()
- origin, err := s.Raw()
- if err == nil {
- switch {
- case size == 32:
- err = rlp.DecodeBytes(origin, &hn.Hash)
- case size <= 8:
- err = rlp.DecodeBytes(origin, &hn.Number)
- default:
- err = fmt.Errorf("invalid input size %d for origin", size)
- }
+ _, size, err := s.Kind()
+ switch {
+ case err != nil:
+ return err
+ case size == 32:
+ hn.Number = 0
+ return s.Decode(&hn.Hash)
+ case size <= 8:
+ hn.Hash = common.Hash{}
+ return s.Decode(&hn.Number)
+ default:
+ return fmt.Errorf("invalid input size %d for origin", size)
}
- return err
}
// BlockHeadersPacket represents a block header response.
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index e283473207..f37bf14df7 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -28,6 +28,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
@@ -911,7 +912,8 @@ func (s *Syncer) assignAccountTasks(success chan *accountResponse, fail chan *ac
delete(s.accountIdlers, idle)
s.pend.Add(1)
- go func(root common.Hash) {
+ root := s.root
+ gopool.Submit(func() {
defer s.pend.Done()
// Attempt to send the remote request and revert if it fails
@@ -919,7 +921,7 @@ func (s *Syncer) assignAccountTasks(success chan *accountResponse, fail chan *ac
peer.Log().Debug("Failed to request account range", "err", err)
s.scheduleRevertAccountRequest(req)
}
- }(s.root)
+ })
// Inject the request into the task to block further assignments
task.req = req
@@ -1002,7 +1004,7 @@ func (s *Syncer) assignBytecodeTasks(success chan *bytecodeResponse, fail chan *
delete(s.bytecodeIdlers, idle)
s.pend.Add(1)
- go func() {
+ gopool.Submit(func() {
defer s.pend.Done()
// Attempt to send the remote request and revert if it fails
@@ -1010,7 +1012,7 @@ func (s *Syncer) assignBytecodeTasks(success chan *bytecodeResponse, fail chan *
log.Debug("Failed to request bytecodes", "err", err)
s.scheduleRevertBytecodeRequest(req)
}
- }()
+ })
}
}
@@ -1130,7 +1132,8 @@ func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *st
delete(s.storageIdlers, idle)
s.pend.Add(1)
- go func(root common.Hash) {
+ root := s.root
+ gopool.Submit(func() {
defer s.pend.Done()
// Attempt to send the remote request and revert if it fails
@@ -1142,7 +1145,7 @@ func (s *Syncer) assignStorageTasks(success chan *storageResponse, fail chan *st
log.Debug("Failed to request storage", "err", err)
s.scheduleRevertStorageRequest(req)
}
- }(s.root)
+ })
// Inject the request into the subtask to block further assignments
if subtask != nil {
@@ -1249,7 +1252,8 @@ func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fai
delete(s.trienodeHealIdlers, idle)
s.pend.Add(1)
- go func(root common.Hash) {
+ root := s.root
+ gopool.Submit(func() {
defer s.pend.Done()
// Attempt to send the remote request and revert if it fails
@@ -1257,7 +1261,7 @@ func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fai
log.Debug("Failed to request trienode healers", "err", err)
s.scheduleRevertTrienodeHealRequest(req)
}
- }(s.root)
+ })
}
}
@@ -1351,7 +1355,7 @@ func (s *Syncer) assignBytecodeHealTasks(success chan *bytecodeHealResponse, fai
delete(s.bytecodeHealIdlers, idle)
s.pend.Add(1)
- go func() {
+ gopool.Submit(func() {
defer s.pend.Done()
// Attempt to send the remote request and revert if it fails
@@ -1359,7 +1363,7 @@ func (s *Syncer) assignBytecodeHealTasks(success chan *bytecodeHealResponse, fai
log.Debug("Failed to request bytecode healers", "err", err)
s.scheduleRevertBytecodeHealRequest(req)
}
- }()
+ })
}
}
diff --git a/eth/sync.go b/eth/sync.go
index 4520ec6879..2256c7cb99 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -23,6 +23,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader"
@@ -114,7 +115,7 @@ func (h *handler) txsyncLoop64() {
// Send the pack in the background.
s.p.Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size)
sending = true
- go func() { done <- pack.p.SendTransactions(pack.txs) }()
+ gopool.Submit(func() { done <- pack.p.SendTransactions(pack.txs) })
}
// pick chooses the next pending sync.
pick := func() *txsync {
diff --git a/eth/tracers/api.go b/eth/tracers/api.go
index 40def8d288..eee27e9a0c 100644
--- a/eth/tracers/api.go
+++ b/eth/tracers/api.go
@@ -30,6 +30,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core"
@@ -263,7 +264,7 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
)
for th := 0; th < threads; th++ {
pend.Add(1)
- go func() {
+ gopool.Submit(func() {
defer pend.Done()
// Fetch and execute the next block trace tasks
@@ -295,12 +296,12 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
return
}
}
- }()
+ })
}
// Start a goroutine to feed all the blocks into the tracers
begin := time.Now()
- go func() {
+ gopool.Submit(func() {
var (
logged time.Time
number uint64
@@ -375,10 +376,10 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
}
traced += uint64(len(txs))
}
- }()
+ })
// Keep reading the trace results and stream the to the user
- go func() {
+ gopool.Submit(func() {
var (
done = make(map[uint64]*blockTraceResult)
next = start.NumberU64() + 1
@@ -405,7 +406,7 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
next++
}
}
- }()
+ })
return sub, nil
}
@@ -520,7 +521,7 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
blockHash := block.Hash()
for th := 0; th < threads; th++ {
pend.Add(1)
- go func() {
+ gopool.Submit(func() {
defer pend.Done()
// Fetch and execute the next transaction trace tasks
for task := range jobs {
@@ -537,7 +538,7 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
}
results[task.index] = &txTraceResult{Result: res}
}
- }()
+ })
}
// Feed the transactions into the tracers and return
var failed error
@@ -814,12 +815,12 @@ func (api *API) traceTx(ctx context.Context, message core.Message, txctx *txTrac
}
// Handle timeouts and RPC cancellations
deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
- go func() {
+ gopool.Submit(func() {
<-deadlineCtx.Done()
if deadlineCtx.Err() == context.DeadlineExceeded {
tracer.(*Tracer).Stop(errors.New("execution timeout"))
}
- }()
+ })
defer cancel()
case config == nil:
diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go
index 4c0240cd2c..598c84c3b1 100644
--- a/eth/tracers/api_test.go
+++ b/eth/tracers/api_test.go
@@ -79,6 +79,7 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i i
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 0,
+ TriesInMemory: 128,
TrieDirtyDisabled: true, // Archive mode
}
chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, backend.chainConfig, backend.engine, vm.Config{}, nil, nil)
diff --git a/event/subscription.go b/event/subscription.go
index 6c62874719..080985d1d4 100644
--- a/event/subscription.go
+++ b/event/subscription.go
@@ -21,6 +21,7 @@ import (
"sync"
"time"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/common/mclock"
)
@@ -48,7 +49,7 @@ type Subscription interface {
// error, it is sent on the subscription's error channel.
func NewSubscription(producer func(<-chan struct{}) error) Subscription {
s := &funcSub{unsub: make(chan struct{}), err: make(chan error, 1)}
- go func() {
+ gopool.Submit(func() {
defer close(s.err)
err := producer(s.unsub)
s.mu.Lock()
@@ -59,7 +60,7 @@ func NewSubscription(producer func(<-chan struct{}) error) Subscription {
}
s.unsubscribed = true
}
- }()
+ })
return s
}
@@ -171,11 +172,11 @@ func (s *resubscribeSub) subscribe() Subscription {
for {
s.lastTry = mclock.Now()
ctx, cancel := context.WithCancel(context.Background())
- go func() {
+ gopool.Submit(func() {
rsub, err := s.fn(ctx, s.lastSubErr)
sub = rsub
subscribed <- err
- }()
+ })
select {
case err := <-subscribed:
cancel()
diff --git a/go.mod b/go.mod
index 524078e203..fc5ec88fec 100644
--- a/go.mod
+++ b/go.mod
@@ -55,6 +55,8 @@ require (
github.com/naoina/go-stringutil v0.1.0 // indirect
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416
github.com/olekukonko/tablewriter v0.0.5
+ github.com/panjf2000/ants/v2 v2.4.5
+ github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7
github.com/prometheus/tsdb v0.7.1
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
diff --git a/go.sum b/go.sum
index c22848f2f8..5fbb839b90 100644
--- a/go.sum
+++ b/go.sum
@@ -8,16 +8,24 @@ cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxK
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.51.0 h1:PvKAVQWCtlGUSlZkGW3QLelKaWq7KYv/MW1EboG8bfM=
cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigtable v1.2.0 h1:F4cCmA4nuV84V5zYQ3MKY+M1Cw1avHDuf3S/LcZPA9c=
cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o=
+cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0 h1:9/vpR43S4aJaROxqQHQ3nH9lfyKKV0dC3vOmnw8ebQQ=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+collectd.org v0.3.0 h1:iNBHGw1VvPJxH2B6RiFWFZ+vsjo1lCdRszBeOuwGi00=
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
@@ -42,8 +50,11 @@ github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VY
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DATA-DOG/go-sqlmock v1.3.3 h1:CWUqKXe0s8A2z6qCgkP4Kru7wC11YoAnoupUKFDnH08=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
+github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
@@ -51,13 +62,19 @@ github.com/VictoriaMetrics/fastcache v1.5.7 h1:4y6y0G8PRzszQUYIQHHssv/jgPHAb5qQu
github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8=
github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
+github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af h1:wVe6/Ea46ZMeNkQjjBW6xcqyQA/j5e0D6GytH95g0gQ=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
+github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db h1:nxAtV4VajJDhKysp2kdcJZsq8Ss1xSA0vZTkVHHJd0E=
github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
github.com/aws/aws-sdk-go-v2 v1.4.0 h1:Ryh4fNebT9SwLyCKPSk83dyEZj+KB6KzDyb1gXii7EI=
github.com/aws/aws-sdk-go-v2 v1.4.0/go.mod h1:tI4KhsR5VkzlUa2DZAdwx7wCAYGwkZZ1H31PYrBFx1w=
@@ -80,19 +97,29 @@ github.com/aws/smithy-go v1.4.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 h1:y4B3+GPxKlrigF1ha5FFErxK+sr6sWxQovRMzwMhejo=
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
+github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
+github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
+github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
+github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd h1:qdGvebPBDuYDPGi1WCPjy1tGyMpmDK8IEapSsszn7HE=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
+github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723 h1:ZA/jbKoGcVAnER6pCHPEkGdZOV7U1oLUedErBHCUMs0=
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
+github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
+github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
+github.com/c-bata/go-prompt v0.2.2 h1:uyKRz6Z6DUyj49QVijyM339UJV9yhbr70gESwbNU3e0=
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
+github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
@@ -100,16 +127,23 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/cloudflare-go v0.14.0 h1:gFqGlGl/5f9UGXAaKapCGUfaTCgRKKnzu2VvzMZlOFA=
github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304=
+github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572 h1:+R8G1+Ftumd0DaveLgMIjrFPcAS4G8MsVXWXiyZL5BY=
github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ=
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f h1:C43yEtQ6NIf4ftFXD/V55gnGFgPbMQobd//YlnLjUJ8=
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/dave/jennifer v1.2.0 h1:S15ZkFMRoJ36mGAQgWL1tnr0NQJh9rZ8qatseX/VbBc=
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -119,7 +153,9 @@ github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8 h1:akOQj8IVgoeFfBTzGOEQakCYshWD6RNo1M5pivFXt70=
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954 h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dlclark/regexp2 v1.2.0 h1:8sAhBGEM0dRWogWqWyQeIJnxjWO6oIjl8FKqREDsGfk=
github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
@@ -127,10 +163,13 @@ github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf h1:sh8rkQZavChcmak
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498 h1:Y9vTBSsV4hSwPSj4bacAU/eSnV3dAxVpepaghAdhGoQ=
github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA=
+github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0=
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
@@ -138,6 +177,7 @@ github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90 h1:WXb3TSNmHp2vHoCroCIB1foO/yQ36swABL8aOVeDpgg=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
@@ -146,9 +186,13 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
+github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd h1:r04MMPyLHj/QwZuMJ5+7tJcBr1AQjpiAK/rZWRrQT7o=
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
+github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8=
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72 h1:b+9H1GAsx5RsjvDFLoS5zkNBzIQMuVKUYQDmxU3N5XE=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -159,21 +203,27 @@ github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-sourcemap/sourcemap v2.1.2+incompatible h1:0b/xya7BKGhXuqFESKM4oIiRo9WOt2ebz7KxfreD6ug=
github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
+github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -191,7 +241,9 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3 h1:ur2rms48b3Ep1dxh7aUV2FZEQ8jEVO2F6ILKx8ofkAg=
github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A=
github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -202,15 +254,20 @@ github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa h1:Q75Upo5UN4JbPFURXZ8nLKYUvF85dyFRop/vQ0Rv+64=
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc h1:DLpL8pWq0v4JYoRpEhDfsJhhJyGKCcQM2WPW2TJs31c=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.5 h1:kxhtnfFVi+rYdOALN0B3k9UT86zVJKfBimRaciULW4I=
github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
@@ -224,55 +281,85 @@ github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZ
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.1.1 h1:4JywC80b+/hSfljFlEBLHrrh+CIONLDz9NuFl0af4Mw=
github.com/holiman/uint256 v1.1.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.1-0.20210310174557-0ca763054c88 h1:bcAj8KroPf552TScjFPIakjH2/tdIrIH8F+cc4v4SRo=
github.com/huin/goupnp v1.0.1-0.20210310174557-0ca763054c88/go.mod h1:nNs7wvRfN1eKaMknBydLNQU6146XQim8t4h+q90biWo=
+github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150 h1:vlNjIqmUZ9CMAWsbURYl3a6wZbw7q5RHVvlXTNS/Bs8=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/influxdata/flux v0.65.1 h1:77BcVUCzvN5HMm8+j9PRBQ4iZcu98Dl4Y9rf+J5vhnc=
github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
github.com/influxdata/influxdb v1.8.3 h1:WEypI1BQFTT4teLM+1qkEcvUi0dAvopAI/ir0vAiBg8=
github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
+github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385 h1:ED4e5Cc3z5vSN2Tz2GkOHN7vs4Sxe2yds6CXvDnvZFE=
github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
+github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e h1:/o3vQtpWJhvnIbXley4/jwzzqNeigJK9z+LZcJZ9zfM=
github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
+github.com/influxdata/promql/v2 v2.12.0 h1:kXn3p0D7zPw16rOtfDR+wo6aaiH8tSMfhPwONTxrlEc=
github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
+github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6 h1:UzJnB7VRL4PSkUJHwsyzseGOmrO/r4yA+AuxGJxiZmA=
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
+github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9 h1:MHTrDWmQpHq/hkq+7cw9oYAt2PqUw52TZazRA0N7PGE=
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
+github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368 h1:+TUUmaFa4YD1Q+7bH9o5NCHQGPMqZCYJiNW6lIIS9z4=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
+github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89 h1:12K8AlpT0/6QUXSfV0yi4Q0jkbq8NDtIKFtF61AoqV0=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U=
github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ=
+github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
+github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jsternberg/zap-logfmt v1.0.0 h1:0Dz2s/eturmdUS34GM82JwNEdQ9hPoJgqptcEKcbpzY=
github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5 h1:PJr+ZMXIecYc1Ey2zucXdR73SMBtgjPgwa31099IMv0=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef h1:2jNeR4YUziVtswNP9sEFAI913cVrzH85T+8Q6LpYbT0=
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356 h1:I/yrLt2WilKxlQKCM52clh5rGzTKpVctGT1lH4Dc8Jw=
github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
+github.com/kisielk/errcheck v1.2.0 h1:reN85Pxc5larApoH1keMBiu2GWtPqXQ1nc9gx+jOU+E=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
+github.com/klauspost/compress v1.4.0 h1:8nsMz3tWa9SWWPL60G1V6CUsf4lLjWLTNEtibhe8gh8=
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
+github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada h1:3L+neHp83cTjegPdCiOxVOJtRIy7/8RldvMTsyPYH10=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -280,6 +367,7 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
+github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
@@ -295,13 +383,19 @@ github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXT
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104 h1:d8RFOZ2IiFtFWBcKEHAFYJcPTf0wY5q0exFNJZVWa1U=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks=
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
@@ -309,6 +403,7 @@ github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcou
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
@@ -325,16 +420,24 @@ github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt
github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/panjf2000/ants/v2 v2.4.5 h1:kcGvjXB7ea0MrzzszpnlVFthhYKoFxLi75nRbsq01HY=
+github.com/panjf2000/ants/v2 v2.4.5/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A=
+github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c h1:rp5dCmg/yLR3mgFuSOe4oEnDDmGLROTvMragMUXpTQw=
+github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c/go.mod h1:X07ZCGwUbLaax7L0S3Tw4hpejzu63ZrrQiUe6W0hcy0=
+github.com/paulbellamy/ratecounter v0.2.0 h1:2L/RhJq+HA8gBQImDXtLPrDXK5qAj6ozWVK/zFXVJGs=
github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
+github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
+github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5 h1:tFwafIEMf0B7NlcxV/zJ6leBIa81D3hgGSgsE5hCkOQ=
github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -356,29 +459,43 @@ github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 h1:RnWNS9Hlm8BIkjr6wx8li5abe0fr73jljLycdfemTp0=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
+github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
+github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
+github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU=
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
@@ -394,6 +511,7 @@ github.com/tendermint/iavl v0.12.0 h1:xcaFAr+ycqCj7WN1RzL2EfcBioRDOHcU1oWcg83K02
github.com/tendermint/iavl v0.12.0/go.mod h1:EoKMMv++tDOL5qKKVnoIqtVPshRrEPeJ0WsgDOLAauM=
github.com/tendermint/tendermint v0.31.11 h1:TIs//4WfEAG4TOZc2eUfJPI3T8KrywXQCCPnGAaM1Wo=
github.com/tendermint/tendermint v0.31.11/go.mod h1:ymcPyWblXCplCPQjbOYbrF1fWnpslATMVqiGgWbZrlc=
+github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4=
github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
@@ -401,17 +519,25 @@ github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefld
github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
+github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
+github.com/willf/bitset v1.1.3 h1:ekJIKh6+YbUIVt9DfNbkR5d6aFcFTLDRyJNAACURBg8=
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6 h1:YdYsPAZ2pC6Tow/nPZOPQ96O3hm/ToAkGsPLzedXERk=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
+github.com/yuin/goldmark v1.2.1 h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -432,9 +558,11 @@ golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxT
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -443,13 +571,16 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -477,6 +608,7 @@ golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAG
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -555,6 +687,7 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -563,9 +696,12 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1N
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.6.0 h1:DJy6UzXbahnGUf1ujUNkh/NEtK14qMo2nvlBPs4U5yw=
gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b h1:Qh4dB5D/WpoUUp3lSod7qgoyEHbDGPUWjIbnqdqqe1k=
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
@@ -573,11 +709,13 @@ google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEn
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -608,11 +746,14 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
@@ -626,6 +767,7 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -638,6 +780,9 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
+rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 56c8eb206e..d95a121142 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -25,19 +25,20 @@ import (
"strings"
"time"
- "github.com/ethereum/go-ethereum/core/rawdb"
-
"github.com/davecgh/go-spew/spew"
+
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/accounts/scwallet"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -896,10 +897,10 @@ func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.Blo
}
// Wait for the context to be done and cancel the evm. Even if the
// EVM has finished, cancelling may be done (repeatedly)
- go func() {
+ gopool.Submit(func() {
<-ctx.Done()
evm.Cancel()
- }()
+ })
// Execute the message.
gp := new(core.GasPool).AddGas(math.MaxUint64)
diff --git a/les/handler_test.go b/les/handler_test.go
index d1dbee6bdf..c4f54cb941 100644
--- a/les/handler_test.go
+++ b/les/handler_test.go
@@ -316,7 +316,7 @@ func TestGetStaleCodeLes4(t *testing.T) { testGetStaleCode(t, 4) }
func testGetStaleCode(t *testing.T, protocol int) {
netconfig := testnetConfig{
- blocks: core.TriesInMemory + 4,
+ blocks: 128 + 4,
protocol: protocol,
nopruning: true,
}
@@ -430,7 +430,7 @@ func TestGetStaleProofLes4(t *testing.T) { testGetStaleProof(t, 4) }
func testGetStaleProof(t *testing.T, protocol int) {
netconfig := testnetConfig{
- blocks: core.TriesInMemory + 4,
+ blocks: 128 + 4,
protocol: protocol,
nopruning: true,
}
diff --git a/les/peer.go b/les/peer.go
index 25a3bb1556..5cdd557a90 100644
--- a/les/peer.go
+++ b/les/peer.go
@@ -29,7 +29,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
- "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/les/flowcontrol"
@@ -1055,7 +1054,7 @@ func (p *clientPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, ge
// If local ethereum node is running in archive mode, advertise ourselves we have
// all version state data. Otherwise only recent state is available.
- stateRecent := uint64(core.TriesInMemory - blockSafetyMargin)
+ stateRecent := uint64(server.handler.blockchain.TriesInMemory() - blockSafetyMargin)
if server.archiveMode {
stateRecent = 0
}
diff --git a/les/protocol.go b/les/protocol.go
index 07a4452f40..06db9024eb 100644
--- a/les/protocol.go
+++ b/les/protocol.go
@@ -307,19 +307,19 @@ func (hn *hashOrNumber) EncodeRLP(w io.Writer) error {
// DecodeRLP is a specialized decoder for hashOrNumber to decode the contents
// into either a block hash or a block number.
func (hn *hashOrNumber) DecodeRLP(s *rlp.Stream) error {
- _, size, _ := s.Kind()
- origin, err := s.Raw()
- if err == nil {
- switch {
- case size == 32:
- err = rlp.DecodeBytes(origin, &hn.Hash)
- case size <= 8:
- err = rlp.DecodeBytes(origin, &hn.Number)
- default:
- err = fmt.Errorf("invalid input size %d for origin", size)
- }
+ _, size, err := s.Kind()
+ switch {
+ case err != nil:
+ return err
+ case size == 32:
+ hn.Number = 0
+ return s.Decode(&hn.Hash)
+ case size <= 8:
+ hn.Hash = common.Hash{}
+ return s.Decode(&hn.Number)
+ default:
+ return fmt.Errorf("invalid input size %d for origin", size)
}
- return err
}
// CodeData is the network response packet for a node data retrieval.
diff --git a/les/server_requests.go b/les/server_requests.go
index bab5f733d5..7564420ce6 100644
--- a/les/server_requests.go
+++ b/les/server_requests.go
@@ -297,7 +297,7 @@ func handleGetCode(msg Decoder) (serveRequestFn, uint64, uint64, error) {
// Refuse to search stale state data in the database since looking for
// a non-exist key is kind of expensive.
local := bc.CurrentHeader().Number.Uint64()
- if !backend.ArchiveMode() && header.Number.Uint64()+core.TriesInMemory <= local {
+ if !backend.ArchiveMode() && header.Number.Uint64()+bc.TriesInMemory() <= local {
p.Log().Debug("Reject stale code request", "number", header.Number.Uint64(), "head", local)
p.bumpInvalid()
continue
@@ -396,7 +396,7 @@ func handleGetProofs(msg Decoder) (serveRequestFn, uint64, uint64, error) {
// Refuse to search stale state data in the database since looking for
// a non-exist key is kind of expensive.
local := bc.CurrentHeader().Number.Uint64()
- if !backend.ArchiveMode() && header.Number.Uint64()+core.TriesInMemory <= local {
+ if !backend.ArchiveMode() && header.Number.Uint64()+bc.TriesInMemory() <= local {
p.Log().Debug("Reject stale trie request", "number", header.Number.Uint64(), "head", local)
p.bumpInvalid()
continue
diff --git a/les/test_helper.go b/les/test_helper.go
index fc85ed957f..70d0c294a8 100644
--- a/les/test_helper.go
+++ b/les/test_helper.go
@@ -373,7 +373,7 @@ func (p *testPeer) handshakeWithClient(t *testing.T, td *big.Int, head common.Ha
sendList = sendList.add("serveHeaders", nil)
sendList = sendList.add("serveChainSince", uint64(0))
sendList = sendList.add("serveStateSince", uint64(0))
- sendList = sendList.add("serveRecentState", uint64(core.TriesInMemory-4))
+ sendList = sendList.add("serveRecentState", uint64(128-4))
sendList = sendList.add("txRelay", nil)
sendList = sendList.add("flowControl/BL", testBufLimit)
sendList = sendList.add("flowControl/MRR", testBufRecharge)
diff --git a/light/trie.go b/light/trie.go
index 0516b94486..e189634e1c 100644
--- a/light/trie.go
+++ b/light/trie.go
@@ -95,6 +95,18 @@ func (db *odrDatabase) TrieDB() *trie.Database {
return nil
}
+func (db *odrDatabase) CacheAccount(_ common.Hash, _ state.Trie) {
+ return
+}
+
+func (db *odrDatabase) CacheStorage(_ common.Hash, _ common.Hash, _ state.Trie) {
+ return
+}
+
+func (db *odrDatabase) Purge() {
+ return
+}
+
type odrTrie struct {
db *odrDatabase
id *TrieID
diff --git a/miner/worker.go b/miner/worker.go
index 950efc7e63..b93b8752af 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -474,22 +474,7 @@ func (w *worker) mainLoop() {
start := time.Now()
if err := w.commitUncle(w.current, ev.Block.Header()); err == nil {
var uncles []*types.Header
- w.current.uncles.Each(func(item interface{}) bool {
- hash, ok := item.(common.Hash)
- if !ok {
- return false
- }
- uncle, exist := w.localUncles[hash]
- if !exist {
- uncle, exist = w.remoteUncles[hash]
- }
- if !exist {
- return false
- }
- uncles = append(uncles, uncle.Header())
- return false
- })
- w.commit(uncles, nil, true, start)
+ w.commit(uncles, nil, false, start)
}
}
@@ -676,14 +661,6 @@ func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error {
uncles: mapset.NewSet(),
header: header,
}
- // when 08 is processed ancestors contain 07 (quick block)
- for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) {
- for _, uncle := range ancestor.Uncles() {
- env.family.Add(uncle.Hash())
- }
- env.family.Add(ancestor.Hash())
- env.ancestors.Add(ancestor.Hash())
- }
// Keep track of transactions which return errors so they can be removed
env.tcount = 0
@@ -825,12 +802,11 @@ LOOP:
// during transaction acceptance is the transaction pool.
//
// We use the eip155 signer regardless of the current hf.
- from, _ := types.Sender(w.current.signer, tx)
+ //from, _ := types.Sender(w.current.signer, tx)
// Check whether the tx is replay protected. If we're not in the EIP155 hf
// phase, start ignoring the sender until we do.
if tx.Protected() && !w.chainConfig.IsEIP155(w.current.header.Number) {
- log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block)
-
+ //log.Trace("Ignoring replay protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block)
txs.Pop()
continue
}
@@ -841,17 +817,17 @@ LOOP:
switch {
case errors.Is(err, core.ErrGasLimitReached):
// Pop the current out-of-gas transaction without shifting in the next from the account
- log.Trace("Gas limit exceeded for current block", "sender", from)
+ //log.Trace("Gas limit exceeded for current block", "sender", from)
txs.Pop()
case errors.Is(err, core.ErrNonceTooLow):
// New head notification data race between the transaction pool and miner, shift
- log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
+ //log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
txs.Shift()
case errors.Is(err, core.ErrNonceTooHigh):
// Reorg notification data race between the transaction pool and miner, skip account =
- log.Trace("Skipping account with hight nonce", "sender", from, "nonce", tx.Nonce())
+ //log.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce())
txs.Pop()
case errors.Is(err, nil):
@@ -862,13 +838,13 @@ LOOP:
case errors.Is(err, core.ErrTxTypeNotSupported):
// Pop the unsupported transaction without shifting in the next from the account
- log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type())
+ //log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type())
txs.Pop()
default:
// Strange error, discard the transaction and get the next in line (note, the
// nonce-too-high clause will prevent us from executing in vain).
- log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
+ //log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
txs.Shift()
}
}
@@ -953,30 +929,7 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64)
}
systemcontracts.UpgradeBuildInSystemContract(w.chainConfig, header.Number, env.state)
// Accumulate the uncles for the current block
- uncles := make([]*types.Header, 0, 2)
- commitUncles := func(blocks map[common.Hash]*types.Block) {
- // Clean up stale uncle blocks first
- for hash, uncle := range blocks {
- if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() {
- delete(blocks, hash)
- }
- }
- for hash, uncle := range blocks {
- if len(uncles) == 2 {
- break
- }
- if err := w.commitUncle(env, uncle.Header()); err != nil {
- log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
- } else {
- log.Debug("Committing new uncle to block", "hash", hash)
- uncles = append(uncles, uncle.Header())
- }
- }
- }
- // Prefer to locally generated uncle
- commitUncles(w.localUncles)
- commitUncles(w.remoteUncles)
-
+ uncles := make([]*types.Header, 0)
// Create an empty block based on temporary copied state for
// sealing in advance without waiting block execution finished.
if !noempty && atomic.LoadUint32(&w.noempty) == 0 {
@@ -1014,13 +967,13 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64)
commitTxsTimer.UpdateSince(start)
log.Info("Gas pool", "height", header.Number.String(), "pool", w.current.gasPool.String())
}
- w.commit(uncles, w.fullTaskHook, true, tstart)
+ w.commit(uncles, w.fullTaskHook, false, tstart)
}
// commit runs any post-transaction state modifications, assembles the final block
// and commits new work if consensus engine is running.
func (w *worker) commit(uncles []*types.Header, interval func(), update bool, start time.Time) error {
- s := w.current.state.Copy()
+ s := w.current.state
block, receipts, err := w.engine.FinalizeAndAssemble(w.chain, types.CopyHeader(w.current.header), s, w.current.txs, uncles, w.current.receipts)
if err != nil {
return err
@@ -1034,7 +987,7 @@ func (w *worker) commit(uncles []*types.Header, interval func(), update bool, st
w.unconfirmed.Shift(block.NumberU64() - 1)
log.Info("Commit new mining work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
"uncles", len(uncles), "txs", w.current.tcount,
- "gas", block.GasUsed(), "fees", totalFees(block, receipts),
+ "gas", block.GasUsed(),
"elapsed", common.PrettyDuration(time.Since(start)))
case <-w.exitCh:
diff --git a/miner/worker_test.go b/miner/worker_test.go
index 0fe62316e1..4015e7294f 100644
--- a/miner/worker_test.go
+++ b/miner/worker_test.go
@@ -260,169 +260,6 @@ func testGenerateBlockAndImport(t *testing.T, isClique bool) {
}
}
-func TestEmptyWorkEthash(t *testing.T) {
- testEmptyWork(t, ethashChainConfig, ethash.NewFaker())
-}
-func TestEmptyWorkClique(t *testing.T) {
- testEmptyWork(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, rawdb.NewMemoryDatabase()))
-}
-
-func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) {
- defer engine.Close()
-
- w, _ := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0)
- defer w.close()
-
- var (
- taskIndex int
- taskCh = make(chan struct{}, 2)
- )
- checkEqual := func(t *testing.T, task *task, index int) {
- // The first empty work without any txs included
- receiptLen, balance := 0, big.NewInt(0)
- if index == 1 {
- // The second full work with 1 tx included
- receiptLen, balance = 1, big.NewInt(1000)
- }
- if len(task.receipts) != receiptLen {
- t.Fatalf("receipt number mismatch: have %d, want %d", len(task.receipts), receiptLen)
- }
- if task.state.GetBalance(testUserAddress).Cmp(balance) != 0 {
- t.Fatalf("account balance mismatch: have %d, want %d", task.state.GetBalance(testUserAddress), balance)
- }
- }
- w.newTaskHook = func(task *task) {
- if task.block.NumberU64() == 1 {
- checkEqual(t, task, taskIndex)
- taskIndex += 1
- taskCh <- struct{}{}
- }
- }
- w.skipSealHook = func(task *task) bool { return true }
- w.fullTaskHook = func() {
- time.Sleep(100 * time.Millisecond)
- }
- w.start() // Start mining!
- for i := 0; i < 2; i += 1 {
- select {
- case <-taskCh:
- case <-time.NewTimer(3 * time.Second).C:
- t.Error("new task timeout")
- }
- }
-}
-
-func TestStreamUncleBlock(t *testing.T) {
- ethash := ethash.NewFaker()
- defer ethash.Close()
-
- w, b := newTestWorker(t, ethashChainConfig, ethash, rawdb.NewMemoryDatabase(), 1)
- defer w.close()
-
- var taskCh = make(chan struct{})
-
- taskIndex := 0
- w.newTaskHook = func(task *task) {
- if task.block.NumberU64() == 2 {
- // The first task is an empty task, the second
- // one has 1 pending tx, the third one has 1 tx
- // and 1 uncle.
- if taskIndex == 2 {
- have := task.block.Header().UncleHash
- want := types.CalcUncleHash([]*types.Header{b.uncleBlock.Header()})
- if have != want {
- t.Errorf("uncle hash mismatch: have %s, want %s", have.Hex(), want.Hex())
- }
- }
- taskCh <- struct{}{}
- taskIndex += 1
- }
- }
- w.skipSealHook = func(task *task) bool {
- return true
- }
- w.fullTaskHook = func() {
- time.Sleep(100 * time.Millisecond)
- }
- w.start()
-
- for i := 0; i < 2; i += 1 {
- select {
- case <-taskCh:
- case <-time.NewTimer(time.Second).C:
- t.Error("new task timeout")
- }
- }
-
- w.postSideBlock(core.ChainSideEvent{Block: b.uncleBlock})
-
- select {
- case <-taskCh:
- case <-time.NewTimer(time.Second).C:
- t.Error("new task timeout")
- }
-}
-
-func TestRegenerateMiningBlockEthash(t *testing.T) {
- testRegenerateMiningBlock(t, ethashChainConfig, ethash.NewFaker())
-}
-
-func TestRegenerateMiningBlockClique(t *testing.T) {
- testRegenerateMiningBlock(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, rawdb.NewMemoryDatabase()))
-}
-
-func testRegenerateMiningBlock(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) {
- defer engine.Close()
-
- w, b := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0)
- defer w.close()
-
- var taskCh = make(chan struct{})
-
- taskIndex := 0
- w.newTaskHook = func(task *task) {
- if task.block.NumberU64() == 1 {
- // The first task is an empty task, the second
- // one has 1 pending tx, the third one has 2 txs
- if taskIndex == 2 {
- receiptLen, balance := 2, big.NewInt(2000)
- if len(task.receipts) != receiptLen {
- t.Errorf("receipt number mismatch: have %d, want %d", len(task.receipts), receiptLen)
- }
- if task.state.GetBalance(testUserAddress).Cmp(balance) != 0 {
- t.Errorf("account balance mismatch: have %d, want %d", task.state.GetBalance(testUserAddress), balance)
- }
- }
- taskCh <- struct{}{}
- taskIndex += 1
- }
- }
- w.skipSealHook = func(task *task) bool {
- return true
- }
- w.fullTaskHook = func() {
- time.Sleep(100 * time.Millisecond)
- }
-
- w.start()
- // Ignore the first two works
- for i := 0; i < 2; i += 1 {
- select {
- case <-taskCh:
- case <-time.NewTimer(time.Second).C:
- t.Error("new task timeout")
- }
- }
- b.txPool.AddLocals(newTxs)
- time.Sleep(time.Second)
-
- select {
- case <-taskCh:
- case <-time.NewTimer(time.Second).C:
- t.Error("new task timeout")
- }
-}
-
func TestAdjustIntervalEthash(t *testing.T) {
testAdjustInterval(t, ethashChainConfig, ethash.NewFaker())
}
diff --git a/node/api.go b/node/api.go
index be20b89d95..023d5d27b3 100644
--- a/node/api.go
+++ b/node/api.go
@@ -21,6 +21,7 @@ import (
"fmt"
"strings"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/internal/debug"
@@ -141,7 +142,7 @@ func (api *privateAdminAPI) PeerEvents(ctx context.Context) (*rpc.Subscription,
}
rpcSub := notifier.CreateSubscription()
- go func() {
+ gopool.Submit(func() {
events := make(chan *p2p.PeerEvent)
sub := server.SubscribeEvents(events)
defer sub.Unsubscribe()
@@ -158,7 +159,7 @@ func (api *privateAdminAPI) PeerEvents(ctx context.Context) (*rpc.Subscription,
return
}
}
- }()
+ })
return rpcSub, nil
}
diff --git a/p2p/dial.go b/p2p/dial.go
index d36d665501..f7f48916ea 100644
--- a/p2p/dial.go
+++ b/p2p/dial.go
@@ -27,6 +27,7 @@ import (
"sync"
"time"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
@@ -177,8 +178,13 @@ func newDialScheduler(config dialConfig, it enode.Iterator, setupFunc dialSetupF
d.lastStatsLog = d.clock.Now()
d.ctx, d.cancel = context.WithCancel(context.Background())
d.wg.Add(2)
- go d.readNodes(it)
- go d.loop(it)
+ gopool.Submit(func() {
+ d.readNodes(it)
+ })
+ gopool.Submit(
+ func() {
+ d.loop(it)
+ })
return d
}
@@ -455,10 +461,10 @@ func (d *dialScheduler) startDial(task *dialTask) {
hkey := string(task.dest.ID().Bytes())
d.history.add(hkey, d.clock.Now().Add(dialHistoryExpiration))
d.dialing[task.dest.ID()] = task
- go func() {
+ gopool.Submit(func() {
task.run(d)
d.doneCh <- task
- }()
+ })
}
// A dialTask generated for each node that is dialed.
diff --git a/p2p/discover/lookup.go b/p2p/discover/lookup.go
index 9ab4a71ce7..f6f125d944 100644
--- a/p2p/discover/lookup.go
+++ b/p2p/discover/lookup.go
@@ -20,6 +20,7 @@ import (
"context"
"time"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/p2p/enode"
)
@@ -122,7 +123,9 @@ func (it *lookup) startQueries() bool {
if !it.asked[n.ID()] {
it.asked[n.ID()] = true
it.queries++
- go it.query(n, it.replyCh)
+ gopool.Submit(func() {
+ it.query(n, it.replyCh)
+ })
}
}
// The lookup ends when no more nodes can be asked.
diff --git a/p2p/discover/table.go b/p2p/discover/table.go
index d08f8a6c69..bf136cf48f 100644
--- a/p2p/discover/table.go
+++ b/p2p/discover/table.go
@@ -33,6 +33,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/netutil"
@@ -229,8 +230,9 @@ func (tab *Table) loop() {
defer copyNodes.Stop()
// Start initial refresh.
- go tab.doRefresh(refreshDone)
-
+ gopool.Submit(func() {
+ tab.doRefresh(refreshDone)
+ })
loop:
for {
select {
@@ -238,13 +240,18 @@ loop:
tab.seedRand()
if refreshDone == nil {
refreshDone = make(chan struct{})
- go tab.doRefresh(refreshDone)
+ gopool.Submit(func() {
+ tab.doRefresh(refreshDone)
+ })
}
case req := <-tab.refreshReq:
waiting = append(waiting, req)
if refreshDone == nil {
refreshDone = make(chan struct{})
- go tab.doRefresh(refreshDone)
+ gopool.Submit(
+ func() {
+ tab.doRefresh(refreshDone)
+ })
}
case <-refreshDone:
for _, ch := range waiting {
@@ -253,12 +260,17 @@ loop:
waiting, refreshDone = nil, nil
case <-revalidate.C:
revalidateDone = make(chan struct{})
- go tab.doRevalidate(revalidateDone)
+ gopool.Submit(func() {
+ tab.doRevalidate(revalidateDone)
+ })
case <-revalidateDone:
revalidate.Reset(tab.nextRevalidateTime())
revalidateDone = nil
case <-copyNodes.C:
- go tab.copyLiveNodes()
+ gopool.Submit(func() {
+ tab.copyLiveNodes()
+ })
+
case <-tab.closeReq:
break loop
}
diff --git a/p2p/discover/v4_udp.go b/p2p/discover/v4_udp.go
index ad23eee6b4..88580d7cee 100644
--- a/p2p/discover/v4_udp.go
+++ b/p2p/discover/v4_udp.go
@@ -29,6 +29,7 @@ import (
"sync"
"time"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discover/v4wire"
@@ -490,7 +491,9 @@ func (t *UDPv4) loop() {
if contTimeouts > ntpFailureThreshold {
if time.Since(ntpWarnTime) >= ntpWarningCooldown {
ntpWarnTime = time.Now()
- go checkClockDrift()
+ gopool.Submit(func() {
+ checkClockDrift()
+ })
}
contTimeouts = 0
}
diff --git a/p2p/enode/iter.go b/p2p/enode/iter.go
index 664964f534..b14ce4656f 100644
--- a/p2p/enode/iter.go
+++ b/p2p/enode/iter.go
@@ -19,6 +19,8 @@ package enode
import (
"sync"
"time"
+
+ "github.com/ethereum/go-ethereum/common/gopool"
)
// Iterator represents a sequence of nodes. The Next method moves to the next node in the
@@ -177,7 +179,9 @@ func (m *FairMix) AddSource(it Iterator) {
m.wg.Add(1)
source := &mixSource{it, make(chan *Node), m.timeout}
m.sources = append(m.sources, source)
- go m.runSource(m.closed, source)
+ gopool.Submit(func() {
+ m.runSource(m.closed, source)
+ })
}
// Close shuts down the mixer and all current sources.
diff --git a/p2p/enode/nodedb.go b/p2p/enode/nodedb.go
index d62f383f0b..18570c3659 100644
--- a/p2p/enode/nodedb.go
+++ b/p2p/enode/nodedb.go
@@ -26,6 +26,7 @@ import (
"sync"
"time"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/rlp"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors"
@@ -303,7 +304,11 @@ func deleteRange(db *leveldb.DB, prefix []byte) {
// convergence, it's simpler to "ensure" the correct state when an appropriate
// condition occurs (i.e. a successful bonding), and discard further events.
func (db *DB) ensureExpirer() {
- db.runner.Do(func() { go db.expirer() })
+ db.runner.Do(func() {
+ gopool.Submit(func() {
+ db.expirer()
+ })
+ })
}
// expirer should be started in a go routine, and is responsible for looping ad
diff --git a/p2p/nat/nat.go b/p2p/nat/nat.go
index 9d5519b9c4..8458452ded 100644
--- a/p2p/nat/nat.go
+++ b/p2p/nat/nat.go
@@ -25,6 +25,8 @@ import (
"sync"
"time"
+ "github.com/ethereum/go-ethereum/common/gopool"
+
"github.com/ethereum/go-ethereum/log"
natpmp "github.com/jackpal/go-nat-pmp"
)
@@ -145,8 +147,8 @@ func Any() Interface {
// Internet-class address. Return ExtIP in this case.
return startautodisc("UPnP or NAT-PMP", func() Interface {
found := make(chan Interface, 2)
- go func() { found <- discoverUPnP() }()
- go func() { found <- discoverPMP() }()
+ gopool.Submit(func() { found <- discoverUPnP() })
+ gopool.Submit(func() { found <- discoverPMP() })
for i := 0; i < cap(found); i++ {
if c := <-found; c != nil {
return c
diff --git a/p2p/nat/natpmp.go b/p2p/nat/natpmp.go
index 7f85543f8e..ac9aa1ec83 100644
--- a/p2p/nat/natpmp.go
+++ b/p2p/nat/natpmp.go
@@ -22,6 +22,7 @@ import (
"strings"
"time"
+ "github.com/ethereum/go-ethereum/common/gopool"
natpmp "github.com/jackpal/go-nat-pmp"
)
@@ -68,14 +69,14 @@ func discoverPMP() Interface {
found := make(chan *pmp, len(gws))
for i := range gws {
gw := gws[i]
- go func() {
+ gopool.Submit(func() {
c := natpmp.NewClient(gw)
if _, err := c.GetExternalAddress(); err != nil {
found <- nil
} else {
found <- &pmp{gw, c}
}
- }()
+ })
}
// return the one that responds first.
// discovery needs to be quick, so we stop caring about
diff --git a/p2p/peer.go b/p2p/peer.go
index 8ebc858392..e057e689f6 100644
--- a/p2p/peer.go
+++ b/p2p/peer.go
@@ -25,6 +25,7 @@ import (
"sync"
"time"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
@@ -308,7 +309,9 @@ func (p *Peer) handle(msg Msg) error {
switch {
case msg.Code == pingMsg:
msg.Discard()
- go SendItems(p.rw, pongMsg)
+ gopool.Submit(func() {
+ SendItems(p.rw, pongMsg)
+ })
case msg.Code == discMsg:
var reason [1]DiscReason
// This is the last message. We don't need to discard or
diff --git a/p2p/rlpx/rlpx.go b/p2p/rlpx/rlpx.go
index 2021bf08be..ab79c26a28 100644
--- a/p2p/rlpx/rlpx.go
+++ b/p2p/rlpx/rlpx.go
@@ -34,13 +34,22 @@ import (
"net"
"time"
+ "github.com/VictoriaMetrics/fastcache"
+ "github.com/golang/snappy"
+ "github.com/oxtoacart/bpool"
+ "golang.org/x/crypto/sha3"
+
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/ecies"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/golang/snappy"
- "golang.org/x/crypto/sha3"
)
+var snappyCache *fastcache.Cache
+
+func init() {
+ snappyCache = fastcache.New(50 * 1024 * 1024)
+}
+
// Conn is an RLPx network connection. It wraps a low-level network connection. The
// underlying connection should not be used for other activity when it is wrapped by Conn.
//
@@ -179,7 +188,14 @@ func (c *Conn) Write(code uint64, data []byte) (uint32, error) {
return 0, errPlainMessageTooLarge
}
if c.snappy {
- data = snappy.Encode(nil, data)
+ if encodedResult, ok := snappyCache.HasGet(nil, data); ok {
+ data = encodedResult
+ } else {
+ encodedData := snappy.Encode(nil, data)
+ snappyCache.Set(data, encodedData)
+
+ data = encodedData
+ }
}
wireSize := uint32(len(data))
@@ -239,15 +255,20 @@ func putInt24(v uint32, b []byte) {
b[2] = byte(v)
}
+const BpoolMaxSize = 4
+
+var bytepool = bpool.NewBytePool(BpoolMaxSize, aes.BlockSize)
+
// updateMAC reseeds the given hash with encrypted seed.
// it returns the first 16 bytes of the hash sum after seeding.
func updateMAC(mac hash.Hash, block cipher.Block, seed []byte) []byte {
- aesbuf := make([]byte, aes.BlockSize)
+ aesbuf := bytepool.Get()
block.Encrypt(aesbuf, mac.Sum(nil))
for i := range aesbuf {
aesbuf[i] ^= seed[i]
}
mac.Write(aesbuf)
+ bytepool.Put(aesbuf)
return mac.Sum(nil)[:16]
}
diff --git a/p2p/server.go b/p2p/server.go
index f70ebf7216..dbaee12ea1 100644
--- a/p2p/server.go
+++ b/p2p/server.go
@@ -30,6 +30,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/event"
@@ -562,10 +563,10 @@ func (srv *Server) setupDiscovery() error {
if srv.NAT != nil {
if !realaddr.IP.IsLoopback() {
srv.loopWG.Add(1)
- go func() {
+ gopool.Submit(func() {
nat.Map(srv.NAT, srv.quit, "udp", realaddr.Port, realaddr.Port, "ethereum discovery")
srv.loopWG.Done()
- }()
+ })
}
}
srv.localnode.SetFallbackUDP(realaddr.Port)
@@ -669,10 +670,10 @@ func (srv *Server) setupListening() error {
srv.localnode.Set(enr.TCP(tcp.Port))
if !tcp.IP.IsLoopback() && srv.NAT != nil {
srv.loopWG.Add(1)
- go func() {
+ gopool.Submit(func() {
nat.Map(srv.NAT, srv.quit, "tcp", tcp.Port, tcp.Port, "ethereum p2p")
srv.loopWG.Done()
- }()
+ })
}
}
@@ -890,10 +891,10 @@ func (srv *Server) listenLoop() {
fd = newMeteredConn(fd, true, addr)
srv.log.Trace("Accepted connection", "addr", fd.RemoteAddr())
}
- go func() {
+ gopool.Submit(func() {
srv.SetupConn(fd, inboundConn, nil)
slots <- struct{}{}
- }()
+ })
}
}
@@ -1019,7 +1020,9 @@ func (srv *Server) launchPeer(c *conn) *Peer {
// to the peer.
p.events = &srv.peerFeed
}
- go srv.runPeer(p)
+ gopool.Submit(func() {
+ srv.runPeer(p)
+ })
return p
}
diff --git a/p2p/simulations/examples/ping-pong.go b/p2p/simulations/examples/ping-pong.go
index 0cddd9b505..3780b8635a 100644
--- a/p2p/simulations/examples/ping-pong.go
+++ b/p2p/simulations/examples/ping-pong.go
@@ -25,6 +25,7 @@ import (
"sync/atomic"
"time"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
@@ -140,7 +141,7 @@ func (p *pingPongService) Run(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
log := p.log.New("peer.id", peer.ID())
errC := make(chan error)
- go func() {
+ gopool.Submit(func() {
for range time.Tick(10 * time.Second) {
log.Info("sending ping")
if err := p2p.Send(rw, pingMsgCode, "PING"); err != nil {
@@ -148,8 +149,8 @@ func (p *pingPongService) Run(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
return
}
}
- }()
- go func() {
+ })
+ gopool.Submit(func() {
for {
msg, err := rw.ReadMsg()
if err != nil {
@@ -165,9 +166,9 @@ func (p *pingPongService) Run(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
atomic.AddInt64(&p.received, 1)
if msg.Code == pingMsgCode {
log.Info("sending pong")
- go p2p.Send(rw, pongMsgCode, "PONG")
+ gopool.Submit(func() { p2p.Send(rw, pongMsgCode, "PONG") })
}
}
- }()
+ })
return <-errC
}
diff --git a/p2p/transport.go b/p2p/transport.go
index 3f1cd7d64f..502983a11b 100644
--- a/p2p/transport.go
+++ b/p2p/transport.go
@@ -26,6 +26,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common/bitutil"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p/rlpx"
"github.com/ethereum/go-ethereum/rlp"
@@ -132,7 +133,7 @@ func (t *rlpxTransport) doProtoHandshake(our *protoHandshake) (their *protoHands
// disconnects us early with a valid reason, we should return it
// as the error so it can be tracked elsewhere.
werr := make(chan error, 1)
- go func() { werr <- Send(t, handshakeMsg, our) }()
+ gopool.Submit(func() { werr <- Send(t, handshakeMsg, our) })
if their, err = readProtocolHandshake(t); err != nil {
<-werr // make sure the write terminates too
return nil, err
diff --git a/params/config.go b/params/config.go
index 1a43d835e4..81ff4da9c3 100644
--- a/params/config.go
+++ b/params/config.go
@@ -72,6 +72,9 @@ var (
PetersburgBlock: big.NewInt(7_280_000),
IstanbulBlock: big.NewInt(9_069_000),
MuirGlacierBlock: big.NewInt(9_200_000),
+ RamanujanBlock: big.NewInt(0),
+ NielsBlock: big.NewInt(0),
+ MirrorSyncBlock: big.NewInt(0),
BerlinBlock: big.NewInt(12_244_000),
Ethash: new(EthashConfig),
}
@@ -112,6 +115,9 @@ var (
PetersburgBlock: big.NewInt(4_939_394),
IstanbulBlock: big.NewInt(6_485_846),
MuirGlacierBlock: big.NewInt(7_117_117),
+ RamanujanBlock: big.NewInt(0),
+ NielsBlock: big.NewInt(0),
+ MirrorSyncBlock: big.NewInt(0),
BerlinBlock: big.NewInt(9_812_189),
Ethash: new(EthashConfig),
}
@@ -152,6 +158,9 @@ var (
PetersburgBlock: big.NewInt(4_321_234),
IstanbulBlock: big.NewInt(5_435_345),
MuirGlacierBlock: nil,
+ RamanujanBlock: big.NewInt(0),
+ NielsBlock: big.NewInt(0),
+ MirrorSyncBlock: big.NewInt(0),
BerlinBlock: big.NewInt(8_290_928),
Clique: &CliqueConfig{
Period: 15,
@@ -191,6 +200,9 @@ var (
ByzantiumBlock: big.NewInt(0),
ConstantinopleBlock: big.NewInt(0),
PetersburgBlock: big.NewInt(0),
+ RamanujanBlock: big.NewInt(0),
+ NielsBlock: big.NewInt(0),
+ MirrorSyncBlock: big.NewInt(0),
IstanbulBlock: big.NewInt(1_561_651),
MuirGlacierBlock: nil,
BerlinBlock: big.NewInt(4_460_644),
@@ -293,6 +305,9 @@ var (
ConstantinopleBlock: big.NewInt(0),
PetersburgBlock: big.NewInt(0),
IstanbulBlock: big.NewInt(0),
+ RamanujanBlock: big.NewInt(0),
+ NielsBlock: big.NewInt(0),
+ MirrorSyncBlock: big.NewInt(0),
MuirGlacierBlock: nil,
BerlinBlock: nil, // Don't enable Berlin directly, we're YOLOing it
YoloV3Block: big.NewInt(0),
@@ -307,16 +322,16 @@ var (
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
- AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil,nil, nil, nil, new(EthashConfig), nil, nil}
+ AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, big.NewInt(0), big.NewInt(0), big.NewInt(0), new(EthashConfig), nil, nil}
// AllCliqueProtocolChanges contains every protocol change (EIPs) introduced
// and accepted by the Ethereum core developers into the Clique consensus.
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
- AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}, nil}
+ AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, &CliqueConfig{Period: 0, Epoch: 30000}, nil}
- TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil,nil, nil, new(EthashConfig), nil, nil}
+ TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, big.NewInt(0), big.NewInt(0), big.NewInt(0), new(EthashConfig), nil, nil}
TestRules = TestChainConfig.Rules(new(big.Int))
)
@@ -389,21 +404,20 @@ type ChainConfig struct {
EIP155Block *big.Int `json:"eip155Block,omitempty"` // EIP155 HF block
EIP158Block *big.Int `json:"eip158Block,omitempty"` // EIP158 HF block
- ByzantiumBlock *big.Int `json:"byzantiumBlock,omitempty"` // Byzantium switch block (nil = no fork, 0 = already on byzantium)
- ConstantinopleBlock *big.Int `json:"constantinopleBlock,omitempty"` // Constantinople switch block (nil = no fork, 0 = already activated)
- PetersburgBlock *big.Int `json:"petersburgBlock,omitempty"` // Petersburg switch block (nil = same as Constantinople)
- IstanbulBlock *big.Int `json:"istanbulBlock,omitempty"` // Istanbul switch block (nil = no fork, 0 = already on istanbul)
- MuirGlacierBlock *big.Int `json:"muirGlacierBlock,omitempty"` // Eip-2384 (bomb delay) switch block (nil = no fork, 0 = already activated)
- BerlinBlock *big.Int `json:"berlinBlock,omitempty"` // Berlin switch block (nil = no fork, 0 = already on berlin)
+ ByzantiumBlock *big.Int `json:"byzantiumBlock,omitempty"` // Byzantium switch block (nil = no fork, 0 = already on byzantium)
+ ConstantinopleBlock *big.Int `json:"constantinopleBlock,omitempty"` // Constantinople switch block (nil = no fork, 0 = already activated)
+ PetersburgBlock *big.Int `json:"petersburgBlock,omitempty"` // Petersburg switch block (nil = same as Constantinople)
+ IstanbulBlock *big.Int `json:"istanbulBlock,omitempty"` // Istanbul switch block (nil = no fork, 0 = already on istanbul)
+ MuirGlacierBlock *big.Int `json:"muirGlacierBlock,omitempty"` // Eip-2384 (bomb delay) switch block (nil = no fork, 0 = already activated)
+ BerlinBlock *big.Int `json:"berlinBlock,omitempty"` // Berlin switch block (nil = no fork, 0 = already on berlin)
- YoloV3Block *big.Int `json:"yoloV3Block,omitempty"` // YOLO v3: Gas repricings TODO @holiman add EIP references
- EWASMBlock *big.Int `json:"ewasmBlock,omitempty"` // EWASM switch block (nil = no fork, 0 = already activated) RamanujanBlock *big.Int `json:"ramanujanBlock,omitempty" toml:",omitempty"` // ramanujanBlock switch block (nil = no fork, 0 = already activated)
+ YoloV3Block *big.Int `json:"yoloV3Block,omitempty"` // YOLO v3: Gas repricings TODO @holiman add EIP references
+ EWASMBlock *big.Int `json:"ewasmBlock,omitempty"` // EWASM switch block (nil = no fork, 0 = already activated)
CatalystBlock *big.Int `json:"catalystBlock,omitempty"` // Catalyst switch block (nil = no fork, 0 = already on catalyst)
- RamanujanBlock *big.Int `json:"ramanujanBlock,omitempty" toml:",omitempty"` // ramanujanBlock switch block (nil = no fork, 0 = already activated)
- NielsBlock *big.Int `json:"nielsBlock,omitempty" toml:",omitempty"` // nielsBlock switch block (nil = no fork, 0 = already activated)
- MirrorSyncBlock *big.Int `json:"mirrorSyncBlock,omitempty" toml:",omitempty"` // mirrorSyncBlock switch block (nil = no fork, 0 = already activated)
-
+ RamanujanBlock *big.Int `json:"ramanujanBlock,omitempty" toml:",omitempty"` // ramanujanBlock switch block (nil = no fork, 0 = already activated)
+ NielsBlock *big.Int `json:"nielsBlock,omitempty" toml:",omitempty"` // nielsBlock switch block (nil = no fork, 0 = already activated)
+ MirrorSyncBlock *big.Int `json:"mirrorSyncBlock,omitempty" toml:",omitempty"` // mirrorSyncBlock switch block (nil = no fork, 0 = already activated)
// Various consensus engines
Ethash *EthashConfig `json:"ethash,omitempty" toml:",omitempty"`
@@ -601,17 +615,6 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
}
var lastFork fork
for _, cur := range []fork{
- {name: "homesteadBlock", block: c.HomesteadBlock},
- {name: "daoForkBlock", block: c.DAOForkBlock, optional: true},
- {name: "eip150Block", block: c.EIP150Block},
- {name: "eip155Block", block: c.EIP155Block},
- {name: "eip158Block", block: c.EIP158Block},
- {name: "byzantiumBlock", block: c.ByzantiumBlock},
- {name: "constantinopleBlock", block: c.ConstantinopleBlock},
- {name: "petersburgBlock", block: c.PetersburgBlock},
- {name: "istanbulBlock", block: c.IstanbulBlock},
- {name: "muirGlacierBlock", block: c.MuirGlacierBlock, optional: true},
- {name: "ramanujanBlock", block: c.RamanujanBlock},
{name: "mirrorSyncBlock", block: c.MirrorSyncBlock},
{name: "berlinBlock", block: c.BerlinBlock},
} {
diff --git a/rlp/decode.go b/rlp/decode.go
index 79b7ef0626..a7054edc44 100644
--- a/rlp/decode.go
+++ b/rlp/decode.go
@@ -220,20 +220,51 @@ func decodeBigIntNoPtr(s *Stream, val reflect.Value) error {
}
func decodeBigInt(s *Stream, val reflect.Value) error {
- b, err := s.Bytes()
- if err != nil {
+ var buffer []byte
+ kind, size, err := s.Kind()
+ switch {
+ case err != nil:
return wrapStreamError(err, val.Type())
+ case kind == List:
+ return wrapStreamError(ErrExpectedString, val.Type())
+ case kind == Byte:
+ buffer = s.uintbuf[:1]
+ buffer[0] = s.byteval
+ s.kind = -1 // re-arm Kind
+ case size == 0:
+ // Avoid zero-length read.
+ s.kind = -1
+ case size <= uint64(len(s.uintbuf)):
+ // For integers smaller than s.uintbuf, allocating a buffer
+ // can be avoided.
+ buffer = s.uintbuf[:size]
+ if err := s.readFull(buffer); err != nil {
+ return wrapStreamError(err, val.Type())
+ }
+ // Reject inputs where single byte encoding should have been used.
+ if size == 1 && buffer[0] < 128 {
+ return wrapStreamError(ErrCanonSize, val.Type())
+ }
+ default:
+ // For large integers, a temporary buffer is needed.
+ buffer = make([]byte, size)
+ if err := s.readFull(buffer); err != nil {
+ return wrapStreamError(err, val.Type())
+ }
}
+
+ // Reject leading zero bytes.
+ if len(buffer) > 0 && buffer[0] == 0 {
+ return wrapStreamError(ErrCanonInt, val.Type())
+ }
+
+ // Set the integer bytes.
i := val.Interface().(*big.Int)
if i == nil {
i = new(big.Int)
val.Set(reflect.ValueOf(i))
}
- // Reject leading zero bytes
- if len(b) > 0 && b[0] == 0 {
- return wrapStreamError(ErrCanonInt, val.Type())
- }
- i.SetBytes(b)
+ i.SetBytes(buffer)
return nil
}
@@ -245,7 +276,7 @@ func makeListDecoder(typ reflect.Type, tag tags) (decoder, error) {
}
return decodeByteSlice, nil
}
- etypeinfo := cachedTypeInfo1(etype, tags{})
+ etypeinfo := theTC.infoWhileGenerating(etype, tags{})
if etypeinfo.decoderErr != nil {
return nil, etypeinfo.decoderErr
}
@@ -348,25 +379,23 @@ func decodeByteArray(s *Stream, val reflect.Value) error {
if err != nil {
return err
}
- vlen := val.Len()
+ slice := byteArrayBytes(val)
switch kind {
case Byte:
- if vlen == 0 {
+ if len(slice) == 0 {
return &decodeError{msg: "input string too long", typ: val.Type()}
- }
- if vlen > 1 {
+ } else if len(slice) > 1 {
return &decodeError{msg: "input string too short", typ: val.Type()}
}
- bv, _ := s.Uint()
- val.Index(0).SetUint(bv)
+ slice[0] = s.byteval
+ s.kind = -1
case String:
- if uint64(vlen) < size {
+ if uint64(len(slice)) < size {
return &decodeError{msg: "input string too long", typ: val.Type()}
}
- if uint64(vlen) > size {
+ if uint64(len(slice)) > size {
return &decodeError{msg: "input string too short", typ: val.Type()}
}
- slice := val.Slice(0, vlen).Interface().([]byte)
if err := s.readFull(slice); err != nil {
return err
}
@@ -410,7 +439,7 @@ func makeStructDecoder(typ reflect.Type) (decoder, error) {
// makePtrDecoder creates a decoder that decodes into the pointer's element type.
func makePtrDecoder(typ reflect.Type, tag tags) (decoder, error) {
etype := typ.Elem()
- etypeinfo := cachedTypeInfo1(etype, tags{})
+ etypeinfo := theTC.infoWhileGenerating(etype, tags{})
switch {
case etypeinfo.decoderErr != nil:
return nil, etypeinfo.decoderErr
@@ -504,7 +533,7 @@ func decodeDecoder(s *Stream, val reflect.Value) error {
}
// Kind represents the kind of value contained in an RLP stream.
-type Kind int
+type Kind int8
const (
Byte Kind = iota
@@ -547,22 +576,16 @@ type ByteReader interface {
type Stream struct {
r ByteReader
- // number of bytes remaining to be read from r.
- remaining uint64
- limited bool
-
- // auxiliary buffer for integer decoding
- uintbuf []byte
-
- kind Kind // kind of value ahead
- size uint64 // size of value ahead
- byteval byte // value of single byte in type tag
- kinderr error // error from last readKind
- stack []listpos
+ remaining uint64 // number of bytes remaining to be read from r
+ size uint64 // size of value ahead
+ kinderr error // error from last readKind
+ stack []uint64 // list sizes
+ uintbuf [32]byte // auxiliary buffer for integer decoding
+ kind Kind // kind of value ahead
+ byteval byte // value of single byte in type tag
+ limited bool // true if input limit is in effect
}
-type listpos struct{ pos, size uint64 }
-
// NewStream creates a new decoding stream reading from r.
//
// If r implements the ByteReader interface, Stream will
@@ -632,8 +655,8 @@ func (s *Stream) Raw() ([]byte, error) {
s.kind = -1 // rearm Kind
return []byte{s.byteval}, nil
}
- // the original header has already been read and is no longer
- // available. read content and put a new header in front of it.
+ // The original header has already been read and is no longer
+ // available. Read content and put a new header in front of it.
start := headsize(size)
buf := make([]byte, uint64(start)+size)
if err := s.readFull(buf[start:]); err != nil {
@@ -716,7 +739,14 @@ func (s *Stream) List() (size uint64, err error) {
if kind != List {
return 0, ErrExpectedList
}
- s.stack = append(s.stack, listpos{0, size})
+
+ // Remove size of inner list from outer list before pushing the new size
+ // onto the stack. This ensures that the remaining outer list size will
+ // be correct after the matching call to ListEnd.
+ if inList, limit := s.listLimit(); inList {
+ s.stack[len(s.stack)-1] = limit - size
+ }
+ s.stack = append(s.stack, size)
s.kind = -1
s.size = 0
return size, nil
@@ -725,17 +755,13 @@ func (s *Stream) List() (size uint64, err error) {
// ListEnd returns to the enclosing list.
// The input reader must be positioned at the end of a list.
func (s *Stream) ListEnd() error {
- if len(s.stack) == 0 {
+ // Ensure that no more data is remaining in the current list.
+ if inList, listLimit := s.listLimit(); !inList {
return errNotInList
- }
- tos := s.stack[len(s.stack)-1]
- if tos.pos != tos.size {
+ } else if listLimit > 0 {
return errNotAtEOL
}
s.stack = s.stack[:len(s.stack)-1] // pop
- if len(s.stack) > 0 {
- s.stack[len(s.stack)-1].pos += tos.size
- }
s.kind = -1
s.size = 0
return nil
@@ -763,7 +789,7 @@ func (s *Stream) Decode(val interface{}) error {
err = decoder(s, rval.Elem())
if decErr, ok := err.(*decodeError); ok && len(decErr.ctx) > 0 {
- // add decode target type to error so context has more meaning
+ // Add decode target type to error so context has more meaning.
decErr.ctx = append(decErr.ctx, fmt.Sprint("(", rtyp.Elem(), ")"))
}
return err
@@ -786,6 +812,9 @@ func (s *Stream) Reset(r io.Reader, inputLimit uint64) {
case *bytes.Reader:
s.remaining = uint64(br.Len())
s.limited = true
+ case *bytes.Buffer:
+ s.remaining = uint64(br.Len())
+ s.limited = true
case *strings.Reader:
s.remaining = uint64(br.Len())
s.limited = true
@@ -804,10 +833,8 @@ func (s *Stream) Reset(r io.Reader, inputLimit uint64) {
s.size = 0
s.kind = -1
s.kinderr = nil
- if s.uintbuf == nil {
- s.uintbuf = make([]byte, 8)
- }
s.byteval = 0
+ s.uintbuf = [32]byte{}
}
// Kind returns the kind and size of the next value in the
@@ -822,35 +849,29 @@ func (s *Stream) Reset(r io.Reader, inputLimit uint64) {
// the value. Subsequent calls to Kind (until the value is decoded)
// will not advance the input reader and return cached information.
func (s *Stream) Kind() (kind Kind, size uint64, err error) {
- var tos *listpos
- if len(s.stack) > 0 {
- tos = &s.stack[len(s.stack)-1]
- }
- if s.kind < 0 {
- s.kinderr = nil
- // Don't read further if we're at the end of the
- // innermost list.
- if tos != nil && tos.pos == tos.size {
- return 0, 0, EOL
- }
- s.kind, s.size, s.kinderr = s.readKind()
- if s.kinderr == nil {
- if tos == nil {
- // At toplevel, check that the value is smaller
- // than the remaining input length.
- if s.limited && s.size > s.remaining {
- s.kinderr = ErrValueTooLarge
- }
- } else {
- // Inside a list, check that the value doesn't overflow the list.
- if s.size > tos.size-tos.pos {
- s.kinderr = ErrElemTooLarge
- }
- }
+ if s.kind >= 0 {
+ return s.kind, s.size, s.kinderr
+ }
+
+ // Check for end of list. This needs to be done here because readKind
+ // checks against the list size, and would return the wrong error.
+ inList, listLimit := s.listLimit()
+ if inList && listLimit == 0 {
+ return 0, 0, EOL
+ }
+ // Read the actual size tag.
+ s.kind, s.size, s.kinderr = s.readKind()
+ if s.kinderr == nil {
+ // Check the data size of the value ahead against input limits. This
+ // is done here because many decoders require allocating an input
+ // buffer matching the value size. Checking it here protects those
+ // decoders from inputs declaring very large value size.
+ if inList && s.size > listLimit {
+ s.kinderr = ErrElemTooLarge
+ } else if s.limited && s.size > s.remaining {
+ s.kinderr = ErrValueTooLarge
}
}
- // Note: this might return a sticky error generated
- // by an earlier call to readKind.
return s.kind, s.size, s.kinderr
}
@@ -877,37 +898,35 @@ func (s *Stream) readKind() (kind Kind, size uint64, err error) {
s.byteval = b
return Byte, 0, nil
case b < 0xB8:
- // Otherwise, if a string is 0-55 bytes long,
- // the RLP encoding consists of a single byte with value 0x80 plus the
- // length of the string followed by the string. The range of the first
- // byte is thus [0x80, 0xB7].
+ // Otherwise, if a string is 0-55 bytes long, the RLP encoding consists
+ // of a single byte with value 0x80 plus the length of the string
+ // followed by the string. The range of the first byte is thus [0x80, 0xB7].
return String, uint64(b - 0x80), nil
case b < 0xC0:
- // If a string is more than 55 bytes long, the
- // RLP encoding consists of a single byte with value 0xB7 plus the length
- // of the length of the string in binary form, followed by the length of
- // the string, followed by the string. For example, a length-1024 string
- // would be encoded as 0xB90400 followed by the string. The range of
- // the first byte is thus [0xB8, 0xBF].
+ // If a string is more than 55 bytes long, the RLP encoding consists of a
+ // single byte with value 0xB7 plus the length of the length of the
+ // string in binary form, followed by the length of the string, followed
+ // by the string. For example, a length-1024 string would be encoded as
+ // 0xB90400 followed by the string. The range of the first byte is thus
+ // [0xB8, 0xBF].
size, err = s.readUint(b - 0xB7)
if err == nil && size < 56 {
err = ErrCanonSize
}
return String, size, err
case b < 0xF8:
- // If the total payload of a list
- // (i.e. the combined length of all its items) is 0-55 bytes long, the
- // RLP encoding consists of a single byte with value 0xC0 plus the length
- // of the list followed by the concatenation of the RLP encodings of the
- // items. The range of the first byte is thus [0xC0, 0xF7].
+ // If the total payload of a list (i.e. the combined length of all its
+ // items) is 0-55 bytes long, the RLP encoding consists of a single byte
+ // with value 0xC0 plus the length of the list followed by the
+ // concatenation of the RLP encodings of the items. The range of the
+ // first byte is thus [0xC0, 0xF7].
return List, uint64(b - 0xC0), nil
default:
- // If the total payload of a list is more than 55 bytes long,
- // the RLP encoding consists of a single byte with value 0xF7
- // plus the length of the length of the payload in binary
- // form, followed by the length of the payload, followed by
- // the concatenation of the RLP encodings of the items. The
- // range of the first byte is thus [0xF8, 0xFF].
+ // If the total payload of a list is more than 55 bytes long, the RLP
+ // encoding consists of a single byte with value 0xF7 plus the length of
+ // the length of the payload in binary form, followed by the length of
+ // the payload, followed by the concatenation of the RLP encodings of
+ // the items. The range of the first byte is thus [0xF8, 0xFF].
size, err = s.readUint(b - 0xF7)
if err == nil && size < 56 {
err = ErrCanonSize
@@ -925,23 +944,24 @@ func (s *Stream) readUint(size byte) (uint64, error) {
b, err := s.readByte()
return uint64(b), err
default:
- start := int(8 - size)
- for i := 0; i < start; i++ {
- s.uintbuf[i] = 0
+ buffer := s.uintbuf[:8]
+ for i := range buffer {
+ buffer[i] = 0
}
- if err := s.readFull(s.uintbuf[start:]); err != nil {
+ start := int(8 - size)
+ if err := s.readFull(buffer[start:]); err != nil {
return 0, err
}
- if s.uintbuf[start] == 0 {
- // Note: readUint is also used to decode integer
- // values. The error needs to be adjusted to become
- // ErrCanonInt in this case.
+ if buffer[start] == 0 {
+ // Note: readUint is also used to decode integer values.
+ // The error needs to be adjusted to become ErrCanonInt in this case.
return 0, ErrCanonSize
}
- return binary.BigEndian.Uint64(s.uintbuf), nil
+ return binary.BigEndian.Uint64(buffer[:]), nil
}
}
+// readFull reads into buf from the underlying stream.
func (s *Stream) readFull(buf []byte) (err error) {
if err := s.willRead(uint64(len(buf))); err != nil {
return err
@@ -963,6 +983,7 @@ func (s *Stream) readFull(buf []byte) (err error) {
return err
}
+// readByte reads a single byte from the underlying stream.
func (s *Stream) readByte() (byte, error) {
if err := s.willRead(1); err != nil {
return 0, err
@@ -974,16 +995,16 @@ func (s *Stream) readByte() (byte, error) {
return b, err
}
+// willRead is called before any read from the underlying stream. It checks
+// n against size limits, and updates the limits if n doesn't overflow them.
func (s *Stream) willRead(n uint64) error {
s.kind = -1 // rearm Kind
- if len(s.stack) > 0 {
- // check list overflow
- tos := s.stack[len(s.stack)-1]
- if n > tos.size-tos.pos {
+ if inList, limit := s.listLimit(); inList {
+ if n > limit {
return ErrElemTooLarge
}
- s.stack[len(s.stack)-1].pos += n
+ s.stack[len(s.stack)-1] = limit - n
}
if s.limited {
if n > s.remaining {
@@ -993,3 +1014,11 @@ func (s *Stream) willRead(n uint64) error {
}
return nil
}
+
+// listLimit returns the amount of data remaining in the innermost list.
+func (s *Stream) listLimit() (inList bool, limit uint64) {
+ if len(s.stack) == 0 {
+ return false, 0
+ }
+ return true, s.stack[len(s.stack)-1]
+}
diff --git a/rlp/decode_test.go b/rlp/decode_test.go
index d94c3969b2..f8af3897c0 100644
--- a/rlp/decode_test.go
+++ b/rlp/decode_test.go
@@ -26,6 +26,8 @@ import (
"reflect"
"strings"
"testing"
+
+ "github.com/ethereum/go-ethereum/common/math"
)
func TestStreamKind(t *testing.T) {
@@ -327,6 +329,11 @@ type recstruct struct {
Child *recstruct `rlp:"nil"`
}
+type bigIntStruct struct {
+ I *big.Int
+ B string
+}
+
type invalidNilTag struct {
X []byte `rlp:"nil"`
}
@@ -370,10 +377,11 @@ type intField struct {
}
var (
- veryBigInt = big.NewInt(0).Add(
+ veryBigInt = new(big.Int).Add(
big.NewInt(0).Lsh(big.NewInt(0xFFFFFFFFFFFFFF), 16),
big.NewInt(0xFFFF),
)
+ veryVeryBigInt = new(big.Int).Exp(veryBigInt, big.NewInt(8), nil)
)
type hasIgnoredField struct {
@@ -450,12 +458,15 @@ var decodeTests = []decodeTest{
{input: "C0", ptr: new(string), error: "rlp: expected input string or byte for string"},
// big ints
+ {input: "80", ptr: new(*big.Int), value: big.NewInt(0)},
{input: "01", ptr: new(*big.Int), value: big.NewInt(1)},
{input: "89FFFFFFFFFFFFFFFFFF", ptr: new(*big.Int), value: veryBigInt},
+ {input: "B848FFFFFFFFFFFFFFFFF800000000000000001BFFFFFFFFFFFFFFFFC8000000000000000045FFFFFFFFFFFFFFFFC800000000000000001BFFFFFFFFFFFFFFFFF8000000000000000001", ptr: new(*big.Int), value: veryVeryBigInt},
{input: "10", ptr: new(big.Int), value: *big.NewInt(16)}, // non-pointer also works
{input: "C0", ptr: new(*big.Int), error: "rlp: expected input string or byte for *big.Int"},
- {input: "820001", ptr: new(big.Int), error: "rlp: non-canonical integer (leading zero bytes) for *big.Int"},
- {input: "8105", ptr: new(big.Int), error: "rlp: non-canonical size information for *big.Int"},
+ {input: "00", ptr: new(*big.Int), error: "rlp: non-canonical integer (leading zero bytes) for *big.Int"},
+ {input: "820001", ptr: new(*big.Int), error: "rlp: non-canonical integer (leading zero bytes) for *big.Int"},
+ {input: "8105", ptr: new(*big.Int), error: "rlp: non-canonical size information for *big.Int"},
// structs
{
@@ -468,6 +479,13 @@ var decodeTests = []decodeTest{
ptr: new(recstruct),
value: recstruct{1, &recstruct{2, &recstruct{3, nil}}},
},
+ {
+ // This checks that empty big.Int works correctly in struct context. It's easy to
+ // miss the update of s.kind for this case, so it needs its own test.
+ input: "C58083343434",
+ ptr: new(bigIntStruct),
+ value: bigIntStruct{new(big.Int), "444"},
+ },
// struct errors
{
@@ -898,7 +916,7 @@ func ExampleStream() {
// [102 111 111 98 97 114]
}
-func BenchmarkDecode(b *testing.B) {
+func BenchmarkDecodeUints(b *testing.B) {
enc := encodeTestSlice(90000)
b.SetBytes(int64(len(enc)))
b.ReportAllocs()
@@ -913,7 +931,7 @@ func BenchmarkDecode(b *testing.B) {
}
}
-func BenchmarkDecodeIntSliceReuse(b *testing.B) {
+func BenchmarkDecodeUintsReused(b *testing.B) {
enc := encodeTestSlice(100000)
b.SetBytes(int64(len(enc)))
b.ReportAllocs()
@@ -928,6 +946,44 @@ func BenchmarkDecodeIntSliceReuse(b *testing.B) {
}
}
+func BenchmarkDecodeByteArrayStruct(b *testing.B) {
+ enc, err := EncodeToBytes(&byteArrayStruct{})
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.SetBytes(int64(len(enc)))
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ var out byteArrayStruct
+ for i := 0; i < b.N; i++ {
+ if err := DecodeBytes(enc, &out); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkDecodeBigInts(b *testing.B) {
+ ints := make([]*big.Int, 200)
+ for i := range ints {
+ ints[i] = math.BigPow(2, int64(i))
+ }
+ enc, err := EncodeToBytes(ints)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.SetBytes(int64(len(enc)))
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ var out []*big.Int
+ for i := 0; i < b.N; i++ {
+ if err := DecodeBytes(enc, &out); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func encodeTestSlice(n uint) []byte {
s := make([]uint, n)
for i := uint(0); i < n; i++ {
diff --git a/rlp/encode.go b/rlp/encode.go
index 77b591045d..9fc270ef6d 100644
--- a/rlp/encode.go
+++ b/rlp/encode.go
@@ -124,19 +124,15 @@ func puthead(buf []byte, smalltag, largetag byte, size uint64) int {
}
type encbuf struct {
- str []byte // string data, contains everything except list headers
- lheads []listhead // all list headers
- lhsize int // sum of sizes of all encoded list headers
- sizebuf [9]byte // auxiliary buffer for uint encoding
- bufvalue reflect.Value // used in writeByteArrayCopy
+ str []byte // string data, contains everything except list headers
+ lheads []listhead // all list headers
+ lhsize int // sum of sizes of all encoded list headers
+ sizebuf [9]byte // auxiliary buffer for uint encoding
}
// encbufs are pooled.
var encbufPool = sync.Pool{
- New: func() interface{} {
- var bytes []byte
- return &encbuf{bufvalue: reflect.ValueOf(&bytes).Elem()}
- },
+ New: func() interface{} { return new(encbuf) },
}
func (w *encbuf) reset() {
@@ -429,21 +425,14 @@ func writeBytes(val reflect.Value, w *encbuf) error {
return nil
}
-var byteType = reflect.TypeOf(byte(0))
-
func makeByteArrayWriter(typ reflect.Type) writer {
- length := typ.Len()
- if length == 0 {
+ switch typ.Len() {
+ case 0:
return writeLengthZeroByteArray
- } else if length == 1 {
+ case 1:
return writeLengthOneByteArray
- }
- if typ.Elem() != byteType {
- return writeNamedByteArray
- }
- return func(val reflect.Value, w *encbuf) error {
- writeByteArrayCopy(length, val, w)
- return nil
+ default:
+ return writeByteArray
}
}
@@ -462,29 +451,18 @@ func writeLengthOneByteArray(val reflect.Value, w *encbuf) error {
return nil
}
-// writeByteArrayCopy encodes byte arrays using reflect.Copy. This is
-// the fast path for [N]byte where N > 1.
-func writeByteArrayCopy(length int, val reflect.Value, w *encbuf) {
- w.encodeStringHeader(length)
- offset := len(w.str)
- w.str = append(w.str, make([]byte, length)...)
- w.bufvalue.SetBytes(w.str[offset:])
- reflect.Copy(w.bufvalue, val)
-}
-
-// writeNamedByteArray encodes byte arrays with named element type.
-// This exists because reflect.Copy can't be used with such types.
-func writeNamedByteArray(val reflect.Value, w *encbuf) error {
+func writeByteArray(val reflect.Value, w *encbuf) error {
if !val.CanAddr() {
- // Slice requires the value to be addressable.
- // Make it addressable by copying.
+ // Getting the byte slice of val requires it to be addressable. Make it
+ // addressable by copying.
copy := reflect.New(val.Type()).Elem()
copy.Set(val)
val = copy
}
- size := val.Len()
- slice := val.Slice(0, size).Bytes()
- w.encodeString(slice)
+
+ slice := byteArrayBytes(val)
+ w.encodeStringHeader(len(slice))
+ w.str = append(w.str, slice...)
return nil
}
@@ -517,7 +495,7 @@ func writeInterface(val reflect.Value, w *encbuf) error {
}
func makeSliceWriter(typ reflect.Type, ts tags) (writer, error) {
- etypeinfo := cachedTypeInfo1(typ.Elem(), tags{})
+ etypeinfo := theTC.infoWhileGenerating(typ.Elem(), tags{})
if etypeinfo.writerErr != nil {
return nil, etypeinfo.writerErr
}
@@ -560,7 +538,7 @@ func makeStructWriter(typ reflect.Type) (writer, error) {
}
func makePtrWriter(typ reflect.Type, ts tags) (writer, error) {
- etypeinfo := cachedTypeInfo1(typ.Elem(), tags{})
+ etypeinfo := theTC.infoWhileGenerating(typ.Elem(), tags{})
if etypeinfo.writerErr != nil {
return nil, etypeinfo.writerErr
}
diff --git a/rlp/encode_test.go b/rlp/encode_test.go
index 418ee10a35..523ea97cf2 100644
--- a/rlp/encode_test.go
+++ b/rlp/encode_test.go
@@ -23,6 +23,7 @@ import (
"io"
"io/ioutil"
"math/big"
+ "runtime"
"sync"
"testing"
@@ -130,6 +131,14 @@ var encTests = []encTest{
val: big.NewInt(0).SetBytes(unhex("010000000000000000000000000000000000000000000000000000000000000000")),
output: "A1010000000000000000000000000000000000000000000000000000000000000000",
},
+ {
+ val: veryBigInt,
+ output: "89FFFFFFFFFFFFFFFFFF",
+ },
+ {
+ val: veryVeryBigInt,
+ output: "B848FFFFFFFFFFFFFFFFF800000000000000001BFFFFFFFFFFFFFFFFC8000000000000000045FFFFFFFFFFFFFFFFC800000000000000001BFFFFFFFFFFFFFFFFF8000000000000000001",
+ },
// non-pointer big.Int
{val: *big.NewInt(0), output: "80"},
@@ -462,3 +471,54 @@ func BenchmarkEncodeBigInts(b *testing.B) {
}
}
}
+
+func BenchmarkEncodeConcurrentInterface(b *testing.B) {
+ type struct1 struct {
+ A string
+ B *big.Int
+ C [20]byte
+ }
+ value := []interface{}{
+ uint(999),
+ &struct1{A: "hello", B: big.NewInt(0xFFFFFFFF)},
+ [10]byte{1, 2, 3, 4, 5, 6},
+ []string{"yeah", "yeah", "yeah"},
+ }
+
+ var wg sync.WaitGroup
+ for cpu := 0; cpu < runtime.NumCPU(); cpu++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ var buffer bytes.Buffer
+ for i := 0; i < b.N; i++ {
+ buffer.Reset()
+ err := Encode(&buffer, value)
+ if err != nil {
+ panic(err)
+ }
+ }
+ }()
+ }
+ wg.Wait()
+}
+
+type byteArrayStruct struct {
+ A [20]byte
+ B [32]byte
+ C [32]byte
+}
+
+func BenchmarkEncodeByteArrayStruct(b *testing.B) {
+ var out bytes.Buffer
+ var value byteArrayStruct
+
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ out.Reset()
+ if err := Encode(&out, &value); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/rlp/safe.go b/rlp/safe.go
new file mode 100644
index 0000000000..c881650a0d
--- /dev/null
+++ b/rlp/safe.go
@@ -0,0 +1,26 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+// +build nacl js !cgo
+
+package rlp
+
+import "reflect"
+
+// byteArrayBytes returns a slice of the byte array v.
+func byteArrayBytes(v reflect.Value) []byte {
+ return v.Slice(0, v.Len()).Bytes()
+}
diff --git a/rlp/typecache.go b/rlp/typecache.go
index 6026e1a649..62553d3b55 100644
--- a/rlp/typecache.go
+++ b/rlp/typecache.go
@@ -21,13 +21,10 @@ import (
"reflect"
"strings"
"sync"
+ "sync/atomic"
)
-var (
- typeCacheMutex sync.RWMutex
- typeCache = make(map[typekey]*typeinfo)
-)
-
+// typeinfo is an entry in the type cache.
type typeinfo struct {
decoder decoder
decoderErr error // error from makeDecoder
@@ -38,15 +35,16 @@ type typeinfo struct {
// tags represents struct tags.
type tags struct {
// rlp:"nil" controls whether empty input results in a nil pointer.
- nilOK bool
-
- // This controls whether nil pointers are encoded/decoded as empty strings
- // or empty lists.
+ // nilKind is the kind of empty value allowed for the field.
nilKind Kind
+ nilOK bool
+
+ // rlp:"optional" allows for a field to be missing in the input list.
+ // If this is set, all subsequent fields must also be optional.
+ optional bool
- // rlp:"tail" controls whether this field swallows additional list
- // elements. It can only be set for the last field, which must be
- // of slice type.
+ // rlp:"tail" controls whether this field swallows additional list elements. It can
+ // only be set for the last field, which must be of slice type.
tail bool
// rlp:"-" ignores fields.
@@ -64,68 +62,126 @@ type decoder func(*Stream, reflect.Value) error
type writer func(reflect.Value, *encbuf) error
+var theTC = newTypeCache()
+
+type typeCache struct {
+ cur atomic.Value
+
+ // This lock synchronizes writers.
+ mu sync.Mutex
+ next map[typekey]*typeinfo
+}
+
+func newTypeCache() *typeCache {
+ c := new(typeCache)
+ c.cur.Store(make(map[typekey]*typeinfo))
+ return c
+}
+
func cachedDecoder(typ reflect.Type) (decoder, error) {
- info := cachedTypeInfo(typ, tags{})
+ info := theTC.info(typ)
return info.decoder, info.decoderErr
}
func cachedWriter(typ reflect.Type) (writer, error) {
- info := cachedTypeInfo(typ, tags{})
+ info := theTC.info(typ)
return info.writer, info.writerErr
}
-func cachedTypeInfo(typ reflect.Type, tags tags) *typeinfo {
- typeCacheMutex.RLock()
- info := typeCache[typekey{typ, tags}]
- typeCacheMutex.RUnlock()
- if info != nil {
+func (c *typeCache) info(typ reflect.Type) *typeinfo {
+ key := typekey{Type: typ}
+ if info := c.cur.Load().(map[typekey]*typeinfo)[key]; info != nil {
return info
}
- // not in the cache, need to generate info for this type.
- typeCacheMutex.Lock()
- defer typeCacheMutex.Unlock()
- return cachedTypeInfo1(typ, tags)
+
+ // Not in the cache, need to generate info for this type.
+ return c.generate(typ, tags{})
}
-func cachedTypeInfo1(typ reflect.Type, tags tags) *typeinfo {
+func (c *typeCache) generate(typ reflect.Type, tags tags) *typeinfo {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ cur := c.cur.Load().(map[typekey]*typeinfo)
+ if info := cur[typekey{typ, tags}]; info != nil {
+ return info
+ }
+
+ // Copy cur to next.
+ c.next = make(map[typekey]*typeinfo, len(cur)+1)
+ for k, v := range cur {
+ c.next[k] = v
+ }
+
+ // Generate.
+ info := c.infoWhileGenerating(typ, tags)
+
+ // next -> cur
+ c.cur.Store(c.next)
+ c.next = nil
+ return info
+}
+
+func (c *typeCache) infoWhileGenerating(typ reflect.Type, tags tags) *typeinfo {
key := typekey{typ, tags}
- info := typeCache[key]
- if info != nil {
- // another goroutine got the write lock first
+ if info := c.next[key]; info != nil {
return info
}
- // put a dummy value into the cache before generating.
- // if the generator tries to lookup itself, it will get
+ // Put a dummy value into the cache before generating.
+ // If the generator tries to lookup itself, it will get
// the dummy value and won't call itself recursively.
- info = new(typeinfo)
- typeCache[key] = info
+ info := new(typeinfo)
+ c.next[key] = info
info.generate(typ, tags)
return info
}
type field struct {
- index int
- info *typeinfo
+ index int
+ info *typeinfo
+ optional bool
}
+// structFields resolves the typeinfo of all public fields in a struct type.
func structFields(typ reflect.Type) (fields []field, err error) {
- lastPublic := lastPublicField(typ)
+ var (
+ lastPublic = lastPublicField(typ)
+ anyOptional = false
+ )
for i := 0; i < typ.NumField(); i++ {
if f := typ.Field(i); f.PkgPath == "" { // exported
tags, err := parseStructTag(typ, i, lastPublic)
if err != nil {
return nil, err
}
+
+ // Skip rlp:"-" fields.
if tags.ignored {
continue
}
- info := cachedTypeInfo1(f.Type, tags)
- fields = append(fields, field{i, info})
+ // If any field has the "optional" tag, subsequent fields must also have it.
+ if tags.optional || tags.tail {
+ anyOptional = true
+ } else if anyOptional {
+ return nil, fmt.Errorf(`rlp: struct field %v.%s needs "optional" tag`, typ, f.Name)
+ }
+ info := theTC.infoWhileGenerating(f.Type, tags)
+ fields = append(fields, field{i, info, tags.optional})
}
}
return fields, nil
}
+// firstOptionalField returns the index of the first field with the "optional" tag.
+func firstOptionalField(fields []field) int {
+ for i, f := range fields {
+ if f.optional {
+ return i
+ }
+ }
+ return len(fields)
+}
+
type structFieldError struct {
typ reflect.Type
field int
@@ -166,11 +222,19 @@ func parseStructTag(typ reflect.Type, fi, lastPublic int) (tags, error) {
case "nilList":
ts.nilKind = List
}
+ case "optional":
+ ts.optional = true
+ if ts.tail {
+ return ts, structTagError{typ, f.Name, t, `also has "tail" tag`}
+ }
case "tail":
ts.tail = true
if fi != lastPublic {
return ts, structTagError{typ, f.Name, t, "must be on last field"}
}
+ if ts.optional {
+ return ts, structTagError{typ, f.Name, t, `also has "optional" tag`}
+ }
if f.Type.Kind() != reflect.Slice {
return ts, structTagError{typ, f.Name, t, "field type is not slice"}
}
diff --git a/rlp/unsafe.go b/rlp/unsafe.go
new file mode 100644
index 0000000000..94ed5405a8
--- /dev/null
+++ b/rlp/unsafe.go
@@ -0,0 +1,35 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+// +build !nacl,!js,cgo
+
+package rlp
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// byteArrayBytes returns a slice of the byte array v.
+func byteArrayBytes(v reflect.Value) []byte {
+ len := v.Len()
+ var s []byte
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s))
+ hdr.Data = v.UnsafeAddr()
+ hdr.Cap = len
+ hdr.Len = len
+ return s
+}
diff --git a/rpc/handler.go b/rpc/handler.go
index 2ca6480ec2..b1fd20c86b 100644
--- a/rpc/handler.go
+++ b/rpc/handler.go
@@ -25,6 +25,8 @@ import (
"sync"
"time"
+ "github.com/ethereum/go-ethereum/common/gopool"
+
"github.com/ethereum/go-ethereum/log"
)
@@ -219,12 +221,12 @@ func (h *handler) cancelServerSubscriptions(err error) {
// startCallProc runs fn in a new goroutine and starts tracking it in the h.calls wait group.
func (h *handler) startCallProc(fn func(*callProc)) {
h.callWG.Add(1)
- go func() {
+ gopool.Submit(func() {
ctx, cancel := context.WithCancel(h.rootCtx)
defer h.callWG.Done()
defer cancel()
fn(&callProc{ctx: ctx})
- }()
+ })
}
// handleImmediate executes non-call messages. It returns false if the message is a
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index 46834de6da..19c79b6eed 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -230,7 +230,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo
var snaps *snapshot.Tree
if snapshotter {
- snaps, _ = snapshot.New(db, sdb.TrieDB(), 1, root, false, true, false)
+ snaps, _ = snapshot.New(db, sdb.TrieDB(), 1, 128, root, false, true, false)
}
statedb, _ = state.New(root, sdb, snaps)
return snaps, statedb
diff --git a/trie/committer.go b/trie/committer.go
index ce4065f5fd..250b36e37f 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -44,7 +44,6 @@ type leaf struct {
// By 'some level' of parallelism, it's still the case that all leaves will be
// processed sequentially - onleaf will never be called in parallel or out of order.
type committer struct {
- tmp sliceBuffer
sha crypto.KeccakState
onleaf LeafCallback
@@ -55,7 +54,6 @@ type committer struct {
var committerPool = sync.Pool{
New: func() interface{} {
return &committer{
- tmp: make(sliceBuffer, 0, 550), // cap is as large as a full fullNode.
sha: sha3.NewLegacyKeccak256().(crypto.KeccakState),
}
},
@@ -95,6 +93,7 @@ func (c *committer) commit(n node, db *Database) (node, error) {
switch cn := n.(type) {
case *shortNode:
// Commit child
+ cn.flags.dirty = false
collapsed := cn.copy()
// If the child is fullnode, recursively commit.
@@ -114,6 +113,7 @@ func (c *committer) commit(n node, db *Database) (node, error) {
}
return collapsed, nil
case *fullNode:
+ cn.flags.dirty = false
hashedKids, err := c.commitChildren(cn, db)
if err != nil {
return nil, err
diff --git a/trie/database.go b/trie/database.go
index b18665770e..76f8b26ccd 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -88,6 +88,11 @@ type Database struct {
childrenSize common.StorageSize // Storage size of the external children tracking
preimagesSize common.StorageSize // Storage size of the preimages cache
+ // Rough size metrics guarded by sizeLock (updated in Reference, read by Size).
+ sizeLock sync.RWMutex
+ roughPreimagesSize common.StorageSize
+ roughDirtiesSize common.StorageSize
+
lock sync.RWMutex
}
@@ -483,9 +488,15 @@ func (db *Database) Nodes() []common.Hash {
// are referenced together by database itself.
func (db *Database) Reference(child common.Hash, parent common.Hash) {
db.lock.Lock()
- defer db.lock.Unlock()
-
db.reference(child, parent)
+ var roughDirtiesSize = common.StorageSize((len(db.dirties)-1)*cachedNodeSize) + db.dirtiesSize + db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2))
+ var roughPreimagesSize = db.preimagesSize
+ db.lock.Unlock()
+
+ db.sizeLock.Lock()
+ db.roughDirtiesSize = roughDirtiesSize
+ db.roughPreimagesSize = roughPreimagesSize
+ db.sizeLock.Unlock()
}
// reference is the private locked version of Reference.
@@ -703,12 +714,6 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H
// Move all of the accumulated preimages into a write batch
if db.preimages != nil {
rawdb.WritePreimages(batch, db.preimages)
- if batch.ValueSize() > ethdb.IdealBatchSize {
- if err := batch.Write(); err != nil {
- return err
- }
- batch.Reset()
- }
// Since we're going to replay trie node writes into the clean cache, flush out
// any batched pre-images before continuing.
if err := batch.Write(); err != nil {
@@ -843,15 +848,9 @@ func (c *cleaner) Delete(key []byte) error {
// Size returns the current storage size of the memory cache in front of the
// persistent database layer.
func (db *Database) Size() (common.StorageSize, common.StorageSize) {
- db.lock.RLock()
- defer db.lock.RUnlock()
-
- // db.dirtiesSize only contains the useful data in the cache, but when reporting
- // the total memory consumption, the maintenance metadata is also needed to be
- // counted.
- var metadataSize = common.StorageSize((len(db.dirties) - 1) * cachedNodeSize)
- var metarootRefs = common.StorageSize(len(db.dirties[common.Hash{}].children) * (common.HashLength + 2))
- return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, db.preimagesSize
+ db.sizeLock.RLock()
+ defer db.sizeLock.RUnlock()
+ return db.roughDirtiesSize, db.roughPreimagesSize
}
// saveCache saves clean state cache to given directory path
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index e38471c1b7..c85d0831a7 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -35,7 +35,6 @@ import (
// SecureTrie is not safe for concurrent use.
type SecureTrie struct {
trie Trie
- hashKeyBuf [common.HashLength]byte
secKeyCache map[string][]byte
secKeyCacheOwner *SecureTrie // Pointer to self, replace the key cache on mismatch
}
@@ -172,6 +171,17 @@ func (t *SecureTrie) Copy() *SecureTrie {
return &cpy
}
+func (t *SecureTrie) ResetCopy() *SecureTrie {
+ cpy := *t
+ cpy.secKeyCacheOwner = nil
+ cpy.secKeyCache = nil
+ return &cpy
+}
+
+func (t *SecureTrie) GetRawTrie() Trie {
+ return t.trie
+}
+
// NodeIterator returns an iterator that returns nodes of the underlying trie. Iteration
// starts at the key after the given start key.
func (t *SecureTrie) NodeIterator(start []byte) NodeIterator {
@@ -182,12 +192,13 @@ func (t *SecureTrie) NodeIterator(start []byte) NodeIterator {
// The caller must not hold onto the return value because it will become
// invalid on the next call to hashKey or secKey.
func (t *SecureTrie) hashKey(key []byte) []byte {
+ hash := make([]byte, common.HashLength)
h := newHasher(false)
h.sha.Reset()
h.sha.Write(key)
- h.sha.Read(t.hashKeyBuf[:])
+ h.sha.Read(hash)
returnHasherToPool(h)
- return t.hashKeyBuf[:]
+ return hash
}
// getSecKeyCache returns the current secure key cache, creating a new one if
diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go
index fb6c38ee22..7bbdf29ef0 100644
--- a/trie/secure_trie_test.go
+++ b/trie/secure_trie_test.go
@@ -23,6 +23,7 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
)
@@ -119,7 +120,8 @@ func TestSecureTrieConcurrency(t *testing.T) {
pend := new(sync.WaitGroup)
pend.Add(threads)
for i := 0; i < threads; i++ {
- go func(index int) {
+ index := i
+ gopool.Submit(func() {
defer pend.Done()
for j := byte(0); j < 255; j++ {
@@ -137,7 +139,7 @@ func TestSecureTrieConcurrency(t *testing.T) {
}
}
tries[index].Commit(nil)
- }(i)
+ })
}
// Wait for all threads to finish
pend.Wait()
diff --git a/trie/sync_bloom.go b/trie/sync_bloom.go
index 1afcce21da..855c7cf5eb 100644
--- a/trie/sync_bloom.go
+++ b/trie/sync_bloom.go
@@ -24,6 +24,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/gopool"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
@@ -67,14 +68,15 @@ func NewSyncBloom(memory uint64, database ethdb.Iteratee) *SyncBloom {
bloom: bloom,
}
b.pend.Add(2)
- go func() {
+ gopool.Submit(func() {
defer b.pend.Done()
b.init(database)
- }()
- go func() {
+ })
+
+ gopool.Submit(func() {
defer b.pend.Done()
b.meter()
- }()
+ })
return b
}
diff --git a/trie/trie.go b/trie/trie.go
index 7ed235fa8a..44de1374a4 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -570,3 +570,7 @@ func (t *Trie) Reset() {
t.root = nil
t.unhashed = 0
}
+
+func (t *Trie) Size() int {
+ return estimateSize(t.root)
+}