
Merge pull request #2215 from CortexFoundation/dev
tx index fix
ucwong authored Nov 30, 2024
2 parents 8b066ee + 01f0e3d commit 5c6c8be
Showing 1 changed file with 20 additions and 62 deletions.
82 changes: 20 additions & 62 deletions core/blockchain.go
@@ -1119,14 +1119,13 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 		// Ensure genesis is in ancients.
 		if first.NumberU64() == 1 {
 			if frozen, _ := bc.db.Ancients(); frozen == 0 {
-				b := bc.genesisBlock
 				td := bc.genesisBlock.Difficulty()
-				writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{b}, []types.Receipts{nil}, td)
-				size += writeSize
+				writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{bc.genesisBlock}, []types.Receipts{nil}, td)
 				if err != nil {
 					log.Error("Error writing genesis to ancients", "err", err)
 					return 0, err
 				}
+				size += writeSize
 				log.Info("Wrote genesis to ancients")
 			}
 		}
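Besides inlining the temporary `b := bc.genesisBlock`, the hunk above moves `size += writeSize` below the error check, so the returned byte count is only consumed once the ancient write is known to have succeeded. A minimal sketch of that ordering, with a hypothetical `writeFn` standing in for a call such as rawdb.WriteAncientBlocks:

// accumulate adds a write's reported size to the running total only after
// the error has been checked, mirroring the ordering adopted above.
// writeFn is a stand-in, not code from this repository.
func accumulate(size *int64, writeFn func() (int64, error)) error {
	written, err := writeFn()
	if err != nil {
		return err // a failed write contributes nothing to the total
	}
	*size += written
	return nil
}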
@@ -1140,44 +1139,11 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 		// Write all chain data to ancients.
 		td := bc.GetTd(first.Hash(), first.NumberU64())
 		writeSize, err := rawdb.WriteAncientBlocks(bc.db, blockChain, receiptChain, td)
-		size += writeSize
 		if err != nil {
 			log.Error("Error importing chain data to ancients", "err", err)
 			return 0, err
 		}
-
-		// Write tx indices if any condition is satisfied:
-		// * If user requires to reserve all tx indices(txlookuplimit=0)
-		// * If all ancient tx indices are required to be reserved(txlookuplimit is even higher than ancientlimit)
-		// * If block number is large enough to be regarded as a recent block
-		// It means blocks below the ancientLimit-txlookupLimit won't be indexed.
-		//
-		// But if the `TxIndexTail` is not nil, e.g. Cortex is initialized with
-		// an external ancient database, during the setup, blockchain will start
-		// a background routine to re-indexed all indices in [ancients - txlookupLimit, ancients)
-		// range. In this case, all tx indices of newly imported blocks should be
-		// generated.
-		var batch = bc.db.NewBatch()
-		for i, block := range blockChain {
-			if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
-				rawdb.WriteTxLookupEntriesByBlock(batch, block)
-			} else if rawdb.ReadTxIndexTail(bc.db) != nil {
-				rawdb.WriteTxLookupEntriesByBlock(batch, block)
-			}
-			stats.processed++
-
-			if batch.ValueSize() > ctxcdb.IdealBatchSize || i == len(blockChain)-1 {
-				size += int64(batch.ValueSize())
-				if err = batch.Write(); err != nil {
-					fastBlock := bc.CurrentFastBlock().NumberU64()
-					if _, err := bc.db.TruncateHead(fastBlock + 1); err != nil {
-						log.Error("Can't truncate ancient store after failed insert", "err", err)
-					}
-					return 0, err
-				}
-				batch.Reset()
-			}
-		}
+		size += writeSize
 
 		// Sync the ancient store explicitly to ensure all data has been flushed to disk.
 		if err := bc.db.Sync(); err != nil {
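The loop removed above decided per block whether to write a tx-lookup entry into the ancient batch. A sketch of that predicate, illustrative only and not part of this change, with the values involved passed as plain parameters and the rawdb.ReadTxIndexTail lookup reduced to a boolean:

// shouldIndex restates the removed condition: index everything when no
// lookup limit is set, index the whole ancient range when the limit covers
// it, index recent blocks, and otherwise index only if a TxIndexTail was
// already written (e.g. an external ancient database was attached).
func shouldIndex(blockNumber, ancientLimit, txLookupLimit uint64, tailSet bool) bool {
	if txLookupLimit == 0 || ancientLimit <= txLookupLimit {
		return true
	}
	if blockNumber >= ancientLimit-txLookupLimit {
		return true
	}
	return tailSet
}

With this commit, none of these writes happen in the ancient path; the hunk keeps only the size accounting, and stats.processed is incremented in bulk further down instead of once per block.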
@@ -1195,8 +1161,10 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 		}
 
 		// Delete block data from the main database.
-		batch.Reset()
-		canonHashes := make(map[common.Hash]struct{})
+		var (
+			batch       = bc.db.NewBatch()
+			canonHashes = make(map[common.Hash]struct{}, len(blockChain))
+		)
 		for _, block := range blockChain {
 			canonHashes[block.Hash()] = struct{}{}
 			if block.NumberU64() == 0 {
@@ -1214,13 +1182,16 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 		if err := batch.Write(); err != nil {
 			return 0, err
 		}
+		stats.processed += int32(len(blockChain))
 		return 0, nil
 	}
 
 	// writeLive writes blockchain and corresponding receipt chain into active store.
 	writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
-		skipPresenceCheck := false
-		batch := bc.db.NewBatch()
+		var (
+			skipPresenceCheck = false
+			batch             = bc.db.NewBatch()
+		)
 		for i, block := range blockChain {
 			// Short circuit insertion if shutting down or processing failed
 			if bc.insertStopped() {
@@ -1245,7 +1216,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 			// Write all the data out into the database
 			rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
 			rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
-			rawdb.WriteTxLookupEntriesByBlock(batch, block) // Always write tx indices for live blocks, we assume they are needed
 
 			// Write everything belongs to the blocks into the database. So that
 			// we can ensure all components of body is completed(body, receipts,
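With the removal above, InsertReceiptChain no longer writes tx-lookup entries for live blocks either. If a caller still needs lookup entries for a set of blocks, the same rawdb helper can be batched separately. The sketch below is illustrative only, uses the packages this file already imports, and assumes ctxcdb.Database is the interface behind bc.db and exposes NewBatch as its go-ethereum counterpart does:

// writeLookupEntries is a hypothetical helper, not part of this commit,
// batching tx-lookup writes with the same helper the removed line called.
func writeLookupEntries(db ctxcdb.Database, blocks types.Blocks) error {
	batch := db.NewBatch()
	for _, block := range blocks {
		rawdb.WriteTxLookupEntriesByBlock(batch, block)
	}
	return batch.Write()
}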
@@ -1281,19 +1251,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 			return n, err
 		}
 	}
-	// Write the tx index tail (block number from where we index) before write any live blocks
-	if len(liveBlocks) > 0 && liveBlocks[0].NumberU64() == ancientLimit+1 {
-		// The tx index tail can only be one of the following two options:
-		// * 0: all ancient blocks have been indexed
-		// * ancient-limit: the indices of blocks before ancient-limit are ignored
-		if tail := rawdb.ReadTxIndexTail(bc.db); tail == nil {
-			if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit {
-				//rawdb.WriteTxIndexTail(bc.db, 0)
-			} else {
-				rawdb.WriteTxIndexTail(bc.db, ancientLimit-bc.txLookupLimit)
-			}
-		}
-	}
 	if len(liveBlocks) > 0 {
 		if n, err := writeLive(liveBlocks, liveReceipts); err != nil {
 			if err == errInsertionInterrupted {
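The block removed above wrote the tx index tail before any live blocks, choosing between the two values its comment lists; note that the tail-0 branch was already commented out, so only the ancientLimit-txLookupLimit write ever took effect. A compact, illustrative restatement of that choice (not code from this commit):

// txIndexTail returns the tail the removed branch would have selected:
// 0 when every ancient block stays indexed, otherwise the point
// txLookupLimit blocks below the ancient limit.
func txIndexTail(ancientLimit, txLookupLimit uint64) uint64 {
	if txLookupLimit == 0 || ancientLimit <= txLookupLimit {
		return 0
	}
	return ancientLimit - txLookupLimit
}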
@@ -1302,13 +1259,14 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 			return n, err
 		}
 	}
-
-	head := blockChain[len(blockChain)-1]
-	context := []any{
-		"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
-		"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
-		"size", common.StorageSize(size),
-	}
+	var (
+		head    = blockChain[len(blockChain)-1]
+		context = []interface{}{
+			"count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
+			"number", head.Number(), "hash", head.Hash(), "age", common.PrettyAge(time.Unix(int64(head.Time()), 0)),
+			"size", common.StorageSize(size),
+		}
+	)
 	if stats.ignored > 0 {
 		context = append(context, []any{"ignored", stats.ignored}...)
 	}
