Introduce BlockGap #5592

Merged · 4 commits · Sep 6, 2024
26 changes: 26 additions & 0 deletions prdoc/pr_5592.prdoc
@@ -0,0 +1,26 @@
title: Introduce `BlockGap`

doc:
- audience: Node Dev
description: |
This is the first step towards https://github.com/paritytech/polkadot-sdk/issues/5406,
refactoring the representation of the block gap. The refactor converts the existing
`(NumberFor<Block>, NumberFor<Block>)` tuple into a dedicated `BlockGap<NumberFor<Block>>`
struct. The change is purely structural and does not alter existing logic, but it lays
the groundwork for the follow-up PR. The database compatibility concern introduced by
the new structure is addressed as well.

The `BlockGap` refactoring results in a breaking change to the `Info` structure returned
by `client.info()`.

crates:
- name: sc-consensus-babe
bump: none
- name: sc-client-db
bump: none
- name: sc-network-sync
bump: none
- name: sc-service
bump: none
- name: sp-blockchain
bump: major
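
To make the shape of the change concrete, the sketch below (illustrative only, with plain `u64` in place of `NumberFor<Block>`) mirrors the `BlockGap` type added in this PR and shows how a caller of `client.info()` moves from tuple destructuring to named fields:

```rust
// Illustrative stand-ins for the types added in
// `substrate/primitives/blockchain/src/backend.rs`, with `u64` block numbers.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum BlockGapType {
    /// Both the header and body are missing (warp sync).
    MissingHeaderAndBody,
    /// Only the block body is missing (fast sync).
    MissingBody,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct BlockGap {
    start: u64,
    end: u64,
    gap_type: BlockGapType,
}

fn main() {
    let number = 150u64;

    // Before this PR: `Info::block_gap` was an anonymous `(start, end)` tuple.
    let old_gap: Option<(u64, u64)> = Some((101, 200));
    let was_in_gap = old_gap.map_or(false, |(s, e)| s <= number && number <= e);

    // After this PR: the same bounds travel as named fields, plus the kind of
    // gap (warp sync vs. fast sync), which the follow-up work can branch on.
    let new_gap = old_gap.map(|(start, end)| BlockGap {
        start,
        end,
        gap_type: BlockGapType::MissingHeaderAndBody,
    });
    let is_in_gap = new_gap.map_or(false, |gap| gap.start <= number && number <= gap.end);

    assert_eq!(was_in_gap, is_in_gap);
}
```

Carrying `gap_type` alongside the bounds is what lets the follow-up distinguish warp-sync gaps from fast-sync gaps without another breaking change to `Info`.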
6 changes: 4 additions & 2 deletions substrate/client/consensus/babe/src/lib.rs
@@ -1146,7 +1146,9 @@ where
let info = self.client.info();
let number = *block.header.number();

if info.block_gap.map_or(false, |(s, e)| s <= number && number <= e) || block.with_state() {
if info.block_gap.map_or(false, |gap| gap.start <= number && number <= gap.end) ||
block.with_state()
{
// Verification for imported blocks is skipped in two cases:
// 1. When importing blocks below the last finalized block during network initial
// synchronization.
@@ -1420,7 +1422,7 @@ where
// Skip babe logic if block already in chain or importing blocks during initial sync,
// otherwise the check for epoch changes will error because trying to re-import an
// epoch change or because of missing epoch data in the tree, respectively.
if info.block_gap.map_or(false, |(s, e)| s <= number && number <= e) ||
if info.block_gap.map_or(false, |gap| gap.start <= number && number <= gap.end) ||
block_status == BlockStatus::InChain
{
// When re-importing existing block strip away intermediates.
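
As a reading aid for the two hunks above: the new condition is the old tuple check with named fields, and both gap bounds remain inclusive. A minimal sketch (simplified `BlockGap` without the `gap_type` field):

```rust
struct BlockGap {
    start: u64,
    end: u64,
}

// Equivalent to the diff's `gap.start <= number && number <= gap.end`:
// both ends of the gap are inclusive.
fn in_gap(block_gap: Option<&BlockGap>, number: u64) -> bool {
    block_gap.map_or(false, |gap| (gap.start..=gap.end).contains(&number))
}

fn main() {
    let gap = BlockGap { start: 100, end: 200 };
    assert!(in_gap(Some(&gap), 100)); // start is inclusive
    assert!(in_gap(Some(&gap), 200)); // end is inclusive
    assert!(!in_gap(Some(&gap), 201));
    assert!(!in_gap(None, 150));
}
```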
73 changes: 48 additions & 25 deletions substrate/client/db/src/lib.rs
@@ -61,6 +61,7 @@ use codec::{Decode, Encode};
use hash_db::Prefix;
use sc_client_api::{
backend::NewBlockState,
blockchain::{BlockGap, BlockGapType},
leaves::{FinalizationOutcome, LeafSet},
utils::is_descendent_of,
IoInfo, MemoryInfo, MemorySize, UsageInfo,
@@ -91,6 +92,7 @@ use sp_state_machine::{
StorageValue, UsageInfo as StateUsageInfo,
};
use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, MerkleValue, PrefixedMemoryDB};
use utils::BLOCK_GAP_CURRENT_VERSION;

// Re-export the Database trait so that one can pass an implementation of it.
pub use sc_state_db::PruningMode;
@@ -522,7 +524,7 @@ impl<Block: BlockT> BlockchainDb<Block> {
}
}

fn update_block_gap(&self, gap: Option<(NumberFor<Block>, NumberFor<Block>)>) {
fn update_block_gap(&self, gap: Option<BlockGap<NumberFor<Block>>>) {
let mut meta = self.meta.write();
meta.block_gap = gap;
}
@@ -1671,35 +1673,56 @@ impl<Block: BlockT> Backend<Block> {
);
}

if let Some((mut start, end)) = block_gap {
if number == start {
start += One::one();
utils::insert_number_to_key_mapping(
&mut transaction,
columns::KEY_LOOKUP,
number,
hash,
)?;
if start > end {
transaction.remove(columns::META, meta_keys::BLOCK_GAP);
block_gap = None;
debug!(target: "db", "Removed block gap.");
} else {
block_gap = Some((start, end));
debug!(target: "db", "Update block gap. {block_gap:?}");
transaction.set(
columns::META,
meta_keys::BLOCK_GAP,
&(start, end).encode(),
);
}
block_gap_updated = true;
if let Some(mut gap) = block_gap {
match gap.gap_type {
BlockGapType::MissingHeaderAndBody =>
if number == gap.start {
gap.start += One::one();
utils::insert_number_to_key_mapping(
&mut transaction,
columns::KEY_LOOKUP,
number,
hash,
)?;
if gap.start > gap.end {
transaction.remove(columns::META, meta_keys::BLOCK_GAP);
transaction.remove(columns::META, meta_keys::BLOCK_GAP_VERSION);
block_gap = None;
debug!(target: "db", "Removed block gap.");
} else {
block_gap = Some(gap);
debug!(target: "db", "Update block gap. {block_gap:?}");
transaction.set(
columns::META,
meta_keys::BLOCK_GAP,
&gap.encode(),
);
transaction.set(
columns::META,
meta_keys::BLOCK_GAP_VERSION,
&BLOCK_GAP_CURRENT_VERSION.encode(),
);
}
block_gap_updated = true;
},
BlockGapType::MissingBody => {
unreachable!("Unsupported block gap. TODO: https://github.com/paritytech/polkadot-sdk/issues/5406")
},
}
} else if number > best_num + One::one() &&
number > One::one() && self.blockchain.header(parent_hash)?.is_none()
{
let gap = (best_num + One::one(), number - One::one());
let gap = BlockGap {
start: best_num + One::one(),
end: number - One::one(),
gap_type: BlockGapType::MissingHeaderAndBody,
};
transaction.set(columns::META, meta_keys::BLOCK_GAP, &gap.encode());
transaction.set(
columns::META,
meta_keys::BLOCK_GAP_VERSION,
&BLOCK_GAP_CURRENT_VERSION.encode(),
);
block_gap = Some(gap);
block_gap_updated = true;
debug!(target: "db", "Detected block gap {block_gap:?}");
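
Stripped of the database writes, the gap bookkeeping in this hunk is a small state transition: importing the block at `gap.start` shrinks a `MissingHeaderAndBody` gap by one, the gap disappears once `start` passes `end`, and a fresh gap is opened when a block arrives more than one past the current best while its parent header is absent. A standalone sketch of the shrink step (hypothetical `advance_gap` helper, `u64` block numbers):

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum BlockGapType {
    MissingHeaderAndBody,
    MissingBody,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct BlockGap {
    start: u64,
    end: u64,
    gap_type: BlockGapType,
}

/// Sketch of the shrink step only. The real code also updates the META column
/// (`BLOCK_GAP`, `BLOCK_GAP_VERSION`) and the number-to-hash mapping, and it
/// currently treats `MissingBody` gaps as unreachable (issue 5406).
fn advance_gap(gap: BlockGap, imported_number: u64) -> Option<BlockGap> {
    match gap.gap_type {
        BlockGapType::MissingHeaderAndBody if imported_number == gap.start => {
            let start = gap.start + 1;
            (start <= gap.end).then_some(BlockGap { start, ..gap })
        },
        _ => Some(gap),
    }
}

fn main() {
    let gap = BlockGap { start: 5, end: 6, gap_type: BlockGapType::MissingHeaderAndBody };
    let gap = advance_gap(gap, 5).expect("one block still missing");
    assert_eq!(gap.start, 6);
    assert_eq!(advance_gap(gap, 6), None); // gap fully filled and removed
}
```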
47 changes: 39 additions & 8 deletions substrate/client/db/src/utils.rs
@@ -25,10 +25,14 @@ use log::{debug, info};

use crate::{Database, DatabaseSource, DbHash};
use codec::Decode;
use sc_client_api::blockchain::{BlockGap, BlockGapType};
use sp_database::Transaction;
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, Header as HeaderT, UniqueSaturatedFrom, UniqueSaturatedInto, Zero},
traits::{
Block as BlockT, Header as HeaderT, NumberFor, UniqueSaturatedFrom, UniqueSaturatedInto,
Zero,
},
};
use sp_trie::DBValue;

@@ -38,6 +42,9 @@ pub const NUM_COLUMNS: u32 = 13;
/// Meta column. The set of keys in the column is shared by full && light storages.
pub const COLUMN_META: u32 = 0;

/// Current block gap version.
pub const BLOCK_GAP_CURRENT_VERSION: u32 = 1;

/// Keys of entries in COLUMN_META.
pub mod meta_keys {
/// Type of storage (full or light).
@@ -50,6 +57,8 @@ pub mod meta_keys {
pub const FINALIZED_STATE: &[u8; 6] = b"fstate";
/// Block gap.
pub const BLOCK_GAP: &[u8; 3] = b"gap";
/// Block gap version.
pub const BLOCK_GAP_VERSION: &[u8; 7] = b"gap_ver";
/// Genesis block hash.
pub const GENESIS_HASH: &[u8; 3] = b"gen";
/// Leaves prefix list key.
@@ -73,8 +82,8 @@ pub struct Meta<N, H> {
pub genesis_hash: H,
/// Finalized state, if any
pub finalized_state: Option<(H, N)>,
/// Block gap, start and end inclusive, if any.
pub block_gap: Option<(N, N)>,
/// Block gap, if any.
pub block_gap: Option<BlockGap<N>>,
}

/// A block lookup key: used for canonical lookup from block number to hash
@@ -197,7 +206,7 @@ fn open_database_at<Block: BlockT>(
open_kvdb_rocksdb::<Block>(path, db_type, create, *cache_size)?,
DatabaseSource::Custom { db, require_create_flag } => {
if *require_create_flag && !create {
return Err(OpenDbError::DoesNotExist)
return Err(OpenDbError::DoesNotExist);
}
db.clone()
},
@@ -364,7 +373,7 @@ pub fn check_database_type(
return Err(OpenDbError::UnexpectedDbType {
expected: db_type,
found: stored_type.to_owned(),
})
});
},
None => {
let mut transaction = Transaction::new();
@@ -515,9 +524,31 @@
} else {
None
};
let block_gap = db
.get(COLUMN_META, meta_keys::BLOCK_GAP)
.and_then(|d| Decode::decode(&mut d.as_slice()).ok());
let block_gap = match db
.get(COLUMN_META, meta_keys::BLOCK_GAP_VERSION)
.and_then(|d| u32::decode(&mut d.as_slice()).ok())
{
None => {
let old_block_gap: Option<(NumberFor<Block>, NumberFor<Block>)> = db
.get(COLUMN_META, meta_keys::BLOCK_GAP)
.and_then(|d| Decode::decode(&mut d.as_slice()).ok());

old_block_gap.map(|(start, end)| BlockGap {
start,
end,
gap_type: BlockGapType::MissingHeaderAndBody,
})
},
Some(version) => match version {
BLOCK_GAP_CURRENT_VERSION => db
.get(COLUMN_META, meta_keys::BLOCK_GAP)
.and_then(|d| Decode::decode(&mut d.as_slice()).ok()),
v =>
return Err(sp_blockchain::Error::Backend(format!(
"Unsupported block gap DB version: {v}"
))),
},
};
debug!(target: "db", "block_gap={:?}", block_gap);

Ok(Meta {
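
The version key exists because the old meta entry is the SCALE encoding of a bare `(start, end)` tuple, while the new entry also carries `gap_type`, so the two byte layouts cannot be decoded interchangeably. A self-contained sketch of the fallback decode above (assuming the `parity-scale-codec` crate imported as `codec`, as in Substrate, and `u64` block numbers):

```rust
use codec::{Decode, Encode};

const BLOCK_GAP_CURRENT_VERSION: u32 = 1;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Encode, Decode)]
enum BlockGapType {
    MissingHeaderAndBody,
    MissingBody,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Encode, Decode)]
struct BlockGap {
    start: u64,
    end: u64,
    gap_type: BlockGapType,
}

/// No `BLOCK_GAP_VERSION` entry: the gap (if any) was written by an older node
/// as a `(start, end)` tuple and is upgraded to a `MissingHeaderAndBody` gap.
/// Version 1 decodes the new struct directly; anything else is rejected.
fn read_block_gap(version: Option<&[u8]>, gap: Option<&[u8]>) -> Result<Option<BlockGap>, String> {
    match version.and_then(|mut bytes| u32::decode(&mut bytes).ok()) {
        None => Ok(gap
            .and_then(|mut bytes| <(u64, u64)>::decode(&mut bytes).ok())
            .map(|(start, end)| BlockGap {
                start,
                end,
                gap_type: BlockGapType::MissingHeaderAndBody,
            })),
        Some(BLOCK_GAP_CURRENT_VERSION) =>
            Ok(gap.and_then(|mut bytes| BlockGap::decode(&mut bytes).ok())),
        Some(v) => Err(format!("Unsupported block gap DB version: {v}")),
    }
}

fn main() {
    // Legacy entry: no version key, tuple layout.
    let legacy = (100u64, 200u64).encode();
    let upgraded = read_block_gap(None, Some(legacy.as_slice())).unwrap().unwrap();
    assert_eq!(upgraded.gap_type, BlockGapType::MissingHeaderAndBody);

    // Current entry: struct layout guarded by the version key.
    let (version, current) = (BLOCK_GAP_CURRENT_VERSION.encode(), upgraded.encode());
    let decoded = read_block_gap(Some(version.as_slice()), Some(current.as_slice())).unwrap();
    assert_eq!(decoded, Some(upgraded));
}
```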
4 changes: 2 additions & 2 deletions substrate/client/network/sync/src/strategy/chain_sync.rs
@@ -44,7 +44,7 @@ use crate::{
use codec::Encode;
use log::{debug, error, info, trace, warn};
use prometheus_endpoint::{register, Gauge, PrometheusError, Registry, U64};
use sc_client_api::{BlockBackend, ProofProvider};
use sc_client_api::{blockchain::BlockGap, BlockBackend, ProofProvider};
use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock};
use sc_network_common::sync::message::{
BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, Direction, FromBlock,
@@ -1381,7 +1381,7 @@
}
}

if let Some((start, end)) = info.block_gap {
if let Some(BlockGap { start, end, .. }) = info.block_gap {
debug!(target: LOG_TARGET, "Starting gap sync #{start} - #{end}");
self.gap_sync = Some(GapSync {
best_queued_number: start - One::one(),
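
The bootstrap keeps its previous semantics after the destructuring change: `best_queued_number` starts one block below the gap so the first block requested is exactly `gap.start`. A small sketch (hypothetical, trimmed-down `GapSync` fields):

```rust
struct BlockGap {
    start: u64,
    end: u64,
}

/// Hypothetical stand-in for the `GapSync` state set up above.
struct GapSync {
    best_queued_number: u64,
    target: u64,
}

fn start_gap_sync(gap: BlockGap) -> GapSync {
    // Queuing starts one block below the gap, so the next block to request is
    // `gap.start`; the inclusive upper bound of the gap is the sync target.
    GapSync { best_queued_number: gap.start - 1, target: gap.end }
}

fn main() {
    let sync = start_gap_sync(BlockGap { start: 1_000, end: 2_000 });
    assert_eq!(sync.best_queued_number + 1, 1_000);
    assert_eq!(sync.target, 2_000);
}
```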
5 changes: 2 additions & 3 deletions substrate/client/service/src/client/client.rs
@@ -604,9 +604,8 @@ where
}

let info = self.backend.blockchain().info();
let gap_block = info
.block_gap
.map_or(false, |(start, _)| *import_headers.post().number() == start);
let gap_block =
info.block_gap.map_or(false, |gap| *import_headers.post().number() == gap.start);

// the block is lower than our last finalized block so it must revert
// finality, refusing import.
40 changes: 32 additions & 8 deletions substrate/primitives/blockchain/src/backend.rs
@@ -17,6 +17,7 @@

//! Substrate blockchain trait

use codec::{Decode, Encode};
use parking_lot::RwLock;
use sp_runtime::{
generic::BlockId,
@@ -109,7 +110,7 @@ pub trait ForkBackend<Block: BlockT>:
for block in tree_route.retracted() {
expanded_forks.insert(block.hash);
}
continue
continue;
},
Err(_) => {
// There are cases when blocks are missing (e.g. warp-sync).
@@ -196,7 +197,7 @@ pub trait Backend<Block: BlockT>:
let info = self.info();
if info.finalized_number > *base_header.number() {
// `base_header` is on a dead fork.
return Ok(None)
return Ok(None);
}
self.leaves()?
};
@@ -207,7 +208,7 @@ pub trait Backend<Block: BlockT>:
// go backwards through the chain (via parent links)
loop {
if current_hash == base_hash {
return Ok(Some(leaf_hash))
return Ok(Some(leaf_hash));
}

let current_header = self
@@ -216,7 +217,7 @@ pub trait Backend<Block: BlockT>:

// stop search in this chain once we go below the target's block number
if current_header.number() < base_header.number() {
break
break;
}

current_hash = *current_header.parent_hash();
@@ -266,7 +267,7 @@ pub trait Backend<Block: BlockT>:

// If we have only one leaf there are no forks, and we can return early.
if finalized_block_number == Zero::zero() || leaves.len() == 1 {
return Ok(DisplacedLeavesAfterFinalization::default())
return Ok(DisplacedLeavesAfterFinalization::default());
}

// Store hashes of finalized blocks for quick checking later, the last block is the
@@ -332,7 +333,7 @@ pub trait Backend<Block: BlockT>:
elapsed = ?now.elapsed(),
"Added genesis leaf to displaced leaves."
);
continue
continue;
}

debug!(
@@ -539,6 +540,29 @@ impl<Block: BlockT> DisplacedLeavesAfterFinalization<Block> {
}
}

/// Represents the type of block gaps that may result from either warp sync or fast sync.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Encode, Decode)]
pub enum BlockGapType {
/// Both the header and body are missing, as a result of warp sync.
MissingHeaderAndBody,
/// The block body is missing, as a result of fast sync.
MissingBody,
}

/// Represents the block gap resulting from warp sync or fast sync.
///
/// A block gap is a range of blocks where either the bodies, or both headers and bodies are
/// missing.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Encode, Decode)]
pub struct BlockGap<N> {
/// The starting block number of the gap (inclusive).
pub start: N,
/// The ending block number of the gap (inclusive).
pub end: N,
/// The type of gap.
pub gap_type: BlockGapType,
}

/// Blockchain info
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct Info<Block: BlockT> {
@@ -556,8 +580,8 @@ pub struct Info<Block: BlockT> {
pub finalized_state: Option<(Block::Hash, <<Block as BlockT>::Header as HeaderT>::Number)>,
/// Number of concurrent leave forks.
pub number_leaves: usize,
/// Missing blocks after warp sync. (start, end).
pub block_gap: Option<(NumberFor<Block>, NumberFor<Block>)>,
/// Missing blocks after warp sync or fast sync.
pub block_gap: Option<BlockGap<NumberFor<Block>>>,
}

/// Block status.
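
Going by the doc comments on `BlockGapType`, a hypothetical consumer of `Info::block_gap` would branch on the gap type to decide what still needs to be downloaded; a minimal sketch (not code from this PR):

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum BlockGapType {
    MissingHeaderAndBody,
    MissingBody,
}

/// Which parts of each block in the gap still need to be fetched, following
/// the documented meaning of the two variants.
fn missing_parts(gap_type: BlockGapType) -> &'static [&'static str] {
    match gap_type {
        // Warp sync skipped the whole range: headers and bodies are absent.
        BlockGapType::MissingHeaderAndBody => &["header", "body"],
        // Fast sync stored the headers; only the bodies remain to be fetched.
        BlockGapType::MissingBody => &["body"],
    }
}

fn main() {
    assert_eq!(missing_parts(BlockGapType::MissingHeaderAndBody), &["header", "body"][..]);
    assert_eq!(missing_parts(BlockGapType::MissingBody), &["body"][..]);
}
```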