Skip to content

Commit

Permalink
chore: chain_backend (#5530)
Browse files Browse the repository at this point in the history
Description
---
Some minor improvements on the chain backend

Motivation and Context
---
Some comments and cleanup

How Has This Been Tested?
---

Unit tests
  • Loading branch information
SWvheerden authored Jun 28, 2023
1 parent 8d5e8e6 commit ee5fb6c
Show file tree
Hide file tree
Showing 9 changed files with 95 additions and 62 deletions.
33 changes: 18 additions & 15 deletions base_layer/core/src/chain_storage/block_add_result.rs
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,22 @@ impl BlockAddResult {
matches!(self, BlockAddResult::OrphanBlock)
}

/// Returns the blocks that this result added to the chain.
///
/// A reorg contributes every newly added block, a normal add contributes the
/// single block; orphaned and already-existing blocks contribute nothing.
pub fn added_blocks(&self) -> Vec<Arc<ChainBlock>> {
    match self {
        Self::Ok(block) => vec![block.clone()],
        Self::ChainReorg { added, .. } => added.clone(),
        _ => Vec::new(),
    }
}

/// Returns the blocks that this result removed from the chain.
///
/// Only a chain reorg removes blocks; every other outcome yields an empty vec.
pub fn removed_blocks(&self) -> Vec<Arc<ChainBlock>> {
    if let Self::ChainReorg { removed, .. } = self {
        removed.clone()
    } else {
        Vec::new()
    }
}

#[cfg(test)]
pub fn assert_added(&self) -> ChainBlock {
match self {
BlockAddResult::ChainReorg { added, removed } => panic!(
Expand All @@ -71,10 +87,12 @@ impl BlockAddResult {
}
}

#[cfg(test)]
pub fn assert_orphaned(&self) {
assert!(self.is_orphaned(), "Result was not orphaned");
}

#[cfg(test)]
pub fn assert_reorg(&self, num_added: usize, num_removed: usize) {
match self {
BlockAddResult::ChainReorg { added, removed } => {
Expand All @@ -90,21 +108,6 @@ impl BlockAddResult {
BlockAddResult::OrphanBlock => panic!("Expected reorg result, but was OrphanBlock"),
}
}

pub fn added_blocks(&self) -> Vec<Arc<ChainBlock>> {
match self {
Self::ChainReorg { added, removed: _ } => added.clone(),
Self::Ok(added) => vec![added.clone()],
_ => vec![],
}
}

pub fn removed_blocks(&self) -> Vec<Arc<ChainBlock>> {
match self {
Self::ChainReorg { added: _, removed } => removed.clone(),
_ => vec![],
}
}
}

impl fmt::Display for BlockAddResult {
Expand Down
20 changes: 12 additions & 8 deletions base_layer/core/src/chain_storage/blockchain_database.rs
Original file line number Diff line number Diff line change
Expand Up @@ -627,17 +627,21 @@ where B: BlockchainBackend
}
}

/// Returns the header at the tip
/// Returns the header at the tip of the chain according to local chain metadata
pub fn fetch_tip_header(&self) -> Result<ChainHeader, ChainStorageError> {
let db = self.db_read_access()?;
db.fetch_tip_header()
}

/// Fetches the last header that was added, might be past the tip, as the block body between this last header and
/// actual tip might not have been added yet
pub fn fetch_last_header(&self) -> Result<BlockHeader, ChainStorageError> {
let db = self.db_read_access()?;
db.fetch_last_header()
}

/// Fetches the last chain header that was added, might be past the tip, as the block body between this last chain
/// header and actual tip might not have been added yet
pub fn fetch_last_chain_header(&self) -> Result<ChainHeader, ChainStorageError> {
let db = self.db_read_access()?;
db.fetch_last_chain_header()
Expand Down Expand Up @@ -826,15 +830,13 @@ where B: BlockchainBackend
}

/// `calculate_mmr_roots` takes a _pre-sorted_ block body and calculates the MMR roots for it.
///
/// ## Panic
/// This function will panic if the block body is not sorted
pub fn calculate_mmr_roots(&self, block: Block) -> Result<(Block, MmrRoots), ChainStorageError> {
let db = self.db_read_access()?;
assert!(
block.body.is_sorted(),
"calculate_mmr_roots expected a sorted block body, however the block body was not sorted"
);
if !block.body.is_sorted() {
return Err(ChainStorageError::InvalidBlock(
"calculate_mmr_roots expected a sorted block body, however the block body was not sorted".to_string(),
));
};
let mmr_roots = calculate_mmr_roots(&*db, self.rules(), &block)?;
Ok((block, mmr_roots))
}
Expand Down Expand Up @@ -1549,6 +1551,8 @@ pub fn fetch_target_difficulty_for_next_block<T: BlockchainBackend>(
while header.height() > 0 && !target_difficulties.is_full() {
header = db.fetch_chain_header_in_all_chains(&header.header().prev_hash)?;

// LWMA works with the "newest" value being at the back of the array, so we need to keep pushing to the front as
// we keep adding "older" values
if header.header().pow.pow_algo == pow_algo {
target_difficulties.add_front(header.header().timestamp(), header.accumulated_data().target_difficulty);
}
Expand Down
2 changes: 2 additions & 0 deletions base_layer/core/src/chain_storage/db_transaction.rs
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,8 @@ impl DbTransaction {
DbTransaction::default()
}

/// Deletes the orphan; if it was a tip, this also deletes the orphan tip and makes its previous header the new
/// tip. This will not fail if the orphan does not exist.
pub fn delete_orphan(&mut self, hash: HashOutput) -> &mut Self {
self.operations.push(WriteOperation::DeleteOrphan(hash));
self
Expand Down
13 changes: 13 additions & 0 deletions base_layer/core/src/chain_storage/lmdb_db/lmdb.rs
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@ use crate::chain_storage::{

pub const LOG_TARGET: &str = "c::cs::lmdb_db::lmdb";

/// Makes an insertion into the lmdb table, will error if the key already exists
pub fn lmdb_insert<K, V>(
txn: &WriteTransaction<'_>,
db: &Database,
Expand Down Expand Up @@ -150,6 +151,7 @@ where
Ok(())
}

/// Deletes the given key-value pair. An error is returned if the key and value do not exist
pub fn lmdb_delete_key_value<K, V>(
txn: &WriteTransaction<'_>,
db: &Database,
Expand All @@ -164,6 +166,7 @@ where
Ok(())
}

/// Deletes all entries whose key starts with the given key prefix
pub fn lmdb_delete_keys_starting_with<V>(
txn: &WriteTransaction<'_>,
db: &Database,
Expand Down Expand Up @@ -196,6 +199,7 @@ where
Ok(result)
}

/// Retrieves the value stored under the given key
pub fn lmdb_get<K, V>(txn: &ConstTransaction<'_>, db: &Database, key: &K) -> Result<Option<V>, ChainStorageError>
where
K: AsLmdbBytes + ?Sized,
Expand All @@ -221,6 +225,7 @@ where
}
}

/// Retrieves all values matching the given key
pub fn lmdb_get_multiple<K, V>(txn: &ConstTransaction<'_>, db: &Database, key: &K) -> Result<Vec<V>, ChainStorageError>
where
K: AsLmdbBytes + FromLmdbBytes + ?Sized,
Expand Down Expand Up @@ -250,6 +255,7 @@ where
Ok(result)
}

/// Retrieves the last value stored in the database
pub fn lmdb_last<V>(txn: &ConstTransaction<'_>, db: &Database) -> Result<Option<V>, ChainStorageError>
where V: DeserializeOwned {
let mut cursor = txn.cursor(db)?;
Expand All @@ -270,6 +276,7 @@ where V: DeserializeOwned {
}
}

/// Checks if the key exists in the database
pub fn lmdb_exists<K>(txn: &ConstTransaction<'_>, db: &Database, key: &K) -> Result<bool, ChainStorageError>
where K: AsLmdbBytes + ?Sized {
let access = txn.access();
Expand All @@ -283,6 +290,7 @@ where K: AsLmdbBytes + ?Sized {
}
}

/// Returns the number of entries in the database table
pub fn lmdb_len(txn: &ConstTransaction<'_>, db: &Database) -> Result<usize, ChainStorageError> {
let stats = txn.db_stat(db).map_err(|e| {
error!(target: LOG_TARGET, "Could not read length from lmdb: {:?}", e);
Expand Down Expand Up @@ -310,6 +318,7 @@ where
Ok(KeyPrefixCursor::new(cursor, access, prefix_key))
}

/// Fetches all values matching the key prefix
pub fn lmdb_fetch_matching_after<V>(
txn: &ConstTransaction<'_>,
db: &Database,
Expand All @@ -326,6 +335,7 @@ where
Ok(result)
}

/// Fetches the first value matching the key prefix
pub fn lmdb_first_after<K, V>(
txn: &ConstTransaction<'_>,
db: &Database,
Expand All @@ -350,6 +360,7 @@ where
}
}

/// Filters and maps the values using the given function
pub fn lmdb_filter_map_values<F, V, R>(
txn: &ConstTransaction<'_>,
db: &Database,
Expand Down Expand Up @@ -398,6 +409,7 @@ pub fn fetch_db_entry_sizes(txn: &ConstTransaction<'_>, db: &Database) -> Result
Ok((num_entries, total_key_size, total_value_size))
}

/// Deletes entries selected by the given filter function
pub fn lmdb_delete_each_where<K, V, F>(
txn: &WriteTransaction<'_>,
db: &Database,
Expand Down Expand Up @@ -435,6 +447,7 @@ where
Ok(num_deleted)
}

/// Deletes the entire database
pub fn lmdb_clear(txn: &WriteTransaction<'_>, db: &Database) -> Result<usize, ChainStorageError> {
let mut cursor = txn.cursor(db)?;
let mut access = txn.access();
Expand Down
9 changes: 0 additions & 9 deletions base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1138,15 +1138,6 @@ impl LMDBDatabase {
}
}

if lmdb_exists(txn, &self.orphan_header_accumulated_data_db, hash.as_slice())? {
lmdb_delete(
txn,
&self.orphan_header_accumulated_data_db,
hash.as_slice(),
"orphan_header_accumulated_data_db",
)?;
}

if lmdb_exists(txn, &self.orphan_header_accumulated_data_db, hash.as_slice())? {
lmdb_delete(
txn,
Expand Down
15 changes: 7 additions & 8 deletions base_layer/core/tests/tests/base_node_rpc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -60,9 +60,12 @@ use tari_utilities::epoch_time::EpochTime;
use tempfile::{tempdir, TempDir};
use tokio::sync::broadcast;

use crate::helpers::{
block_builders::{chain_block, chain_block_with_new_coinbase, create_genesis_block_with_coinbase_value},
nodes::{BaseNodeBuilder, NodeInterfaces},
use crate::{
helpers::{
block_builders::{chain_block, chain_block_with_new_coinbase, create_genesis_block_with_coinbase_value},
nodes::{BaseNodeBuilder, NodeInterfaces},
},
tests::assert_block_add_result_added,
};

async fn setup() -> (
Expand Down Expand Up @@ -306,11 +309,7 @@ async fn test_get_height_at_time() {
)
.unwrap();

prev_block = base_node
.blockchain_db
.add_block(Arc::new(new_block))
.unwrap()
.assert_added();
prev_block = assert_block_add_result_added(&base_node.blockchain_db.add_block(Arc::new(new_block)).unwrap());
times.push(prev_block.header().timestamp);
}

Expand Down
25 changes: 14 additions & 11 deletions base_layer/core/tests/tests/block_validation.rs
Original file line number Diff line number Diff line change
Expand Up @@ -72,15 +72,18 @@ use tari_script::{inputs, script};
use tari_test_utils::unpack_enum;
use tari_utilities::hex::Hex;

use crate::helpers::{
block_builders::{
chain_block_with_coinbase,
chain_block_with_new_coinbase,
create_coinbase,
create_genesis_block_with_utxos,
find_header_with_achieved_difficulty,
use crate::{
helpers::{
block_builders::{
chain_block_with_coinbase,
chain_block_with_new_coinbase,
create_coinbase,
create_genesis_block_with_utxos,
find_header_with_achieved_difficulty,
},
test_blockchain::TestBlockchain,
},
test_blockchain::TestBlockchain,
tests::assert_block_add_result_added,
};

#[tokio::test]
Expand Down Expand Up @@ -122,13 +125,13 @@ async fn test_monero_blocks() {

// Now we have block 1, lets add monero data to it
add_monero_data(&mut block_1, seed1);
let cb_1 = db.add_block(Arc::new(block_1)).unwrap().assert_added();
let cb_1 = assert_block_add_result_added(&db.add_block(Arc::new(block_1)).unwrap());
// Now lets add a second faulty block using the same seed hash
let (block_2_t, _) = chain_block_with_new_coinbase(&cb_1, vec![], &cm, None, &key_manager).await;
let mut block_2 = db.prepare_new_block(block_2_t).unwrap();

add_monero_data(&mut block_2, seed1);
let cb_2 = db.add_block(Arc::new(block_2)).unwrap().assert_added();
let cb_2 = assert_block_add_result_added(&db.add_block(Arc::new(block_2)).unwrap());
// Now lets add a third faulty block using the same seed hash. This should fail.
let (block_3_t, _) = chain_block_with_new_coinbase(&cb_2, vec![], &cm, None, &key_manager).await;
let mut block_3 = db.prepare_new_block(block_3_t).unwrap();
Expand All @@ -148,7 +151,7 @@ async fn test_monero_blocks() {

// now lets fix the seed, and try again
add_monero_data(&mut block_3, seed2);
db.add_block(Arc::new(block_3)).unwrap().assert_added();
assert_block_add_result_added(&db.add_block(Arc::new(block_3)).unwrap());
}

fn add_monero_data(tblock: &mut Block, seed_key: &str) {
Expand Down
15 changes: 15 additions & 0 deletions base_layer/core/tests/tests/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,10 +20,25 @@
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

use tari_core::{blocks::ChainBlock, chain_storage::BlockAddResult};

mod async_db;
mod base_node_rpc;
mod block_validation;
mod mempool;
mod node_comms_interface;
mod node_service;
mod node_state_machine;

/// Test helper: unwraps a `BlockAddResult` and returns the single added block.
///
/// # Panics
/// Panics when the result is anything other than `BlockAddResult::Ok`,
/// reporting which variant was actually received.
pub fn assert_block_add_result_added(result: &BlockAddResult) -> ChainBlock {
    match result {
        BlockAddResult::Ok(block) => block.as_ref().clone(),
        BlockAddResult::BlockExists => panic!("Expected added result, but was BlockExists"),
        BlockAddResult::OrphanBlock => panic!("Expected added result, but was OrphanBlock"),
        BlockAddResult::ChainReorg { added, removed } => panic!(
            "Expected added result, but was reorg ({} added, {} removed)",
            added.len(),
            removed.len()
        ),
    }
}
25 changes: 14 additions & 11 deletions base_layer/core/tests/tests/node_service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -51,17 +51,20 @@ use tari_core::{
use tari_test_utils::unpack_enum;
use tempfile::tempdir;

use crate::helpers::{
block_builders::{
append_block,
chain_block,
construct_chained_blocks,
create_coinbase,
create_genesis_block,
create_genesis_block_with_utxos,
use crate::{
helpers::{
block_builders::{
append_block,
chain_block,
construct_chained_blocks,
create_coinbase,
create_genesis_block,
create_genesis_block_with_utxos,
},
event_stream::event_stream_next,
nodes::{random_node_identity, wait_until_online, BaseNodeBuilder},
},
event_stream::event_stream_next,
nodes::{random_node_identity, wait_until_online, BaseNodeBuilder},
tests::assert_block_add_result_added,
};

#[allow(clippy::too_many_lines)]
Expand Down Expand Up @@ -721,7 +724,7 @@ async fn local_submit_block() {
let event = event_stream_next(&mut event_stream, Duration::from_millis(20000)).await;
if let BlockEvent::ValidBlockAdded(received_block, result) = &*event.unwrap() {
assert_eq!(received_block.hash(), block1.hash());
result.assert_added();
assert_block_add_result_added(result);
} else {
panic!("Block validation failed");
}
Expand Down

0 comments on commit ee5fb6c

Please sign in to comment.