Skip to content

Commit

Permalink
chore: code review (#5881)
Browse files Browse the repository at this point in the history
Description
---
final HAZOP code review for SMT
  • Loading branch information
SWvheerden authored Nov 1, 2023
1 parent ba407bc commit a4fabcb
Show file tree
Hide file tree
Showing 8 changed files with 120 additions and 51 deletions.
62 changes: 38 additions & 24 deletions base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -222,7 +222,7 @@ pub struct LMDBDatabase {
/// Maps <contract_id, output_type> -> (block_hash, output_hash)
/// and <block_hash, output_type, contract_id> -> output_hash
contract_index: DatabaseRef,
/// Maps output_mmr_pos -> <block_hash, output_hash>
/// Maps output hash-> <block_hash, input_hash>
deleted_txo_hash_to_header_index: DatabaseRef,
/// Maps block_hash -> Block
orphans_db: DatabaseRef,
Expand Down Expand Up @@ -630,24 +630,25 @@ impl LMDBDatabase {
// does the key need to include the mmr pos
// make index safe key
let hash = input.canonical_hash();
let output_hash = input.output_hash();
let key = InputKey::new(header_hash, &hash)?;
lmdb_insert(
txn,
&self.deleted_txo_hash_to_header_index,
&hash.to_vec(),
&(height, header_hash),
&output_hash.to_vec(),
&(key.clone().convert_to_comp_key().to_vec()),
"deleted_txo_hash_to_header_index",
)?;

let key = InputKey::new(header_hash, &hash)?;
lmdb_insert(
txn,
&self.inputs_db,
&key.convert_to_comp_key(),
&TransactionInputRowDataRef {
input: &input.to_compact(),
header_hash,
mined_timestamp: header_timestamp,
height,
spent_timestamp: header_timestamp,
spent_height: height,
hash: &hash,
},
"inputs_db",
Expand Down Expand Up @@ -911,7 +912,7 @@ impl LMDBDatabase {
lmdb_delete(
txn,
&self.deleted_txo_hash_to_header_index,
&row.hash.to_vec(),
&output_hash.to_vec(),
"deleted_txo_hash_to_header_index",
)?;
if output_rows.iter().any(|r| r.hash == output_hash) {
Expand Down Expand Up @@ -945,7 +946,7 @@ impl LMDBDatabase {
utxo_mined_info.output.minimum_value_promise,
);
let smt_key = NodeKey::try_from(input.commitment()?.as_bytes())?;
let smt_node = ValueHash::try_from(input.smt_hash(row.height).as_slice())?;
let smt_node = ValueHash::try_from(input.smt_hash(row.spent_height).as_slice())?;
output_smt.insert(smt_key, smt_node)?;

trace!(target: LOG_TARGET, "Input moved to UTXO set: {}", input);
Expand Down Expand Up @@ -1081,6 +1082,12 @@ impl LMDBDatabase {
header: &BlockHeader,
body: AggregateBody,
) -> Result<(), ChainStorageError> {
if self.fetch_block_accumulated_data(txn, header.height + 1)?.is_some() {
return Err(ChainStorageError::InvalidOperation(format!(
"Attempted to insert block at height {} while next block already exists",
header.height
)));
}
let block_hash = header.hash();
debug!(
target: LOG_TARGET,
Expand Down Expand Up @@ -1494,27 +1501,27 @@ impl LMDBDatabase {
fn fetch_input_in_txn(
&self,
txn: &ConstTransaction<'_>,
input_hash: &[u8],
output_hash: &[u8],
) -> Result<Option<InputMinedInfo>, ChainStorageError> {
if let Some(key) = lmdb_get::<_, Vec<u8>>(txn, &self.deleted_txo_hash_to_header_index, input_hash)? {
if let Some(key) = lmdb_get::<_, Vec<u8>>(txn, &self.deleted_txo_hash_to_header_index, output_hash)? {
debug!(
target: LOG_TARGET,
"Fetch input: {} Found ({})",
to_hex(input_hash),
to_hex(output_hash),
key.to_hex()
);
match lmdb_get::<_, TransactionInputRowData>(txn, &self.utxos_db, &key)? {
match lmdb_get::<_, TransactionInputRowData>(txn, &self.inputs_db, &key)? {
Some(TransactionInputRowData {
input: i,
height,
spent_height: height,
header_hash,
mined_timestamp,
spent_timestamp,
..
}) => Ok(Some(InputMinedInfo {
input: i,
spent_height: height,
header_hash,
spent_timestamp: mined_timestamp,
spent_timestamp,
})),

_ => Ok(None),
Expand All @@ -1523,7 +1530,7 @@ impl LMDBDatabase {
debug!(
target: LOG_TARGET,
"Fetch input: {} NOT found in index",
to_hex(input_hash)
to_hex(output_hash)
);
Ok(None)
}
Expand Down Expand Up @@ -1835,7 +1842,7 @@ impl BlockchainBackend for LMDBDatabase {
) -> Result<Vec<(TransactionOutput, bool)>, ChainStorageError> {
let txn = self.read_transaction()?;

let mut utxos: Vec<(TransactionOutput, bool)> =
let mut outputs: Vec<(TransactionOutput, bool)> =
lmdb_fetch_matching_after::<TransactionOutputRowData>(&txn, &self.utxos_db, header_hash.deref())?
.into_iter()
.map(|row| (row.output, false))
Expand All @@ -1848,20 +1855,27 @@ impl BlockchainBackend for LMDBDatabase {
field: "hash",
value: header.to_hex(),
})?;
for utxo in &mut utxos {
let hash = utxo.0.hash();
if let Some((height, _)) =
lmdb_get::<_, (u64, Vec<u8>)>(&txn, &self.deleted_txo_hash_to_header_index, hash.as_slice())?
for output in &mut outputs {
let hash = output.0.hash();
if let Some(key) =
lmdb_get::<_, Vec<u8>>(&txn, &self.deleted_txo_hash_to_header_index, hash.as_slice())?
{
if height <= header_height {
let input = lmdb_get::<_, TransactionInputRowData>(&txn, &self.inputs_db, &key)?.ok_or(
ChainStorageError::ValueNotFound {
entity: "input",
field: "hash",
value: header.to_hex(),
},
)?;
if input.spent_height <= header_height {
// we know its spend at the header height specified as optional in the fn
utxo.1 = true;
output.1 = true;
}
}
}
}

Ok(utxos)
Ok(outputs)
}

fn fetch_output(&self, output_hash: &HashOutput) -> Result<Option<OutputMinedInfo>, ChainStorageError> {
Expand Down
8 changes: 4 additions & 4 deletions base_layer/core/src/chain_storage/lmdb_db/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -51,8 +51,8 @@ pub(crate) struct TransactionInputRowDataRef<'a> {
pub input: &'a TransactionInput,
#[allow(clippy::ptr_arg)]
pub header_hash: &'a HashOutput,
pub mined_timestamp: u64,
pub height: u64,
pub spent_timestamp: u64,
pub spent_height: u64,
#[allow(clippy::ptr_arg)]
pub hash: &'a HashOutput,
}
Expand All @@ -61,8 +61,8 @@ pub(crate) struct TransactionInputRowDataRef<'a> {
pub(crate) struct TransactionInputRowData {
pub input: TransactionInput,
pub header_hash: HashOutput,
pub mined_timestamp: u64,
pub height: u64,
pub spent_timestamp: u64,
pub spent_height: u64,
pub hash: HashOutput,
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,15 @@ async fn input_and_output_and_wallet_output_hash_match() {
assert_eq!(output.hash(), i.hash(&key_manager).await.unwrap());
}

#[test]
fn test_smt_hashes() {
let input = TransactionInput::default();
let output = TransactionOutput::default();
let input_hash = input.smt_hash(10);
let output_hash = output.smt_hash(10);
assert_eq!(input_hash, output_hash);
}

#[tokio::test]
async fn key_manager_input() {
let key_manager = create_test_core_key_manager_with_memory_db();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -561,6 +561,14 @@ impl Ord for TransactionInput {
}
}

impl Default for TransactionInput {
fn default() -> Self {
let output = SpentOutput::create_from_output(TransactionOutput::default());

TransactionInput::new_current_version(output, ExecutionStack::default(), Default::default())
}
}

#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
#[allow(clippy::large_enum_variant)]
pub enum SpentOutput {
Expand All @@ -587,4 +595,23 @@ impl SpentOutput {
SpentOutput::OutputData { .. } => 1,
}
}

pub fn create_from_output(output: TransactionOutput) -> SpentOutput {
let rp_hash = match output.proof {
Some(proof) => proof.hash(),
None => FixedHash::zero(),
};
SpentOutput::OutputData {
version: output.version,
features: output.features,
commitment: output.commitment,
script: output.script,
sender_offset_public_key: output.sender_offset_public_key,
covenant: output.covenant,
encrypted_data: output.encrypted_data,
metadata_signature: output.metadata_signature,
rangeproof_hash: rp_hash,
minimum_value_promise: output.minimum_value_promise,
}
}
}
2 changes: 1 addition & 1 deletion base_layer/core/src/validation/helpers.rs
Original file line number Diff line number Diff line change
Expand Up @@ -280,7 +280,7 @@ pub fn check_mmr_roots(header: &BlockHeader, mmr_roots: &MmrRoots) -> Result<(),
mmr_roots.output_smt_size
);
return Err(ValidationError::BlockError(BlockValidationError::MismatchedMmrSize {
mmr_tree: "Utxo".to_string(),
mmr_tree: "UTXO".to_string(),
expected: mmr_roots.output_smt_size,
actual: header.output_smt_size,
}));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,13 @@ pub struct TxoValidationTask<TBackend, TWalletConnectivity> {
config: OutputManagerServiceConfig,
}

struct MinedOutputInfo {
output: DbWalletOutput,
mined_at_height: u64,
mined_block_hash: FixedHash,
mined_timestamp: u64,
}

impl<TBackend, TWalletConnectivity> TxoValidationTask<TBackend, TWalletConnectivity>
where
TBackend: OutputManagerBackend + 'static,
Expand Down Expand Up @@ -151,19 +158,25 @@ where
unmined.len(),
self.operation_id
);
for (output, mined_height, mined_in_block, mined_timestamp) in &mined {
for mined_info in &mined {
info!(
target: LOG_TARGET,
"Updating output comm:{}: hash {} as mined at height {} with current tip at {} (Operation ID:
{})",
output.commitment.to_hex(),
output.hash.to_hex(),
mined_height,
mined_info.output.commitment.to_hex(),
mined_info.output.hash.to_hex(),
mined_info.mined_at_height,
tip_height,
self.operation_id
);
self.update_output_as_mined(output, mined_in_block, *mined_height, tip_height, *mined_timestamp)
.await?;
self.update_output_as_mined(
&mined_info.output,
&mined_info.mined_block_hash,
mined_info.mined_at_height,
tip_height,
mined_info.mined_timestamp,
)
.await?;
}
for output in unmined {
self.db
Expand Down Expand Up @@ -289,18 +302,24 @@ where
unmined.len(),
self.operation_id
);
for (output, mined_height, mined_in_block, mined_timestamp) in &mined {
for mined_info in &mined {
info!(
target: LOG_TARGET,
"Updating output comm:{}: hash {} as mined at height {} with current tip at {} (Operation ID: {})",
output.commitment.to_hex(),
output.hash.to_hex(),
mined_height,
mined_info.output.commitment.to_hex(),
mined_info.output.hash.to_hex(),
mined_info.mined_at_height,
tip_height,
self.operation_id
);
self.update_output_as_mined(output, mined_in_block, *mined_height, tip_height, *mined_timestamp)
.await?;
self.update_output_as_mined(
&mined_info.output,
&mined_info.mined_block_hash,
mined_info.mined_at_height,
tip_height,
mined_info.mined_timestamp,
)
.await?;
}
}

Expand Down Expand Up @@ -459,7 +478,7 @@ where
&self,
batch: &[DbWalletOutput],
base_node_client: &mut BaseNodeWalletRpcClient,
) -> Result<(Vec<(DbWalletOutput, u64, BlockHash, u64)>, Vec<DbWalletOutput>, u64), OutputManagerError> {
) -> Result<(Vec<MinedOutputInfo>, Vec<DbWalletOutput>, u64), OutputManagerError> {
let batch_hashes = batch.iter().map(|o| o.hash.to_vec()).collect();
trace!(
target: LOG_TARGET,
Expand Down Expand Up @@ -494,12 +513,12 @@ where
for output in batch {
if let Some(returned_output) = returned_outputs.get(&output.hash) {
match returned_output.mined_in_block.clone().try_into() {
Ok(block_hash) => mined.push((
output.clone(),
returned_output.mined_at_height,
block_hash,
returned_output.mined_timestamp,
)),
Ok(block_hash) => mined.push(MinedOutputInfo {
output: output.clone(),
mined_at_height: returned_output.mined_at_height,
mined_block_hash: block_hash,
mined_timestamp: returned_output.mined_timestamp,
}),
Err(_) => {
warn!(
target: LOG_TARGET,
Expand Down
2 changes: 1 addition & 1 deletion integration_tests/tests/features/Propagation.feature
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ Feature: Block Propagation
Then node MINER is at height 7
Then all nodes are at height 7

@critical @pruned
@critical @pruned @broken @broken_prune
Scenario: Pruned node should prune outputs
Given I have 1 seed nodes
When I have a base node SENDER connected to all seed nodes
Expand Down
4 changes: 2 additions & 2 deletions integration_tests/tests/features/Sync.feature
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ Feature: Block Sync
When I have a base node NODE2 connected to all seed nodes
Then all nodes are at height 25

@critical @pruned
@critical @pruned @broken @broken_prune
Scenario: Pruned mode simple sync
Given I have 1 seed nodes
When I have a SHA3 miner NODE1 connected to all seed nodes
Expand All @@ -57,7 +57,7 @@ Feature: Block Sync
When I have a pruned node PNODE1 connected to node NODE1 with pruning horizon set to 5
Then all nodes are at height 20

@critical @pruned
@critical @pruned @broken @broken_prune
Scenario: Pruned node should handle burned output
Given I have a seed node NODE
When I have a base node NODE1 connected to all seed nodes
Expand Down

0 comments on commit a4fabcb

Please sign in to comment.