diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs
index 1276ffadf0..ad9eb4dd0d 100644
--- a/base_layer/core/src/chain_storage/blockchain_database.rs
+++ b/base_layer/core/src/chain_storage/blockchain_database.rs
@@ -860,7 +860,7 @@ where B: BlockchainBackend
         if block_add_result.was_chain_modified() {
             // If blocks were added and the node is in pruned mode, perform pruning
-            prune_database_if_needed(&mut *db, self.config.pruning_horizon, self.config.pruning_interval)?
+            prune_database_if_needed(&mut *db, self.config.pruning_horizon, self.config.pruning_interval)?;
         }
 
         info!(
diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs
index 3b927e0667..b47ac1e417 100644
--- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs
+++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs
@@ -87,6 +87,7 @@ use crate::{
         lmdb_replace,
     },
     TransactionInputRowData,
+    TransactionInputRowDataRef,
     TransactionKernelRowData,
     TransactionOutputRowData,
 },
@@ -637,8 +638,8 @@ impl LMDBDatabase {
         &self,
         txn: &WriteTransaction<'_>,
         height: u64,
-        header_hash: HashOutput,
-        input: TransactionInput,
+        header_hash: &HashOutput,
+        input: &TransactionInput,
         mmr_position: u32,
     ) -> Result<(), ChainStorageError> {
         lmdb_delete(
@@ -646,12 +647,17 @@ impl LMDBDatabase {
             &self.utxo_commitment_index,
             input.commitment().as_bytes(),
             "utxo_commitment_index",
-        )?;
+        )
+        .or_else(|err| match err {
+            // The commitment may not yet be included in the DB in the 0-conf transaction case
+            ChainStorageError::ValueNotFound { .. } => Ok(()),
+            _ => Err(err),
+        })?;
         lmdb_insert(
             txn,
             &self.deleted_txo_mmr_position_to_height_index,
             &mmr_position,
-            &(height, &header_hash),
+            &(height, header_hash),
             "deleted_txo_mmr_position_to_height_index",
         )?;
@@ -680,6 +686,7 @@ impl LMDBDatabase {
                 });
             }
 
+            // TODO: 0-conf is not currently supported for transactions with unique_id set
            lmdb_delete(txn, &self.unique_id_index, key.as_bytes(), "unique_id_index")?;
             key.set_deleted_height(height);
             debug!(
@@ -702,16 +709,16 @@ impl LMDBDatabase {
         }
 
         let hash = input.hash();
-        let key = InputKey::new(&header_hash, mmr_position, &hash);
+        let key = InputKey::new(header_hash, mmr_position, &hash);
         lmdb_insert(
             txn,
             &*self.inputs_db,
             key.as_bytes(),
-            &TransactionInputRowData {
+            &TransactionInputRowDataRef {
                 input,
                 header_hash,
                 mmr_position,
-                hash,
+                hash: &hash,
             },
             "inputs_db",
         )
@@ -1169,11 +1176,37 @@ impl LMDBDatabase {
         let mut output_mmr = MutableMmr::<HashDigest, _>::new(pruned_output_set, Bitmap::create())?;
         let mut witness_mmr = MerkleMountainRange::<HashDigest, _>::new(pruned_proof_set);
+        let leaf_count = witness_mmr.get_leaf_count()?;
+
+        // Output hashes are added before inputs are processed so that inputs can spend outputs created in the same block (0-conf and combined transactions)
+        let outputs = outputs
+            .into_iter()
+            .enumerate()
+            .map(|(i, output)| {
+                output_mmr.push(output.hash())?;
+                witness_mmr.push(output.witness_hash())?;
+                Ok((output, leaf_count + i + 1))
+            })
+            .collect::<Result<Vec<_>, ChainStorageError>>()?;
+
+        let mut spent_zero_conf_commitments = Vec::new();
         // unique_id_index expects inputs to be inserted before outputs
-        for input in inputs {
-            let index = self
-                .fetch_mmr_leaf_index(&**txn, MmrTree::Utxo, &input.output_hash())?
-                .ok_or(ChainStorageError::UnspendableInput)?;
+        for input in &inputs {
+            let output_hash = input.output_hash();
+            let index = match self.fetch_mmr_leaf_index(&**txn, MmrTree::Utxo, &output_hash)?
{ + Some(index) => index, + None => match output_mmr.find_leaf_index(&output_hash)? { + Some(index) => { + debug!( + target: LOG_TARGET, + "Input {} spends output from current block (0-conf)", input + ); + spent_zero_conf_commitments.push(&input.commitment); + index + }, + None => return Err(ChainStorageError::UnspendableInput), + }, + }; if !output_mmr.delete(index) { return Err(ChainStorageError::InvalidOperation(format!( "Could not delete index {} from the output MMR", @@ -1181,22 +1214,22 @@ impl LMDBDatabase { ))); } debug!(target: LOG_TARGET, "Inserting input `{}`", input.commitment.to_hex()); - self.insert_input(txn, current_header_at_height.height, block_hash.clone(), input, index)?; + self.insert_input(txn, current_header_at_height.height, &block_hash, input, index)?; } - for output in outputs { - output_mmr.push(output.hash())?; - witness_mmr.push(output.witness_hash())?; + for (output, mmr_count) in outputs { debug!(target: LOG_TARGET, "Inserting output `{}`", output.commitment.to_hex()); - self.insert_output( + self.insert_output(txn, &block_hash, header.height, &output, mmr_count as u32 - 1)?; + } + + for commitment in spent_zero_conf_commitments { + lmdb_delete( txn, - &block_hash, - header.height, - &output, - (witness_mmr.get_leaf_count()? - 1) as u32, + &self.utxo_commitment_index, + commitment.as_bytes(), + "utxo_commitment_index", )?; } - // Merge current deletions with the tip bitmap let deleted_at_current_height = output_mmr.deleted().clone(); // Merge the new indexes with the blockchain deleted bitmap diff --git a/base_layer/core/src/chain_storage/lmdb_db/mod.rs b/base_layer/core/src/chain_storage/lmdb_db/mod.rs index cfbc276009..e01ed62c58 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/mod.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/mod.rs @@ -42,6 +42,18 @@ pub(crate) struct TransactionOutputRowData { pub mined_height: u64, } +/// Transaction input row data taking references and used for serialization. 
+/// This struct must mirror the fields in `TransactionInputRowData` +#[derive(Serialize, Debug)] +pub(crate) struct TransactionInputRowDataRef<'a> { + pub input: &'a TransactionInput, + #[allow(clippy::ptr_arg)] + pub header_hash: &'a HashOutput, + pub mmr_position: u32, + #[allow(clippy::ptr_arg)] + pub hash: &'a HashOutput, +} + #[derive(Serialize, Deserialize, Debug)] pub(crate) struct TransactionInputRowData { pub input: TransactionInput, diff --git a/base_layer/core/src/chain_storage/tests/blockchain_database.rs b/base_layer/core/src/chain_storage/tests/blockchain_database.rs index ab1d0c1ba2..a638433517 100644 --- a/base_layer/core/src/chain_storage/tests/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/tests/blockchain_database.rs @@ -23,6 +23,7 @@ use std::sync::Arc; use rand::rngs::OsRng; +use tari_common_types::types::PublicKey; use tari_crypto::keys::PublicKey as PublicKeyTrait; use tari_test_utils::unpack_enum; use tari_utilities::Hashable; @@ -39,7 +40,7 @@ use crate::{ transactions::{ tari_amount::T, test_helpers::{schema_to_transaction, TransactionSchema}, - transaction::{OutputFeatures, Transaction, UnblindedOutput}, + transaction::{OutputFeatures, OutputFlags, Transaction, UnblindedOutput}, }, txn_schema, }; @@ -375,13 +376,11 @@ mod fetch_block_hashes_from_header_tip { } mod add_block { - use tari_common_types::types::PublicKey; + use tari_utilities::hex::Hex; use super::*; - use crate::{transactions::transaction::OutputFlags, validation::ValidationError}; #[test] - #[ignore = "broken after validator node merge"] fn it_rejects_duplicate_commitments_in_the_utxo_set() { let db = setup(); let (blocks, outputs) = add_many_chained_blocks(5, &db); @@ -407,14 +406,12 @@ mod add_block { script: tari_crypto::script![Nop], input_data: None, }]); + let commitment_hex = txns[0].body.outputs()[0].commitment.to_hex(); let (block, _) = create_next_block(&db, &prev_block, txns); let err = db.add_block(block.clone()).unwrap_err(); - unpack_enum!( - ChainStorageError::ValidationError { - source: ValidationError::ContainsTxO - } = err - ); + unpack_enum!(ChainStorageError::KeyExists { key, .. 
} = err); + assert_eq!(key, commitment_hex); // Check rollback let header = db.fetch_header(block.header.height).unwrap(); assert!(header.is_none()); @@ -481,91 +478,6 @@ mod add_block { let (block, _) = create_next_block(&db, prev_block, transactions); db.add_block(block).unwrap().assert_added(); } - - #[test] - #[ignore = "broken after validator node merge"] - fn it_rejects_duplicate_mint_or_burn_transactions_per_unique_id() { - let db = setup(); - let (blocks, outputs) = add_many_chained_blocks(1, &db); - - let prev_block = blocks.last().unwrap(); - - let (_, asset_pk) = PublicKey::random_keypair(&mut OsRng); - let unique_id = vec![1u8; 3]; - let features = OutputFeatures::for_minting(asset_pk.clone(), Default::default(), unique_id.clone(), None); - let (txns, _) = schema_to_transaction(&[txn_schema!( - from: vec![outputs[0].clone()], - to: vec![10 * T, 10 * T], - features: features - )]); - - let (block, _) = create_next_block(&db, prev_block, txns); - let err = db.add_block(block).unwrap_err(); - - unpack_enum!( - ChainStorageError::ValidationError { - source: ValidationError::ContainsDuplicateUtxoUniqueID - } = err - ); - - let features = OutputFeatures { - flags: OutputFlags::BURN_NON_FUNGIBLE, - parent_public_key: Some(asset_pk), - unique_id: Some(unique_id), - ..Default::default() - }; - let (txns, _) = schema_to_transaction(&[txn_schema!( - from: vec![outputs[0].clone()], - to: vec![10 * T, 10 * T], - features: features - )]); - - let (block, _) = create_next_block(&db, prev_block, txns); - let err = db.add_block(block).unwrap_err(); - - unpack_enum!( - ChainStorageError::ValidationError { - source: ValidationError::ContainsDuplicateUtxoUniqueID - } = err - ); - } - - #[test] - #[ignore = "broken after validator node merge"] - fn it_rejects_duplicate_mint_or_burn_transactions_in_blockchain() { - let db = setup(); - let (blocks, outputs) = add_many_chained_blocks(1, &db); - - let prev_block = blocks.last().unwrap(); - - let (_, asset_pk) = PublicKey::random_keypair(&mut OsRng); - let unique_id = vec![1u8; 3]; - let features = OutputFeatures::for_minting(asset_pk.clone(), Default::default(), unique_id.clone(), None); - let (txns, outputs) = schema_to_transaction(&[txn_schema!( - from: vec![outputs[0].clone()], - to: vec![10 * T], - features: features - )]); - - let (block, _) = create_next_block(&db, prev_block, txns); - db.add_block(block.clone()).unwrap().assert_added(); - - let features = OutputFeatures::for_minting(asset_pk, Default::default(), unique_id, None); - let (txns, _) = schema_to_transaction(&[txn_schema!( - from: vec![outputs[0].clone()], - to: vec![T], - features: features - )]); - - let (block, _) = create_next_block(&db, &block, txns); - let err = db.add_block(block).unwrap_err(); - - unpack_enum!( - ChainStorageError::ValidationError { - source: ValidationError::ContainsDuplicateUtxoUniqueID - } = err - ); - } } mod get_stats { @@ -583,14 +495,13 @@ mod fetch_total_size_stats { use super::*; #[test] - #[ignore = "broken after validator node merge"] fn it_measures_the_number_of_entries() { let db = setup(); let _ = add_many_chained_blocks(2, &db); let stats = db.fetch_total_size_stats().unwrap(); assert_eq!( stats.sizes().iter().find(|s| s.name == "utxos_db").unwrap().num_entries, - 2 + 3 ); } } @@ -734,18 +645,19 @@ mod clear_all_pending_headers { } #[test] - #[ignore = "broken after validator node merge"] fn it_clears_headers_after_tip() { let db = setup(); let _ = add_many_chained_blocks(2, &db); let prev_block = db.fetch_block(2).unwrap(); let mut prev_accum = 
prev_block.accumulated_data.clone();
-        let mut prev_block = Arc::new(prev_block.try_into_block().unwrap());
+        let mut prev_header = prev_block.try_into_chain_block().unwrap().to_chain_header();
 
         let headers = (0..5)
             .map(|_| {
-                let (block, _) = create_next_block(&db, &prev_block, vec![]);
+                let mut header = BlockHeader::from_previous(prev_header.header());
+                header.kernel_mmr_size += 1;
+                header.output_mmr_size += 1;
                 let accum = BlockHeaderAccumulatedData::builder(&prev_accum)
-                    .with_hash(block.hash())
+                    .with_hash(header.hash())
                     .with_achieved_target_difficulty(
                         AchievedTargetDifficulty::try_construct(PowAlgorithm::Sha3, 0.into(), 0.into()).unwrap(),
                     )
@@ -753,9 +665,9 @@
                 .build()
                 .unwrap();
 
-                let header = ChainHeader::try_construct(block.header.clone(), accum.clone()).unwrap();
+                let header = ChainHeader::try_construct(header, accum.clone()).unwrap();
 
-                prev_block = block;
+                prev_header = header.clone();
                 prev_accum = accum;
                 header
             })
@@ -786,7 +698,6 @@ mod fetch_utxo_by_unique_id {
     }
 
     #[test]
-    #[ignore = "broken after validator node merge"]
     fn it_finds_the_utxo_by_unique_id_at_deleted_height() {
         let db = setup();
         let unique_id = vec![1u8; 3];
diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs
index fe6e8ee093..49ed79cbcd 100644
--- a/base_layer/core/src/test_helpers/blockchain.rs
+++ b/base_layer/core/src/test_helpers/blockchain.rs
@@ -150,6 +150,7 @@ pub fn create_test_db() -> TempDatabase {
 pub struct TempDatabase {
     path: PathBuf,
     db: Option<LMDBDatabase>,
+    delete_on_drop: bool,
 }
 
 impl TempDatabase {
@@ -159,8 +160,22 @@ impl TempDatabase {
         Self {
             db: Some(create_lmdb_database(&temp_path, LMDBConfig::default()).unwrap()),
             path: temp_path,
+            delete_on_drop: true,
         }
     }
+
+    pub fn from_path<P: AsRef<Path>>(temp_path: P) -> Self {
+        Self {
+            db: Some(create_lmdb_database(&temp_path, LMDBConfig::default()).unwrap()),
+            path: temp_path.as_ref().to_path_buf(),
+            delete_on_drop: true,
+        }
+    }
+
+    pub fn disable_delete_on_drop(&mut self) -> &mut Self {
+        self.delete_on_drop = false;
+        self
+    }
 }
 
 impl Default for TempDatabase {
@@ -181,7 +196,7 @@
     fn drop(&mut self) {
         // force a drop on the LMDB db
         self.db = None;
-        if Path::new(&self.path).exists() {
+        if self.delete_on_drop && Path::new(&self.path).exists() {
             fs::remove_dir_all(&self.path).expect("Could not delete temporary file");
         }
     }
diff --git a/base_layer/core/src/validation/block_validators/test.rs b/base_layer/core/src/validation/block_validators/test.rs
index ddb4162e6a..2679fd12cb 100644
--- a/base_layer/core/src/validation/block_validators/test.rs
+++ b/base_layer/core/src/validation/block_validators/test.rs
@@ -255,7 +255,6 @@ mod unique_id {
     }
 
     #[tokio::test]
-    #[ignore = "broken after validator node merge"]
     async fn it_allows_spending_to_new_utxo() {
         let (mut blockchain, validator) = setup();
diff --git a/base_layer/core/tests/chain_storage_tests/chain_storage.rs b/base_layer/core/tests/chain_storage_tests/chain_storage.rs
index b3879920ba..d63b59b212 100644
--- a/base_layer/core/tests/chain_storage_tests/chain_storage.rs
+++ b/base_layer/core/tests/chain_storage_tests/chain_storage.rs
@@ -1204,7 +1204,6 @@ fn store_and_retrieve_blocks_from_contents() {
 }
 
 #[test]
-#[ignore = "To be completed with pruned mode"]
 fn restore_metadata_and_pruning_horizon_update() {
     // Perform test
     let validators = Validators::new(
@@ -1217,11 +1216,11 @@
     let rules = ConsensusManagerBuilder::new(network).with_block(block0.clone()).build();
     let mut config = BlockchainDatabaseConfig::default();
     let block_hash: BlockHash;
-    let pruning_horizon1: u64 = 1000;
-    let pruning_horizon2: u64 = 900;
+    let temp_path = create_temporary_data_path();
     {
-        let db = TempDatabase::new();
-        config.pruning_horizon = pruning_horizon1;
+        let mut db = TempDatabase::from_path(&temp_path);
+        db.disable_delete_on_drop();
+        config.pruning_horizon = 1000;
         let db = BlockchainDatabase::new(
             db,
             rules.clone(),
@@ -1238,12 +1237,14 @@
         let metadata = db.get_chain_metadata().unwrap();
         assert_eq!(metadata.height_of_longest_chain(), 1);
         assert_eq!(metadata.best_block(), &block_hash);
-        assert_eq!(metadata.pruning_horizon(), pruning_horizon1);
+        assert_eq!(metadata.pruning_horizon(), 1000);
     }
     // Restore blockchain db with larger pruning horizon
+    {
         config.pruning_horizon = 2000;
-        let db = TempDatabase::new();
+        let mut db = TempDatabase::from_path(&temp_path);
+        db.disable_delete_on_drop();
         let db = BlockchainDatabase::new(
             db,
             rules.clone(),
@@ -1262,7 +1263,7 @@
     // Restore blockchain db with smaller pruning horizon update
     {
         config.pruning_horizon = 900;
-        let db = TempDatabase::new();
+        let db = TempDatabase::from_path(&temp_path);
         let db = BlockchainDatabase::new(
             db,
             rules.clone(),
@@ -1276,7 +1277,7 @@
         let metadata = db.get_chain_metadata().unwrap();
         assert_eq!(metadata.height_of_longest_chain(), 1);
         assert_eq!(metadata.best_block(), &block_hash);
-        assert_eq!(metadata.pruning_horizon(), pruning_horizon2);
+        assert_eq!(metadata.pruning_horizon(), 900);
     }
 }
 static EMISSION: [u64; 2] = [10, 10];
@@ -1449,7 +1450,7 @@ fn orphan_cleanup_on_block_add() {
 }
 
 #[test]
-#[ignore = "To be completed with pruned mode"]
+#[ignore = "orphan cleanup no longer appears to be implemented in add_block; needs investigation"]
 fn horizon_height_orphan_cleanup() {
     let network = Network::LocalNet;
     let block0 = genesis_block::get_dibbler_genesis_block();
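
The lmdb_db.rs change above resolves each input in two steps: the spent output is looked up in the persisted UTXO index first, and only on a miss in the outputs just pushed for the block being inserted (a 0-conf spend). Because inputs are written before outputs, the input-side delete now tolerates a missing utxo_commitment_index entry, and spent 0-conf commitments are queued so that their index entries, written when the outputs land, are removed again afterwards. Below is a minimal standalone sketch of that lookup order; all names and types here are simplified stand-ins, not the actual Tari API.

// Sketch only: hypothetical in-memory stand-ins for the LMDB indexes and the
// output MMR; none of these names or types come from the Tari codebase.
use std::collections::HashMap;

type Hash = Vec<u8>;
type Commitment = Vec<u8>;

struct BlockInserter {
    utxo_index: HashMap<Hash, u32>,            // outputs mined in previous blocks
    current_block_outputs: HashMap<Hash, u32>, // outputs already pushed for this block
    spent_zero_conf: Vec<Commitment>,          // 0-conf spends to clean up later
}

impl BlockInserter {
    /// Resolve the MMR position of the output an input spends. Outputs are
    /// pushed before inputs are processed, so a miss in the persisted index
    /// falls back to the current block's own outputs (the 0-conf case).
    fn resolve_spend(&mut self, output_hash: &Hash, commitment: &Commitment) -> Result<u32, String> {
        if let Some(&pos) = self.utxo_index.get(output_hash) {
            return Ok(pos);
        }
        match self.current_block_outputs.get(output_hash) {
            Some(&pos) => {
                // The output's UTXO-index entry is written when the output
                // itself is inserted later; remember the commitment so that
                // entry can be deleted again once insertion is done.
                self.spent_zero_conf.push(commitment.clone());
                Ok(pos)
            },
            None => Err("UnspendableInput".into()),
        }
    }
}

fn main() {
    let mut inserter = BlockInserter {
        utxo_index: HashMap::new(),
        current_block_outputs: HashMap::from([(vec![9u8; 32], 42)]),
        spent_zero_conf: Vec::new(),
    };
    // An input spending an output created in the same block resolves via the
    // fallback and is recorded for post-insert cleanup.
    let pos = inserter.resolve_spend(&vec![9u8; 32], &vec![7u8; 33]).unwrap();
    assert_eq!(pos, 42);
    assert_eq!(inserter.spent_zero_conf.len(), 1);
}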
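TransactionInputRowDataRef is the borrow-only twin of TransactionInputRowData: it lets insert_input serialize the row without first cloning the input and hashes into an owned struct, which is why its fields must mirror the owned struct exactly. A sketch of the pattern, assuming a positional encoding such as bincode (RowData and RowDataRef are illustrative stand-ins, not the Tari structs):

// Sketch only: simplified stand-in types; bincode is an assumed encoding.
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct RowData {
    input: String,        // stand-in for TransactionInput
    header_hash: Vec<u8>, // stand-in for HashOutput
    mmr_position: u32,
    hash: Vec<u8>,
}

// Serialize-only twin holding references. Field names, order, and shapes
// must mirror RowData so the encoded bytes stay interchangeable.
#[derive(Serialize, Debug)]
struct RowDataRef<'a> {
    input: &'a str,
    header_hash: &'a [u8],
    mmr_position: u32,
    hash: &'a [u8],
}

fn main() {
    let (input, header_hash, hash) = ("input".to_string(), vec![1u8; 32], vec![2u8; 32]);

    // Write path: serialize through references, no clones.
    let bytes = bincode::serialize(&RowDataRef {
        input: &input,
        header_hash: &header_hash,
        mmr_position: 7,
        hash: &hash,
    })
    .unwrap();

    // Read path: decode the same bytes into the owned struct.
    let row: RowData = bincode::deserialize(&bytes).unwrap();
    assert_eq!(row.mmr_position, 7);
    assert_eq!(row.header_hash, header_hash);
}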