diff --git a/Cargo.lock b/Cargo.lock index e2c4905075..34de41fc34 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4938,9 +4938,9 @@ checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" [[package]] name = "serde" -version = "1.0.181" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d3e73c93c3240c0bda063c239298e633114c69a888c3e37ca8bb33f343e9890" +checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" dependencies = [ "serde_derive", ] @@ -4957,9 +4957,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.181" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be02f6cb0cd3a5ec20bbcfbcbd749f57daddb1a0882dc2e46a6c236c90b977ed" +checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", @@ -5664,7 +5664,6 @@ dependencies = [ "chrono", "config", "criterion 0.4.0", - "croaring", "decimal-rs", "derivative", "digest 0.10.7", @@ -5870,6 +5869,7 @@ dependencies = [ "log", "rand", "serde", + "serde_derive", "serde_json", "tari_common", "tari_crypto", diff --git a/applications/minotari_app_grpc/proto/block.proto b/applications/minotari_app_grpc/proto/block.proto index 9822767bc5..5ae2880d4c 100644 --- a/applications/minotari_app_grpc/proto/block.proto +++ b/applications/minotari_app_grpc/proto/block.proto @@ -102,7 +102,7 @@ message HistoricalBlock { // confirmation. uint64 confirmations = 1; // The underlying block - Block block = 3; + Block block = 2; } diff --git a/applications/minotari_app_grpc/src/conversions/block_header.rs b/applications/minotari_app_grpc/src/conversions/block_header.rs index 25beb80f40..dd93af950a 100644 --- a/applications/minotari_app_grpc/src/conversions/block_header.rs +++ b/applications/minotari_app_grpc/src/conversions/block_header.rs @@ -39,7 +39,7 @@ impl From for grpc::BlockHeader { timestamp: h.timestamp.as_u64(), input_mr: h.input_mr.to_vec(), output_mr: h.output_mr.to_vec(), - output_mmr_size: h.output_mmr_size, + output_mmr_size: h.output_smt_size, kernel_mr: h.kernel_mr.to_vec(), kernel_mmr_size: h.kernel_mmr_size, total_kernel_offset: h.total_kernel_offset.to_vec(), @@ -73,7 +73,7 @@ impl TryFrom for BlockHeader { timestamp: EpochTime::from(header.timestamp), input_mr: FixedHash::try_from(header.input_mr).map_err(|err| err.to_string())?, output_mr: FixedHash::try_from(header.output_mr).map_err(|err| err.to_string())?, - output_mmr_size: header.output_mmr_size, + output_smt_size: header.output_mmr_size, kernel_mr: FixedHash::try_from(header.kernel_mr).map_err(|err| err.to_string())?, kernel_mmr_size: header.kernel_mmr_size, total_kernel_offset, diff --git a/applications/minotari_app_grpc/src/conversions/historical_block.rs b/applications/minotari_app_grpc/src/conversions/historical_block.rs index f01078f22f..229610ff63 100644 --- a/applications/minotari_app_grpc/src/conversions/historical_block.rs +++ b/applications/minotari_app_grpc/src/conversions/historical_block.rs @@ -32,11 +32,7 @@ impl TryFrom for grpc::HistoricalBlock { fn try_from(hb: HistoricalBlock) -> Result { Ok(Self { confirmations: hb.confirmations(), - block: Some( - hb.try_into_block()? 
- .try_into() - .map_err(ChainStorageError::ConversionError)?, - ), + block: Some(hb.into_block().try_into().map_err(ChainStorageError::ConversionError)?), }) } } diff --git a/applications/minotari_node/src/grpc/base_node_grpc_server.rs b/applications/minotari_node/src/grpc/base_node_grpc_server.rs index e3f85a855c..70aeb7ad2d 100644 --- a/applications/minotari_node/src/grpc/base_node_grpc_server.rs +++ b/applications/minotari_node/src/grpc/base_node_grpc_server.rs @@ -437,7 +437,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { if is_reversed { data.into_iter() .map(|chain_block| { - let (block, acc_data, confirmations, _) = chain_block.dissolve(); + let (block, acc_data, confirmations) = chain_block.dissolve(); match consensus_rules .calculate_coinbase_and_fees(block.header.height, block.body.kernels()) { @@ -459,7 +459,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { } else { data.into_iter() .map(|chain_block| { - let (block, acc_data, confirmations, _) = chain_block.dissolve(); + let (block, acc_data, confirmations) = chain_block.dissolve(); match consensus_rules .calculate_coinbase_and_fees(block.header.height, block.body.kernels()) { @@ -1569,7 +1569,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { ) })?; - let (block, acc_data, confirmations, _) = block.dissolve(); + let (block, acc_data, confirmations) = block.dissolve(); let total_block_reward = self .consensus_rules .calculate_coinbase_and_fees(block.header.height, block.body.kernels()) diff --git a/applications/minotari_node/src/recovery.rs b/applications/minotari_node/src/recovery.rs index b1d6990622..e47feab973 100644 --- a/applications/minotari_node/src/recovery.rs +++ b/applications/minotari_node/src/recovery.rs @@ -163,7 +163,7 @@ async fn do_recovery( let block = source_database .fetch_block(counter, true) .map_err(|e| anyhow!("Could not get block from recovery db: {}", e))? 
- .try_into_block()?; + .into_block(); trace!(target: LOG_TARGET, "Adding block: {}", block); db.add_block(Arc::new(block)) .await diff --git a/base_layer/core/Cargo.toml b/base_layer/core/Cargo.toml index b9d212d1e8..c30a9da89a 100644 --- a/base_layer/core/Cargo.toml +++ b/base_layer/core/Cargo.toml @@ -13,7 +13,7 @@ edition = "2018" default = ["base_node"] transactions = [] mempool_proto = [] -base_node = ["croaring", "tari_mmr", "transactions", "mempool_proto", "base_node_proto", "monero", "randomx-rs"] +base_node = ["tari_mmr", "transactions", "mempool_proto", "base_node_proto", "monero", "randomx-rs"] base_node_proto = [] benches = ["base_node", "criterion"] @@ -45,7 +45,6 @@ bytes = "0.5" chacha20poly1305 = "0.10.1" chrono = { version = "0.4.19", default-features = false, features = ["serde"] } criterion = { version = "0.4.0", optional = true } -croaring = { version = "0.9", optional = true } decimal-rs = "0.1.42" derivative = "2.2.0" digest = "0.10" diff --git a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs index b85aa393ea..7765c055fc 100644 --- a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs +++ b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs @@ -47,7 +47,7 @@ use crate::{ metrics, }, blocks::{Block, BlockBuilder, BlockHeader, BlockHeaderValidationError, ChainBlock, NewBlock, NewBlockTemplate}, - chain_storage::{async_db::AsyncBlockchainDb, BlockAddResult, BlockchainBackend, ChainStorageError, PrunedOutput}, + chain_storage::{async_db::AsyncBlockchainDb, BlockAddResult, BlockchainBackend, ChainStorageError}, consensus::{ConsensusConstants, ConsensusManager}, mempool::Mempool, proof_of_work::{ @@ -163,14 +163,12 @@ where B: BlockchainBackend + 'static }, NodeCommsRequest::FetchMatchingUtxos(utxo_hashes) => { let mut res = Vec::with_capacity(utxo_hashes.len()); - for (pruned_output, spent) in (self.blockchain_db.fetch_utxos(utxo_hashes).await?) + for (output, spent) in (self.blockchain_db.fetch_utxos(utxo_hashes).await?) .into_iter() .flatten() { - if let PrunedOutput::NotPruned { output } = pruned_output { - if !spent { - res.push(output); - } + if !spent { + res.push(output); } } Ok(NodeCommsResponse::TransactionOutputs(res)) @@ -367,7 +365,7 @@ where B: BlockchainBackend + 'static }, Some, ), - Some(block) => Some(block.try_into_block()?), + Some(block) => Some(block.into_block()), }; Ok(NodeCommsResponse::Block(Box::new(maybe_block))) @@ -417,12 +415,7 @@ where B: BlockchainBackend + 'static }, NodeCommsRequest::FetchUnspentUtxosInBlock { block_hash } => { let utxos = self.blockchain_db.fetch_outputs_in_block(block_hash).await?; - Ok(NodeCommsResponse::TransactionOutputs( - utxos - .into_iter() - .filter_map(|utxo| utxo.into_unpruned_output()) - .collect(), - )) + Ok(NodeCommsResponse::TransactionOutputs(utxos)) }, } } @@ -879,32 +872,22 @@ where B: BlockchainBackend + 'static details: format!("Output {} to be spent does not exist in db", input.output_hash()), })?; - match output_mined_info.output { - PrunedOutput::Pruned { .. 
} => { - return Err(CommsInterfaceError::InvalidFullBlock { - hash: block_hash, - details: format!("Output {} to be spent is pruned", input.output_hash()), - }); - }, - PrunedOutput::NotPruned { output } => { - let rp_hash = match output.proof { - Some(proof) => proof.hash(), - None => FixedHash::zero(), - }; - input.add_output_data( - output.version, - output.features, - output.commitment, - output.script, - output.sender_offset_public_key, - output.covenant, - output.encrypted_data, - output.metadata_signature, - rp_hash, - output.minimum_value_promise, - ); - }, - } + let rp_hash = match output_mined_info.output.proof { + Some(proof) => proof.hash(), + None => FixedHash::zero(), + }; + input.add_output_data( + output_mined_info.output.version, + output_mined_info.output.features, + output_mined_info.output.commitment, + output_mined_info.output.script, + output_mined_info.output.sender_offset_public_key, + output_mined_info.output.covenant, + output_mined_info.output.encrypted_data, + output_mined_info.output.metadata_signature, + rp_hash, + output_mined_info.output.minimum_value_promise, + ); } debug!( target: LOG_TARGET, diff --git a/base_layer/core/src/base_node/proto/rpc.proto b/base_layer/core/src/base_node/proto/rpc.proto index 80d284f79b..bbc6aea8b3 100644 --- a/base_layer/core/src/base_node/proto/rpc.proto +++ b/base_layer/core/src/base_node/proto/rpc.proto @@ -57,26 +57,12 @@ message SyncKernelsRequest { } message SyncUtxosRequest { - uint64 start = 1; + bytes start_header_hash = 1; bytes end_header_hash = 2; - bool include_pruned_utxos = 3; - bool include_deleted_bitmaps = 4; } message SyncUtxosResponse { - oneof utxo_or_deleted { - SyncUtxo utxo = 1; - bytes deleted_diff = 2; - } - uint64 mmr_index = 3; -} - -message SyncUtxo { - oneof utxo { - // The unspent transaction output - tari.types.TransactionOutput output = 1; - // If the UTXO is deleted/pruned, the hashes are returned - PrunedOutput pruned_output = 2; - } + tari.types.TransactionOutput output = 1; + bytes mined_header = 2; } message PrunedOutput { diff --git a/base_layer/core/src/base_node/proto/rpc.rs b/base_layer/core/src/base_node/proto/rpc.rs index 7b96c79ae3..6110f2890f 100644 --- a/base_layer/core/src/base_node/proto/rpc.rs +++ b/base_layer/core/src/base_node/proto/rpc.rs @@ -24,7 +24,7 @@ use std::convert::{TryFrom, TryInto}; use tari_utilities::ByteArray; -use crate::{blocks::Block, chain_storage::PrunedOutput, mempool::FeePerGramStat, proto::base_node as proto}; +use crate::{blocks::Block, mempool::FeePerGramStat, proto::base_node as proto}; impl TryFrom for proto::BlockBodyResponse { type Error = String; @@ -37,22 +37,22 @@ impl TryFrom for proto::BlockBodyResponse { } } -impl TryFrom for proto::SyncUtxo { - type Error = String; - - fn try_from(output: PrunedOutput) -> Result { - Ok(match output { - PrunedOutput::Pruned { output_hash } => proto::SyncUtxo { - utxo: Some(proto::sync_utxo::Utxo::PrunedOutput(proto::PrunedOutput { - hash: output_hash.to_vec(), - })), - }, - PrunedOutput::NotPruned { output } => proto::SyncUtxo { - utxo: Some(proto::sync_utxo::Utxo::Output(output.try_into()?)), - }, - }) - } -} +// impl TryFrom for proto::SyncUtxo { +// type Error = String; +// +// fn try_from(output: PrunedOutput) -> Result { +// Ok(match output { +// PrunedOutput::Pruned { output_hash } => proto::SyncUtxo { +// utxo: Some(proto::sync_utxo::Utxo::PrunedOutput(proto::PrunedOutput { +// hash: output_hash.to_vec(), +// })), +// }, +// PrunedOutput::NotPruned { output } => proto::SyncUtxo { +// utxo: 
Some(proto::sync_utxo::Utxo::Output(output.try_into()?)), +// }, +// }) +// } +// } impl From> for proto::GetMempoolFeePerGramStatsResponse { fn from(stats: Vec) -> Self { diff --git a/base_layer/core/src/base_node/proto/wallet_rpc.proto b/base_layer/core/src/base_node/proto/wallet_rpc.proto index 26ae9b6a19..d70f3ee566 100644 --- a/base_layer/core/src/base_node/proto/wallet_rpc.proto +++ b/base_layer/core/src/base_node/proto/wallet_rpc.proto @@ -69,18 +69,21 @@ message FetchUtxosResponse { } message QueryDeletedRequest { - repeated uint64 mmr_positions = 1; - bytes chain_must_include_header = 2; - bool include_deleted_block_data = 3; + repeated bytes hashes = 1; + google.protobuf.BytesValue chain_must_include_header = 2; } message QueryDeletedResponse { - repeated uint64 deleted_positions = 1; - repeated uint64 not_deleted_positions = 2; - bytes best_block = 3; - uint64 height_of_longest_chain = 4; - repeated bytes blocks_deleted_in = 5; - repeated uint64 heights_deleted_at = 6; + repeated QueryDeletedData data = 1; + bytes best_block = 2; + uint64 height_of_longest_chain = 3; +} + +message QueryDeletedData{ + uint64 mined_height = 1; + bytes block_mined_in = 2; + uint64 height_deleted_at = 3; + bytes block_deleted_in = 4; } message UtxoQueryRequest { @@ -95,11 +98,10 @@ message UtxoQueryResponses { message UtxoQueryResponse { tari.types.TransactionOutput output = 1; - uint64 mmr_position = 2; - uint64 mined_height = 3; - bytes mined_in_block = 4; - bytes output_hash = 5; - uint64 mined_timestamp = 6; + uint64 mined_height = 2; + bytes mined_in_block = 3; + bytes output_hash = 4; + uint64 mined_timestamp = 5; } message TipInfoResponse { diff --git a/base_layer/core/src/base_node/proto/wallet_rpc.rs b/base_layer/core/src/base_node/proto/wallet_rpc.rs index c68df96ee2..6159694a11 100644 --- a/base_layer/core/src/base_node/proto/wallet_rpc.rs +++ b/base_layer/core/src/base_node/proto/wallet_rpc.rs @@ -29,7 +29,7 @@ use serde::{Deserialize, Serialize}; use tari_common_types::types::{BlockHash, Signature}; use tari_utilities::ByteArray; -use crate::proto::{base_node as proto, types}; +use crate::proto::base_node as proto; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TxSubmissionResponse { @@ -270,31 +270,3 @@ impl TryFrom for TxQueryBatchResponse { }) } } - -impl proto::SyncUtxosResponse { - pub fn into_utxo(self) -> Option { - use proto::sync_utxos_response::UtxoOrDeleted::{DeletedDiff, Utxo}; - match self.utxo_or_deleted? { - Utxo(utxo) => Some(utxo), - DeletedDiff(_) => None, - } - } - - pub fn into_bitmap(self) -> Option> { - use proto::sync_utxos_response::UtxoOrDeleted::{DeletedDiff, Utxo}; - match self.utxo_or_deleted? 
{ - Utxo(_) => None, - DeletedDiff(bitmap) => Some(bitmap), - } - } -} - -impl proto::sync_utxo::Utxo { - pub fn into_transaction_output(self) -> Option { - use proto::sync_utxo::Utxo::{Output, PrunedOutput}; - match self { - Output(output) => Some(output), - PrunedOutput(_) => None, - } - } -} diff --git a/base_layer/core/src/base_node/rpc/service.rs b/base_layer/core/src/base_node/rpc/service.rs index 47bdf23b14..3943df8e44 100644 --- a/base_layer/core/src/base_node/rpc/service.rs +++ b/base_layer/core/src/base_node/rpc/service.rs @@ -35,7 +35,7 @@ use crate::{ state_machine_service::states::StateInfo, StateMachineHandle, }, - chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, PrunedOutput}, + chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend}, mempool::{service::MempoolHandle, TxStorageResponse}, proto, proto::{ @@ -44,6 +44,7 @@ use crate::{ FetchUtxosResponse, GetMempoolFeePerGramStatsRequest, GetMempoolFeePerGramStatsResponse, + QueryDeletedData, QueryDeletedRequest, QueryDeletedResponse, Signatures as SignaturesProto, @@ -66,6 +67,7 @@ use crate::{ }; const LOG_TARGET: &str = "c::base_node::rpc"; +const MAX_QUERY_DELETED_HASHES: usize = 1000; pub struct BaseNodeWalletRpcService { db: AsyncBlockchainDb, @@ -352,11 +354,9 @@ impl BaseNodeWalletService for BaseNodeWalletRpc .rpc_status_internal_error(LOG_TARGET)? .into_iter() .flatten(); - for (pruned_output, spent) in utxos { - if let PrunedOutput::NotPruned { output } = pruned_output { - if !spent { - res.push(output); - } + for (output, spent) in utxos { + if !spent { + res.push(output); } } @@ -428,18 +428,14 @@ impl BaseNodeWalletService for BaseNodeWalletRpc .flatten() .map(|utxo| { Ok(UtxoQueryResponse { - mmr_position: utxo.mmr_position.into(), mined_height: utxo.mined_height, mined_in_block: utxo.header_hash.to_vec(), output_hash: utxo.output.hash().to_vec(), - output: match utxo.output { - PrunedOutput::Pruned { .. 
} => None, - PrunedOutput::NotPruned { output } => Some(match output.try_into() { - Ok(output) => output, - Err(err) => { - return Err(err); - }, - }), + output: match utxo.output.try_into() { + Ok(output) => Some(output), + Err(err) => { + return Err(err); + }, }, mined_timestamp: utxo.mined_timestamp, }) @@ -457,6 +453,11 @@ impl BaseNodeWalletService for BaseNodeWalletRpc request: Request, ) -> Result, RpcStatus> { let message = request.into_message(); + if message.hashes.len() > MAX_QUERY_DELETED_HASHES { + return Err(RpcStatus::bad_request( + &"Received more hashes than we allow".to_string(), + )); + } if !message.chain_must_include_header.is_empty() { let hash = message @@ -475,43 +476,47 @@ impl BaseNodeWalletService for BaseNodeWalletRpc )); } } - - let deleted_bitmap = self + let hashes: Vec = message + .hashes + .into_iter() + .map(|hash| hash.try_into().map_err(|_| "Malformed pruned hash".to_string())) + .collect::>() + .map_err(|_| RpcStatus::bad_request(&"Malformed block hash received".to_string()))?; + let mut return_data = Vec::with_capacity(hashes.len()); + let utxos = self .db - .fetch_deleted_bitmap_at_tip() + .fetch_utxos_and_mined_info(hashes.clone()) .await .rpc_status_internal_error(LOG_TARGET)?; - - let mut deleted_positions = vec![]; - let mut not_deleted_positions = vec![]; - - for position in message.mmr_positions { - let position = - u32::try_from(position).map_err(|_| RpcStatus::bad_request("All MMR positions must fit into a u32"))?; - if deleted_bitmap.bitmap().contains(position) { - deleted_positions.push(position); - } else { - not_deleted_positions.push(position); - } + let txos = self + .db + .fetch_txos_and_mined_info(hashes) + .await + .rpc_status_internal_error(LOG_TARGET)?; + if utxos.len() != txos.len() { + return Err(RpcStatus::general("database returned different inputs vs outputs")); } - - let mut blocks_deleted_in = Vec::new(); - let mut heights_deleted_at = Vec::new(); - if message.include_deleted_block_data { - let headers = self - .db - .fetch_header_hash_by_deleted_mmr_positions(deleted_positions.clone()) - .await - .rpc_status_internal_error(LOG_TARGET)?; - - heights_deleted_at.reserve(headers.len()); - blocks_deleted_in.reserve(headers.len()); - for (height, hash) in headers.into_iter().flatten() { - heights_deleted_at.push(height); - blocks_deleted_in.push(hash.to_vec()); - } + for (utxo, txo) in utxos.iter().zip(txos.iter()) { + let mut data = match utxo { + None => QueryDeletedData { + mined_height: 0, + block_mined_in: Vec::new(), + height_deleted_at: 0, + block_deleted_in: Vec::new(), + }, + Some(u) => QueryDeletedData { + mined_height: u.mined_height, + block_mined_in: u.header_hash.to_vec(), + height_deleted_at: 0, + block_deleted_in: Vec::new(), + }, + }; + if let Some(input) = txo { + data.height_deleted_at = input.spent_height; + data.block_deleted_in = input.header_hash.to_vec(); + }; + return_data.push(data); } - let metadata = self .db .get_chain_metadata() @@ -521,10 +526,7 @@ impl BaseNodeWalletService for BaseNodeWalletRpc Ok(Response::new(QueryDeletedResponse { height_of_longest_chain: metadata.height_of_longest_chain(), best_block: metadata.best_block().to_vec(), - deleted_positions: deleted_positions.into_iter().map(u64::from).collect(), - not_deleted_positions: not_deleted_positions.into_iter().map(u64::from).collect(), - blocks_deleted_in, - heights_deleted_at, + data: return_data, })) } diff --git a/base_layer/core/src/base_node/rpc/sync_utxos_by_block_task.rs 
b/base_layer/core/src/base_node/rpc/sync_utxos_by_block_task.rs index 49a2e27be8..52c87af21a 100644 --- a/base_layer/core/src/base_node/rpc/sync_utxos_by_block_task.rs +++ b/base_layer/core/src/base_node/rpc/sync_utxos_by_block_task.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{convert::TryInto, sync::Arc, time::Instant}; +use std::{convert::TryInto, time::Instant}; use log::*; use tari_comms::protocol::rpc::{RpcStatus, RpcStatusResultExt}; @@ -29,7 +29,7 @@ use tokio::{sync::mpsc, task}; use crate::{ blocks::BlockHeader, - chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, PrunedOutput}, + chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend}, proto, proto::base_node::{SyncUtxosByBlockRequest, SyncUtxosByBlockResponse}, }; @@ -98,20 +98,6 @@ where B: BlockchainBackend + 'static start_header: BlockHeader, end_header: BlockHeader, ) -> Result<(), RpcStatus> { - let bitmap = self - .db - .fetch_complete_deleted_bitmap_at(end_header.hash()) - .await - .map_err(|err| { - error!(target: LOG_TARGET, "Failed to get deleted bitmap: {}", err); - RpcStatus::general(&format!( - "Could not get deleted bitmap at hash {}", - end_header.hash().to_hex() - )) - })? - .into_bitmap(); - let bitmap = Arc::new(bitmap); - debug!( target: LOG_TARGET, "Starting stream task with start_header: {} and end_header: {}", @@ -120,7 +106,6 @@ where B: BlockchainBackend + 'static ); let mut current_header = start_header; - loop { let timer = Instant::now(); let current_header_hash = current_header.hash(); @@ -137,19 +122,15 @@ where B: BlockchainBackend + 'static break; } - let (utxos, _deleted_diff) = self + let utxos = self .db - .fetch_utxos_in_block(current_header.hash(), Some(bitmap.clone())) + .fetch_utxos_in_block(current_header.hash(), None) .await .rpc_status_internal_error(LOG_TARGET)?; let utxos = utxos .into_iter() - .enumerate() // Don't include pruned UTXOs - .filter_map(|(_, utxo)| match utxo { - PrunedOutput::Pruned{output_hash: _} => None, - PrunedOutput::NotPruned{output} => Some(output.try_into()), - }).collect::, String>>().map_err(|err| RpcStatus::general(&err))?; + .filter_map(|(utxo, _spent)| Some(utxo.try_into())).collect::, String>>().map_err(|err| RpcStatus::general(&err))?; debug!( target: LOG_TARGET, @@ -211,7 +192,7 @@ where B: BlockchainBackend + 'static debug!( target: LOG_TARGET, "UTXO sync by block completed to UTXO {} (Header hash = {})", - current_header.output_mmr_size, + current_header.output_smt_size, current_header.hash().to_hex() ); diff --git a/base_layer/core/src/base_node/state_machine_service/states/listening.rs b/base_layer/core/src/base_node/state_machine_service/states/listening.rs index 1d260c3868..50aeaf8621 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/listening.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/listening.rs @@ -46,6 +46,7 @@ use crate::{ StateEvent::FatalError, StateInfo, SyncStatus, + SyncStatus::{Lagging, SyncNotPossible, UpToDate}, Waiting, }, BaseNodeStateMachine, @@ -135,6 +136,8 @@ impl Listening { } }, Ok(ChainMetadataEvent::PeerChainMetadataReceived(peer_metadata)) => { + // We already ban the peer based on some previous logic, but this message was already in the + // pipeline before the ban went into effect. 
match shared.peer_manager.is_peer_banned(peer_metadata.node_id()).await { Ok(true) => { warn!( @@ -205,6 +208,7 @@ impl Listening { if time_since_better_block .map(|t| t.elapsed() > shared.config.time_before_considered_lagging) .unwrap() + // unwrap is safe because time_since_better_block is set right above { sync_mode = SyncStatus::Lagging { local: local.clone(), @@ -324,7 +328,6 @@ fn determine_sync_mode( local: &ChainMetadata, network: &PeerChainMetadata, ) -> SyncStatus { - use SyncStatus::{Lagging, SyncNotPossible, UpToDate}; let network_tip_accum_difficulty = network.claimed_chain_metadata().accumulated_difficulty(); let local_tip_accum_difficulty = local.accumulated_difficulty(); if local_tip_accum_difficulty < network_tip_accum_difficulty { diff --git a/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs index d61f8f8220..af56083a28 100644 --- a/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs @@ -361,7 +361,7 @@ impl<'a, B: BlockchainBackend + 'static> BlockSynchronizer<'a, B> { self.db .write_transaction() .delete_orphan(header_hash) - .insert_block_body(block.clone()) + .insert_tip_block_body(block.clone()) .set_best_block( block.height(), header_hash, diff --git a/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs index 08672ec807..9a3bf2edcc 100644 --- a/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs @@ -663,7 +663,7 @@ impl<'a, B: BlockchainBackend + 'static> HeaderSynchronizer<'a, B> { if let Some(h) = existing_header { warn!( target: LOG_TARGET, - "Received header #{} `{}` that we already have. 
Ignoring", + "Received header #{} `{}` that we already have.", h.height, h.hash().to_hex() ); diff --git a/base_layer/core/src/base_node/sync/header_sync/validator.rs b/base_layer/core/src/base_node/sync/header_sync/validator.rs index d1140bb092..416c711011 100644 --- a/base_layer/core/src/base_node/sync/header_sync/validator.rs +++ b/base_layer/core/src/base_node/sync/header_sync/validator.rs @@ -269,7 +269,7 @@ mod test { let mut header = BlockHeader::from_previous(tip.header()); // Needed to have unique keys for the blockchain db mmr count indexes (MDB_KEY_EXIST error) header.kernel_mmr_size += 1; - header.output_mmr_size += 1; + header.output_smt_size += 1; let acc_data = BlockHeaderAccumulatedData { hash: header.hash(), ..Default::default() diff --git a/base_layer/core/src/base_node/sync/horizon_state_sync/error.rs b/base_layer/core/src/base_node/sync/horizon_state_sync/error.rs index dc0211125e..4c07210020 100644 --- a/base_layer/core/src/base_node/sync/horizon_state_sync/error.rs +++ b/base_layer/core/src/base_node/sync/horizon_state_sync/error.rs @@ -29,12 +29,12 @@ use tari_comms::{ protocol::rpc::{RpcError, RpcStatus}, }; use tari_crypto::errors::RangeProofError; -use tari_mmr::error::MerkleMountainRangeError; +use tari_mmr::{error::MerkleMountainRangeError, sparse_merkle_tree::SMTError}; use thiserror::Error; use tokio::task; use crate::{ - chain_storage::{ChainStorageError, MmrTree}, + chain_storage::ChainStorageError, common::BanReason, transactions::transaction_components::TransactionError, validation::ValidationError, @@ -54,9 +54,11 @@ pub enum HorizonSyncError { RangeProofError(String), #[error("An invalid transaction has been encountered: {0}")] TransactionError(#[from] TransactionError), - #[error("MMR did not match for {mmr_tree} at height {at_height}. Expected {actual_hex} to equal {expected_hex}")] - InvalidMmrRoot { - mmr_tree: MmrTree, + #[error( + "Merkle root did not match for {mr_tree} at height {at_height}. Expected {actual_hex} to equal {expected_hex}" + )] + InvalidMrRoot { + mr_tree: String, at_height: u64, expected_hex: String, actual_hex: String, @@ -93,6 +95,8 @@ pub enum HorizonSyncError { NoMoreSyncPeers(String), #[error("Could not find peer info")] PeerNotFound, + #[error("Sparse Merkle Tree error: {0}")] + SMTError(#[from] SMTError), } impl From for HorizonSyncError { @@ -132,7 +136,8 @@ impl HorizonSyncError { err @ HorizonSyncError::IncorrectResponse(_) | err @ HorizonSyncError::FinalStateValidationFailed(_) | err @ HorizonSyncError::RangeProofError(_) | - err @ HorizonSyncError::InvalidMmrRoot { .. } | + err @ HorizonSyncError::InvalidMrRoot { .. } | + err @ HorizonSyncError::SMTError(_) | err @ HorizonSyncError::InvalidMmrPosition { .. 
} | err @ HorizonSyncError::ConversionError(_) | err @ HorizonSyncError::MerkleMountainRangeError(_) | diff --git a/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs index bd2c79d72f..473af8da33 100644 --- a/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs @@ -23,17 +23,17 @@ use std::{ cmp, convert::{TryFrom, TryInto}, - mem, sync::Arc, time::{Duration, Instant}, }; -use croaring::Bitmap; -use futures::{stream::FuturesUnordered, StreamExt}; +use futures::StreamExt; use log::*; -use tari_common_types::types::{Commitment, RangeProofService}; +use tari_common_types::types::{Commitment, FixedHash, RangeProofService}; use tari_comms::{connectivity::ConnectivityRequester, peer_manager::NodeId, protocol::rpc::RpcClient, PeerConnection}; -use tari_crypto::{commitment::HomomorphicCommitment, tari_utilities::hex::Hex}; +use tari_crypto::commitment::HomomorphicCommitment; +use tari_mmr::sparse_merkle_tree::{NodeKey, ValueHash}; +use tari_utilities::{hex::Hex, ByteArray}; use tokio::task; use super::error::HorizonSyncError; @@ -47,33 +47,18 @@ use crate::{ SyncPeer, }, blocks::{BlockHeader, ChainHeader, UpdateBlockAccumulatedData}, - chain_storage::{ - async_db::AsyncBlockchainDb, - BlockchainBackend, - ChainStorageError, - DbTransaction, - MmrTree, - PrunedOutput, - }, + chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, ChainStorageError, MmrTree}, common::rolling_avg::RollingAverageTime, consensus::ConsensusManager, - proto::base_node::{ - sync_utxo as proto_sync_utxo, - sync_utxos_response::UtxoOrDeleted, - SyncKernelsRequest, - SyncUtxo, - SyncUtxosRequest, - SyncUtxosResponse, - }, + proto::base_node::{SyncKernelsRequest, SyncUtxosRequest, SyncUtxosResponse}, transactions::transaction_components::{ transaction_output::batch_verify_range_proofs, TransactionKernel, TransactionOutput, }, validation::{helpers, FinalHorizonStateValidation}, - MutablePrunedOutputMmr, + OutputSmt, PrunedKernelMmr, - PrunedOutputMmr, }; const LOG_TARGET: &str = "c::bn::state_machine_service::states::horizon_state_sync"; @@ -89,7 +74,6 @@ pub struct HorizonStateSynchronization<'a, B> { prover: Arc, num_kernels: u64, num_outputs: u64, - full_bitmap: Option, hooks: Hooks, connectivity: ConnectivityRequester, final_state_validator: Arc>, @@ -121,7 +105,6 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { prover, num_kernels: 0, num_outputs: 0, - full_bitmap: None, hooks: Hooks::default(), final_state_validator, peer_ban_manager, @@ -310,8 +293,6 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { db.prune_to_height(new_prune_height).await?; } - self.full_bitmap = Some(db.fetch_deleted_bitmap_at_tip().await?.into_bitmap()); - Ok(()) } @@ -408,7 +389,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { let block_data = db .fetch_block_accumulated_data(current_header.header().prev_hash) .await?; - let kernel_pruned_set = block_data.dissolve().0; + let kernel_pruned_set = block_data.dissolve(); let mut kernel_mmr = PrunedKernelMmr::new(kernel_pruned_set); for hash in kernel_hashes.drain(..) 
{ @@ -417,8 +398,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { let mmr_root = kernel_mmr.get_merkle_root()?; if mmr_root.as_slice() != current_header.header().kernel_mr.as_slice() { - return Err(HorizonSyncError::InvalidMmrRoot { - mmr_tree: MmrTree::Kernel, + return Err(HorizonSyncError::InvalidMrRoot { + mr_tree: MmrTree::Kernel.to_string(), at_height: current_header.height(), expected_hex: current_header.header().kernel_mr.to_hex(), actual_hex: mmr_root.to_hex(), @@ -500,18 +481,13 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { to_header: &BlockHeader, ) -> Result<(), HorizonSyncError> { info!(target: LOG_TARGET, "Starting output sync from peer {}", sync_peer); - let local_num_outputs = self.db().fetch_mmr_size(MmrTree::Utxo).await?; - let remote_num_outputs = to_header.output_mmr_size; + let remote_num_outputs = to_header.output_smt_size; self.num_outputs = remote_num_outputs; - if local_num_outputs >= remote_num_outputs { - debug!(target: LOG_TARGET, "Local output set already synchronized"); - return Ok(()); - } - + // todo we need to be able to pause and resume this let info = HorizonSyncInfo::new(vec![sync_peer.node_id().clone()], HorizonSyncStatus::Outputs { - current: local_num_outputs, + current: 0, total: self.num_outputs, sync_peer: sync_peer.clone(), }); @@ -519,15 +495,14 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { debug!( target: LOG_TARGET, - "Requesting outputs from {} to {} ({} remaining)", - local_num_outputs, + "Requesting outputs from {}", remote_num_outputs, - remote_num_outputs - local_num_outputs, ); + let db = self.db().clone(); - let start = local_num_outputs; let end = remote_num_outputs; let end_hash = to_header.hash(); + let start_hash = db.fetch_chain_header(0).await?; let latency = client.get_last_request_latency(); debug!( @@ -538,39 +513,16 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { ); let req = SyncUtxosRequest { - start, + start_header_hash: start_hash.hash().to_vec(), end_header_hash: end_hash.to_vec(), - include_deleted_bitmaps: true, - include_pruned_utxos: true, }; - let mut current_header = self.db().fetch_header_containing_utxo_mmr(start).await?; let mut output_stream = client.sync_utxos(req).await?; - debug!( - target: LOG_TARGET, - "Found header for utxos at mmr pos: {} - {} height: {}", - start, - current_header.header().output_mmr_size, - current_header.height() - ); - - let db = self.db().clone(); - let mut txn = db.write_transaction(); - let mut unpruned_outputs = vec![]; - let mut mmr_position = start; - let mut height_utxo_counter = 0u64; - let mut height_txo_counter = 0u64; - let mut timer = Instant::now(); - - let block_data = db - .fetch_block_accumulated_data(current_header.header().prev_hash) - .await?; - let (_, output_pruned_set, _) = block_data.dissolve(); - - let mut output_mmr = PrunedOutputMmr::new(output_pruned_set); - let mut constants = self.rules.consensus_constants(current_header.height()).clone(); + let mut utxo_counter = 0u64; + let timer = Instant::now(); + let mut output_smt = OutputSmt::new(); let mut last_sync_timer = Instant::now(); let mut avg_latency = RollingAverageTime::new(20); @@ -578,238 +530,84 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { let latency = last_sync_timer.elapsed(); avg_latency.add_sample(latency); let res: SyncUtxosResponse = response?; + utxo_counter += 1; - if mmr_position > end { + if utxo_counter > end { return 
Err(HorizonSyncError::IncorrectResponse( "Peer sent too many outputs".to_string(), )); } - - if res.mmr_index != 0 && res.mmr_index != mmr_position { - return Err(HorizonSyncError::IncorrectResponse(format!( - "Expected MMR position of {} but got {}", - mmr_position, res.mmr_index, - ))); - } - - let txo = res - .utxo_or_deleted + let output = res + .output .ok_or_else(|| HorizonSyncError::IncorrectResponse("Peer sent no transaction output data".into()))?; - - match txo { - UtxoOrDeleted::Utxo(SyncUtxo { - utxo: Some(proto_sync_utxo::Utxo::Output(output)), - }) => { - trace!( - target: LOG_TARGET, - "UTXO {} received from sync peer for header #{}", - res.mmr_index, - current_header.height() - ); - height_utxo_counter += 1; - let output = TransactionOutput::try_from(output).map_err(HorizonSyncError::ConversionError)?; - helpers::check_tari_script_byte_size(&output.script, constants.max_script_byte_size())?; - unpruned_outputs.push(output.clone()); - - output_mmr.push(output.hash().to_vec())?; - - txn.insert_output_via_horizon_sync( - output, - *current_header.hash(), - current_header.height(), - u32::try_from(mmr_position)?, - current_header.timestamp(), - ); - mmr_position += 1; - }, - UtxoOrDeleted::Utxo(SyncUtxo { - utxo: Some(proto_sync_utxo::Utxo::PrunedOutput(utxo)), - }) => { - trace!( + let output_header = FixedHash::try_from(res.mined_header) + .map_err(|_| HorizonSyncError::IncorrectResponse("Peer sent no mined header".into()))?; + let current_header = self + .db() + .fetch_header_by_block_hash(output_header) + .await? + .ok_or_else(|| { + HorizonSyncError::IncorrectResponse("Peer sent mined header we do not know of".into()) + })?; + + let constants = self.rules.consensus_constants(current_header.height).clone(); + let output = TransactionOutput::try_from(output).map_err(HorizonSyncError::ConversionError)?; + trace!( target: LOG_TARGET, - "UTXO {} (pruned) received from sync peer for header #{}", - res.mmr_index, - current_header.height() - ); - height_txo_counter += 1; - output_mmr.push(utxo.hash.clone())?; - - txn.insert_pruned_output_via_horizon_sync( - utxo.hash.try_into()?, - *current_header.hash(), - current_header.height(), - u32::try_from(mmr_position)?, - current_header.timestamp(), - ); - mmr_position += 1; - }, - UtxoOrDeleted::DeletedDiff(diff_bitmap_buff) => { - if mmr_position != current_header.header().output_mmr_size { - return Err(HorizonSyncError::IncorrectResponse(format!( - "Peer unexpectedly sent a deleted bitmap. Expected at MMR index {} but it was sent at {}", - current_header.header().output_mmr_size, - mmr_position - ))); - } - - // Check that the difference bitmap isn't excessively large. Bitmap::deserialize panics if greater - // than isize::MAX, however isize::MAX is still an inordinate amount of data. An - // arbitrary 4 MiB limit is used. 
- const MAX_DIFF_BITMAP_BYTE_LEN: usize = 4 * 1024 * 1024; - if diff_bitmap_buff.len() > MAX_DIFF_BITMAP_BYTE_LEN { - return Err(HorizonSyncError::IncorrectResponse(format!( - "Received difference bitmap (size = {}) that exceeded the maximum size limit of {} from \ - peer {}", - diff_bitmap_buff.len(), - MAX_DIFF_BITMAP_BYTE_LEN, - sync_peer.node_id() - ))); - } - - let diff_bitmap = Bitmap::try_deserialize(&diff_bitmap_buff).ok_or_else(|| { - HorizonSyncError::IncorrectResponse(format!( - "Peer {} sent an invalid difference bitmap", - sync_peer.node_id() - )) - })?; - - // Merge the differences into the final bitmap so that we can commit to the entire spend state - // in the output MMR - let bitmap = self.full_bitmap_mut(); - bitmap.or_inplace(&diff_bitmap); - // let force optimize here as we need to ensure this runs as we compute the merkle root on the - // optimized bitmap. - bitmap.run_optimize(); - - let pruned_output_set = output_mmr.get_pruned_hash_set()?; - let total_output_mmr = MutablePrunedOutputMmr::new(pruned_output_set.clone(), bitmap.clone())?; - - let mmr_root = total_output_mmr.get_merkle_root()?; - if mmr_root.as_slice() != current_header.header().output_mr.as_slice() { - return Err(HorizonSyncError::InvalidMmrRoot { - mmr_tree: MmrTree::Utxo, - at_height: current_header.height(), - expected_hex: current_header.header().output_mr.to_hex(), - actual_hex: mmr_root.to_hex(), - }); - } - - self.validate_rangeproofs(mem::take(&mut unpruned_outputs)).await?; - - txn.update_deleted_bitmap(diff_bitmap.clone()); - - txn.update_block_accumulated_data_via_horizon_sync( - *current_header.hash(), - UpdateBlockAccumulatedData { - utxo_hash_set: Some(pruned_output_set), - deleted_diff: Some(diff_bitmap.into()), - ..Default::default() - }, - ); - txn.commit().await?; - - debug!( - target: LOG_TARGET, - "UTXO: {}/{}, Header #{}, added {} utxos, added {} txos in {:.2?}", - mmr_position, - end, - current_header.height(), - height_utxo_counter, - height_txo_counter, - timer.elapsed() - ); - height_txo_counter = 0; - height_utxo_counter = 0; - timer = Instant::now(); - - if mmr_position == end { - debug!( - target: LOG_TARGET, - "Sync complete at mmr position {}, height #{}", - mmr_position, - current_header.height() - ); - break; - } else { - current_header = db.fetch_chain_header(current_header.height() + 1).await?; - constants = self.rules.consensus_constants(current_header.height()).clone(); - debug!( - target: LOG_TARGET, - "Expecting to receive the next UTXO set {}-{} for header #{}", - mmr_position, - current_header.header().output_mmr_size, - current_header.height() - ); - } - }, - v => { - error!(target: LOG_TARGET, "Remote node returned an invalid response {:?}", v); - return Err(HorizonSyncError::IncorrectResponse( - "Invalid sync utxo returned".to_string(), - )); - }, - } - - sync_peer.set_latency(latency); - sync_peer.add_sample(last_sync_timer.elapsed()); - if mmr_position % 100 == 0 || mmr_position == self.num_outputs { + "UTXO {} received from sync peer", + output.hash(), + ); + helpers::check_tari_script_byte_size(&output.script, constants.max_script_byte_size())?; + + batch_verify_range_proofs(&self.prover, &vec![&output])?; + let smt_key = NodeKey::try_from(output.commitment.as_bytes())?; + let smt_node = ValueHash::try_from(output.smt_hash(current_header.height).as_slice())?; + output_smt.insert(smt_key, smt_node)?; + txn.insert_output_via_horizon_sync( + output, + current_header.hash(), + current_header.height, + current_header.timestamp.as_u64(), + ); + + // we have 
checked the range proof, and we have checked that the linked to header exists. + txn.commit().await?; + + if utxo_counter % 100 == 0 { let info = HorizonSyncInfo::new(vec![sync_peer.node_id().clone()], HorizonSyncStatus::Outputs { - current: mmr_position, + current: utxo_counter, total: self.num_outputs, sync_peer: sync_peer.clone(), }); self.hooks.call_on_progress_horizon_hooks(info); } - - self.check_latency(sync_peer.node_id(), &avg_latency)?; - + sync_peer.set_latency(latency); + sync_peer.add_sample(last_sync_timer.elapsed()); last_sync_timer = Instant::now(); } - - if !unpruned_outputs.is_empty() { + if utxo_counter != end { return Err(HorizonSyncError::IncorrectResponse( - "Sync node sent leftover unpruned outputs".to_string(), + "Peer did not send enough outputs".to_string(), )); } - - if mmr_position != end { - return Err(HorizonSyncError::IncorrectResponse( - "Sync node did not send all utxos requested".to_string(), - )); + debug!( + target: LOG_TARGET, + "finished syncing UTXOs: {} downloaded in {:.2?}", + end, + timer.elapsed() + ); + let root = FixedHash::try_from(output_smt.hash().as_slice())?; + if root != to_header.output_mr { + return Err(HorizonSyncError::InvalidMrRoot { + mr_tree: "UTXO SMT".to_string(), + at_height: to_header.height, + expected_hex: to_header.output_mr.to_hex(), + actual_hex: root.to_hex(), + }); } - Ok(()) - } + self.check_latency(sync_peer.node_id(), &avg_latency)?; - async fn validate_rangeproofs(&self, mut unpruned_outputs: Vec) -> Result<(), HorizonSyncError> { - let concurrency = self.config.validation_concurrency; - let mut chunk_size = unpruned_outputs.len() / concurrency; - if unpruned_outputs.len() % concurrency > 0 { - chunk_size += 1; - } - // Validate rangeproofs in parallel - let mut tasks = (0..concurrency) - .filter_map(|_| { - if unpruned_outputs.is_empty() { - None - } else { - let end = cmp::min(unpruned_outputs.len(), chunk_size); - Some(unpruned_outputs.drain(..end).collect::>()) - } - }) - .map(|chunk| { - let prover = self.prover.clone(); - task::spawn_blocking(move || -> Result<(), HorizonSyncError> { - let outputs = chunk.iter().collect::>(); - batch_verify_range_proofs(&prover, &outputs)?; - Ok(()) - }) - }) - .collect::>(); - - while let Some(result) = tasks.next().await { - result??; - } Ok(()) } @@ -858,19 +656,6 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { Ok(()) } - fn take_final_bitmap(&mut self) -> Arc { - self.full_bitmap - .take() - .map(Arc::new) - .expect("take_full_bitmap called before initialize") - } - - fn full_bitmap_mut(&mut self) -> &mut Bitmap { - self.full_bitmap - .as_mut() - .expect("full_bitmap_mut called before initialize") - } - /// (UTXO sum, Kernel sum) async fn calculate_commitment_sums( &mut self, @@ -884,24 +669,20 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { let mut prev_kernel_mmr = 0; let height = header.height(); - let bitmap = self.take_final_bitmap(); let db = self.db().inner().clone(); + let header_hash = header.hash().clone(); task::spawn_blocking(move || { - let mut txn = DbTransaction::new(); - let mut utxo_mmr_position = 0; - let mut prune_positions = vec![]; - for h in 0..=height { let curr_header = db.fetch_chain_header(h)?; trace!( target: LOG_TARGET, "Fetching utxos from db: height:{}, header.output_mmr:{}, prev_mmr:{}, end:{}", curr_header.height(), - curr_header.header().output_mmr_size, + curr_header.header().output_smt_size, prev_mmr, - curr_header.header().output_mmr_size - 1 + 
curr_header.header().output_smt_size - 1
                );
-                let (utxos, _) = db.fetch_utxos_in_block(*curr_header.hash(), None)?;
+                let utxos = db.fetch_utxos_in_block(*curr_header.hash(), Some(header_hash))?;
                 debug!(
                     target: LOG_TARGET,
                     "{} output(s) loaded for height {}",
@@ -918,31 +699,12 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> {
                 );
                 trace!(target: LOG_TARGET, "Number of utxos returned: {}", utxos.len());
-                let mut pruned_counter = 0;
-                for u in utxos {
-                    match u {
-                        PrunedOutput::NotPruned { output } => {
-                            if bitmap.contains(utxo_mmr_position) {
-                                debug!(
-                                    target: LOG_TARGET,
-                                    "Found output that needs pruning at height: {} position: {}", h, utxo_mmr_position
-                                );
-                                prune_positions.push(utxo_mmr_position);
-                                pruned_counter += 1;
-                            } else {
-                                utxo_sum = &output.commitment + &utxo_sum;
-                            }
-                        },
-                        _ => {
-                            pruned_counter += 1;
-                        },
+                for (u, spent) in utxos {
+                    if !spent {
+                        utxo_sum = &u.commitment + &utxo_sum;
                     }
-                    utxo_mmr_position += 1;
-                }
-                if pruned_counter > 0 {
-                    trace!(target: LOG_TARGET, "{} pruned output(s)", pruned_counter);
                 }
-                prev_mmr = curr_header.header().output_mmr_size;
+                prev_mmr = curr_header.header().output_smt_size;
                 let kernels = db.fetch_kernels_in_block(*curr_header.hash())?;
                 trace!(target: LOG_TARGET, "Number of kernels returned: {}", kernels.len());
@@ -957,22 +719,13 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> {
                 if h % 1000 == 0 {
                     debug!(
                         target: LOG_TARGET,
-                        "Final Validation: {:.2}% complete. Height: {}, mmr_position: {}, {} outputs to prune after \
-                         sync",
+                        "Final Validation: {:.2}% complete. Height: {}",
                         (h as f32 / height as f32) * 100.0,
                         h,
-                        utxo_mmr_position,
-                        prune_positions.len()
                     );
                 }
             }
-            if !prune_positions.is_empty() {
-                debug!(target: LOG_TARGET, "Pruning {} spent outputs", prune_positions.len());
-                txn.prune_outputs_at_positions(prune_positions);
-                db.write(txn)?;
-            }
-
             Ok((utxo_sum, kernel_sum, burned_sum))
         })
        .await?
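The horizon sync rewrite above drops the pruned output MMR and the roaring-bitmap spend tracking in favour of a single sparse Merkle tree keyed by output commitment: every streamed UTXO is range-proof checked, inserted into the SMT, and the final SMT root is compared against the output_mr committed to by the target header. Below is a minimal sketch of that root check, assuming the tari_mmr::sparse_merkle_tree API as it appears in this diff (OutputSmt, NodeKey, ValueHash, TransactionOutput::smt_hash) and that the code sits next to the synchronizer module so the same imports resolve; check_output_smt_root and its outputs slice are illustrative, not part of the patch:

    use std::convert::TryFrom;

    use tari_common_types::types::FixedHash;
    use tari_mmr::sparse_merkle_tree::{NodeKey, ValueHash};
    use tari_utilities::{hex::Hex, ByteArray};

    use super::error::HorizonSyncError;
    use crate::{transactions::transaction_components::TransactionOutput, OutputSmt};

    /// Rebuild the output SMT from (output, mined_height) pairs and verify its
    /// root against the `output_mr` recorded in the header being synced to.
    fn check_output_smt_root(
        outputs: &[(TransactionOutput, u64)],
        expected_root: FixedHash,
        at_height: u64,
    ) -> Result<(), HorizonSyncError> {
        let mut smt = OutputSmt::new();
        for (output, mined_height) in outputs {
            // Leaves are keyed by the output commitment; the value hash also
            // commits to the height the output was mined at.
            let key = NodeKey::try_from(output.commitment.as_bytes())?;
            let value = ValueHash::try_from(output.smt_hash(*mined_height).as_slice())?;
            smt.insert(key, value)?;
        }
        let root = FixedHash::try_from(smt.hash().as_slice())?;
        if root != expected_root {
            // Reported via the new InvalidMrRoot variant that replaces InvalidMmrRoot.
            return Err(HorizonSyncError::InvalidMrRoot {
                mr_tree: "UTXO SMT".to_string(),
                at_height,
                expected_hex: expected_root.to_hex(),
                actual_hex: root.to_hex(),
            });
        }
        Ok(())
    }

Because the tree is keyed by commitment rather than by insertion order, this check no longer depends on MMR positions or a deleted bitmap, which is what lets QueryDeletedRequest and SyncUtxosRequest move from mmr_positions and start indices to plain block and output hashes.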
diff --git a/base_layer/core/src/base_node/sync/rpc/service.rs b/base_layer/core/src/base_node/sync/rpc/service.rs index 4e46bf8ede..ee49e62b32 100644 --- a/base_layer/core/src/base_node/sync/rpc/service.rs +++ b/base_layer/core/src/base_node/sync/rpc/service.rs @@ -231,16 +231,13 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ break; }, Ok(blocks) => { - let blocks = blocks - .into_iter() - .map(|hb| hb.try_into_block().map_err(RpcStatus::log_internal_error(LOG_TARGET))) - .map(|block| match block { - Ok(b) => proto::base_node::BlockBodyResponse::try_from(b).map_err(|e| { - log::error!(target: LOG_TARGET, "Internal error: {}", e); - RpcStatus::general_default() - }), - Err(err) => Err(err), - }); + let blocks = blocks.into_iter().map(|hb| { + let block = hb.into_block(); + proto::base_node::BlockBodyResponse::try_from(block).map_err(|e| { + log::error!(target: LOG_TARGET, "Internal error: {}", e); + RpcStatus::general_default() + }) + }); // Ensure task stops if the peer prematurely stops their RPC session if utils::mpsc::send_all(&tx, blocks).await.is_err() { @@ -589,13 +586,10 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ let peer_node_id = request.context().peer_node_id(); debug!( target: LOG_TARGET, - "Received sync_utxos request from header {} to {} (start = {}, include_pruned_utxos = {}, \ - include_deleted_bitmaps = {})", + "Received sync_utxos-{} request from header {} to {}", peer_node_id, - req.start, + req.start_header_hash.to_hex(), req.end_header_hash.to_hex(), - req.include_pruned_utxos, - req.include_deleted_bitmaps ); let session_token = self.try_add_exclusive_session(peer_node_id.clone()).await?; diff --git a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs index 7f7317aea5..04df85f29c 100644 --- a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs +++ b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs @@ -20,11 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{ - convert::{TryFrom, TryInto}, - sync::Arc, - time::Instant, -}; +use std::{convert::TryInto, sync::Arc, time::Instant}; use log::*; use tari_comms::{ @@ -39,8 +35,7 @@ use crate::{ base_node::metrics, blocks::BlockHeader, chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend}, - proto, - proto::base_node::{SyncUtxo, SyncUtxosRequest, SyncUtxosResponse}, + proto::base_node::{SyncUtxosRequest, SyncUtxosResponse}, }; const LOG_TARGET: &str = "c::base_node::sync_rpc::sync_utxo_task"; @@ -63,19 +58,20 @@ where B: BlockchainBackend + 'static mut tx: mpsc::Sender>, ) -> Result<(), RpcStatus> { let msg = request.into_message(); + let start_hash = msg + .start_header_hash + .clone() + .try_into() + .rpc_status_bad_request("Invalid header hash")?; + let start_header = self .db - .fetch_header_containing_utxo_mmr(msg.start) + .fetch_header_by_block_hash(start_hash) .await - .map_err(|err| { - error!(target: LOG_TARGET, "{}", err); - if err.is_value_not_found() { - RpcStatus::not_found("start index not found") - } else { - RpcStatus::general("DB failure when fetching header containing start index") - } - })?; - let hash = msg + .rpc_status_internal_error(LOG_TARGET)? 
+            .ok_or_else(|| RpcStatus::not_found("Start header hash was not found"))?;
+
+        let end_hash = msg
             .end_header_hash
             .clone()
             .try_into()
@@ -83,54 +79,17 @@ where B: BlockchainBackend + 'static
         let end_header = self
             .db
-            .fetch_header_by_block_hash(hash)
+            .fetch_header_by_block_hash(end_hash)
             .await
             .rpc_status_internal_error(LOG_TARGET)?
             .ok_or_else(|| RpcStatus::not_found("End header hash is was not found"))?;
-        if start_header.height() > end_header.height {
-            return Err(RpcStatus::bad_request(&format!(
-                "start header height {} cannot be greater than the end header height ({})",
-                start_header.height(),
-                end_header.height
-            )));
-        }
-
-        let (skip_outputs, prev_utxo_mmr_size) = if start_header.height() == 0 {
-            (msg.start, 0)
-        } else {
-            let prev_header = self
-                .db
-                .fetch_header_by_block_hash(start_header.header().prev_hash)
-                .await
-                .rpc_status_internal_error(LOG_TARGET)?
-                .ok_or_else(|| RpcStatus::not_found("Previous start header hash is was not found"))?;
-
-            let skip = msg.start.checked_sub(prev_header.output_mmr_size)
-                // This is a data inconsistency because fetch_header_containing_utxo_mmr returned the header we are basing this on
-                .ok_or_else(|| RpcStatus::general(&format!("Data inconsistency: output mmr size of header at {} was more than the start index {}", prev_header.height, msg.start)))?;
-            (skip, prev_header.output_mmr_size)
-        };
-
-        let include_pruned_utxos = msg.include_pruned_utxos;
-        let include_deleted_bitmaps = msg.include_deleted_bitmaps;
         task::spawn(async move {
             debug!(
                 target: LOG_TARGET,
                 "Starting UTXO stream for peer '{}'", self.peer_node_id
             );
-            if let Err(err) = self
-                .start_streaming(
-                    &mut tx,
-                    start_header.into_header(),
-                    skip_outputs,
-                    prev_utxo_mmr_size,
-                    end_header,
-                    include_pruned_utxos,
-                    include_deleted_bitmaps,
-                )
-                .await
-            {
+            if let Err(err) = self.start_streaming(&mut tx, start_header, end_header).await {
                 debug!(
                     target: LOG_TARGET,
                     "UTXO stream errored for peer '{}': {}", self.peer_node_id, err
@@ -152,36 +111,13 @@ where B: BlockchainBackend + 'static
         &self,
         tx: &mut mpsc::Sender>,
         mut current_header: BlockHeader,
-        mut skip_outputs: u64,
-        mut prev_utxo_mmr_size: u64,
         end_header: BlockHeader,
-        include_pruned_utxos: bool,
-        include_deleted_bitmaps: bool,
     ) -> Result<(), RpcStatus> {
-        // we need to fetch the spent bitmap for the height the client requested
-        let bitmap = self
-            .db
-            .fetch_complete_deleted_bitmap_at(end_header.hash())
-            .await
-            .map_err(|err| {
-                error!(target: LOG_TARGET, "Failed to get deleted bitmap: {}", err);
-                RpcStatus::general(&format!(
-                    "Could not get deleted bitmap at hash {}",
-                    end_header.hash().to_hex()
-                ))
-            })?
-            .into_bitmap();
-        let bitmap = Arc::new(bitmap);
         debug!(
             target: LOG_TARGET,
-            "Starting stream task with current_header: {}, skip_outputs: {}, prev_utxo_mmr_size: {}, end_header: {}, \
-             include_pruned_utxos: {:?}, include_deleted_bitmaps: {:?}",
+            "Starting stream task with current_header: {}, end_header: {}",
             current_header.hash().to_hex(),
-            skip_outputs,
-            prev_utxo_mmr_size,
             end_header.hash().to_hex(),
-            include_pruned_utxos,
-            include_deleted_bitmaps
         );
         loop {
             let timer = Instant::now();
@@ -194,9 +130,6 @@ where B: BlockchainBackend + 'static
                 current_header_hash.to_hex()
             );
-            let start = prev_utxo_mmr_size + skip_outputs;
-            let end = current_header.output_mmr_size;
-
             if tx.is_closed() {
                 debug!(
                     target: LOG_TARGET,
@@ -205,19 +138,15 @@ where B: BlockchainBackend + 'static
                 break;
             }
-            let (utxos, deleted_diff) = self
+            let utxos = self
                 .db
-                .fetch_utxos_in_block(current_header.hash(), Some(bitmap.clone()))
+                .fetch_utxos_in_block(current_header.hash(), Some(end_header.hash()))
                 .await
                 .rpc_status_internal_error(LOG_TARGET)?;
             debug!(
                 target: LOG_TARGET,
-                "Streaming UTXO(s) {}-{} ({}) for block #{}. Deleted diff len = {}",
-                start,
-                end,
-                utxos.len(),
+                "Streaming UTXO(s) for block #{}.",
                 current_header.height,
-                deleted_diff.cardinality(),
             );
             if tx.is_closed() {
                 debug!(
@@ -227,56 +156,38 @@ where B: BlockchainBackend + 'static
                 break;
             }
-            let skip = usize::try_from(skip_outputs)
-                .map_err(|_| RpcStatus::bad_request("skip_outputs exceeds a 32-bit unsigned integer"))?;
-
             let utxos = utxos
                 .into_iter()
-                .skip(skip)
-                // Only enumerate after skip, because `start` already has the offset in it so `i` can begin from 0
-                .enumerate()
-                .filter_map(|(i, utxo)| {
-                    // Only include pruned UTXOs if include_pruned_utxos is true
-                    // We use filter_map because we still want the pruned utxos to count towards the index
-                    if include_pruned_utxos || !utxo.is_pruned() {
-                        match SyncUtxo::try_from(utxo) {
-                            Ok(utxo) => Some(Ok(SyncUtxosResponse {utxo_or_deleted: Some(proto::base_node::sync_utxos_response::UtxoOrDeleted::Utxo(utxo)),mmr_index: start + i as u64})),
+                .filter_map(|(utxo, spent)| {
+                    // We only send unspent utxos
+                    if !spent {
+                        match utxo.try_into() {
+                            Ok(tx_output) => Some(Ok(SyncUtxosResponse {
+                                output: Some(tx_output),
+                                mined_header: current_header.hash().to_vec(),
+                            })),
                             Err(err) => Some(Err(err)),
-                        }
+                        }
                     } else {
                         None
                     }
-                }).collect::,String>>().map_err(|err| RpcStatus::bad_request(&err))?.into_iter().map(Ok);
+                })
+                .collect::, String>>()
+                .map_err(|err| RpcStatus::bad_request(&err))?
+ .into_iter() + .map(Ok); // Ensure task stops if the peer prematurely stops their RPC session if utils::mpsc::send_all(tx, utxos).await.is_err() { break; } - // We only want to skip the first block UTXOs - skip_outputs = 0; - - if include_deleted_bitmaps { - let bitmaps = SyncUtxosResponse { - utxo_or_deleted: Some(proto::base_node::sync_utxos_response::UtxoOrDeleted::DeletedDiff( - deleted_diff.serialize(), - )), - mmr_index: 0, - }; - - if tx.send(Ok(bitmaps)).await.is_err() { - break; - } - } debug!( target: LOG_TARGET, - "Streamed utxos {} to {} in {:.2?} (including stream backpressure)", - start, - end, + "Streamed utxos in {:.2?} (including stream backpressure)", timer.elapsed() ); - prev_utxo_mmr_size = current_header.output_mmr_size; if current_header.height + 1 > end_header.height { break; } @@ -296,8 +207,7 @@ where B: BlockchainBackend + 'static debug!( target: LOG_TARGET, - "UTXO sync completed to UTXO {} (Header hash = {})", - current_header.output_mmr_size, + "UTXO sync completed to Header hash = {}", current_header.hash().to_hex() ); diff --git a/base_layer/core/src/base_node/sync/rpc/tests.rs b/base_layer/core/src/base_node/sync/rpc/tests.rs index a791eb9e6c..46482ae2f9 100644 --- a/base_layer/core/src/base_node/sync/rpc/tests.rs +++ b/base_layer/core/src/base_node/sync/rpc/tests.rs @@ -121,12 +121,11 @@ mod sync_utxos { #[tokio::test] async fn it_returns_not_found_if_unknown_hash() { - let (service, _, rpc_request_mock, _tmp) = setup(); + let (service, db, rpc_request_mock, _tmp) = setup(); + let gen_block_hash = db.fetch_header(0).unwrap().unwrap().hash(); let msg = SyncUtxosRequest { - start: 0, + start_header_hash: gen_block_hash.to_vec(), end_header_hash: vec![0; 32], - include_pruned_utxos: true, - include_deleted_bitmaps: false, }; let req = rpc_request_mock.request_with_context(Default::default(), msg); let err = service.sync_utxos(req).await.unwrap_err(); @@ -136,41 +135,15 @@ mod sync_utxos { #[tokio::test] async fn it_returns_not_found_if_index_too_large() { let (service, db, rpc_request_mock, _tmp) = setup(); + let gen_block_hash = db.fetch_header(0).unwrap().unwrap().hash(); let (_, chain) = create_main_chain(&db, block_specs!(["A->GB"])).await; let gb = chain.get("GB").unwrap(); let msg = SyncUtxosRequest { - start: 100000000, + start_header_hash: gen_block_hash.to_vec(), end_header_hash: gb.hash().to_vec(), - include_pruned_utxos: true, - include_deleted_bitmaps: false, }; let req = rpc_request_mock.request_with_context(Default::default(), msg); let err = service.sync_utxos(req).await.unwrap_err(); unpack_enum!(RpcStatusCode::NotFound = err.as_status_code()); } - - #[tokio::test] - async fn it_sends_an_offset_response() { - let (service, db, rpc_request_mock, _tmp) = setup(); - - let (_, chain) = create_main_chain(&db, block_specs!(["A->GB"], ["B->A"])).await; - - let block = chain.get("B").unwrap(); - let total_outputs = block.block().header.output_mmr_size; - let start = total_outputs - 2; - let msg = SyncUtxosRequest { - start, - end_header_hash: block.hash().to_vec(), - include_pruned_utxos: true, - include_deleted_bitmaps: false, - }; - let req = rpc_request_mock.request_with_context(Default::default(), msg); - let mut streaming = service.sync_utxos(req).await.unwrap().into_inner(); - let utxo_indexes = convert_mpsc_to_stream(&mut streaming) - .map(|utxo| utxo.unwrap().mmr_index) - .collect::>() - .await; - - assert!(utxo_indexes.iter().all(|index| (start..=start + 2).contains(index))); - } } diff --git a/base_layer/core/src/blocks/accumulated_data.rs 
b/base_layer/core/src/blocks/accumulated_data.rs index d96d6b647a..a151f28c9f 100644 --- a/base_layer/core/src/blocks/accumulated_data.rs +++ b/base_layer/core/src/blocks/accumulated_data.rs @@ -25,18 +25,9 @@ use std::{ sync::Arc, }; -use croaring::Bitmap; use log::*; use num_format::{Locale, ToFormattedString}; -use serde::{ - de, - de::{MapAccess, SeqAccess, Visitor}, - ser::SerializeStruct, - Deserialize, - Deserializer, - Serialize, - Serializer, -}; +use serde::{Deserialize, Serialize}; use tari_common_types::types::{Commitment, HashOutput, PrivateKey}; use tari_mmr::{pruned_hashset::PrunedHashSet, ArrayLike}; use tari_utilities::hex::Hex; @@ -52,32 +43,19 @@ const LOG_TARGET: &str = "c::bn::acc_data"; #[derive(Debug, Serialize, Deserialize)] pub struct BlockAccumulatedData { pub(crate) kernels: PrunedHashSet, - pub(crate) outputs: PrunedHashSet, - pub(crate) deleted: DeletedBitmap, pub(crate) kernel_sum: Commitment, } impl BlockAccumulatedData { - pub fn new(kernels: PrunedHashSet, outputs: PrunedHashSet, deleted: Bitmap, total_kernel_sum: Commitment) -> Self { + pub fn new(kernels: PrunedHashSet, total_kernel_sum: Commitment) -> Self { Self { kernels, - outputs, - deleted: DeletedBitmap { deleted }, kernel_sum: total_kernel_sum, } } - pub fn deleted(&self) -> &Bitmap { - &self.deleted.deleted - } - - pub fn set_deleted(&mut self, deleted: DeletedBitmap) -> &mut Self { - self.deleted = deleted; - self - } - - pub fn dissolve(self) -> (PrunedHashSet, PrunedHashSet, Bitmap) { - (self.kernels, self.outputs, self.deleted.deleted) + pub fn dissolve(self) -> PrunedHashSet { + self.kernels } pub fn kernel_sum(&self) -> &Commitment { @@ -89,10 +67,6 @@ impl Default for BlockAccumulatedData { fn default() -> Self { Self { kernels: Default::default(), - outputs: Default::default(), - deleted: DeletedBitmap { - deleted: Bitmap::create(), - }, kernel_sum: Default::default(), } } @@ -100,141 +74,16 @@ impl Default for BlockAccumulatedData { impl Display for BlockAccumulatedData { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { - write!( - f, - "{} hashes in output MMR, {} spends this block, {} hashes in kernel MMR,", - self.outputs.len().unwrap_or(0), - self.deleted.deleted.cardinality(), - self.kernels.len().unwrap_or(0) - ) + write!(f, "{} hashes in kernel MMR,", self.kernels.len().unwrap_or(0)) } } #[derive(Debug, Clone, Default)] pub struct UpdateBlockAccumulatedData { pub kernel_hash_set: Option, - pub utxo_hash_set: Option, - pub deleted_diff: Option, pub kernel_sum: Option, } -/// Wrapper struct to serialize and deserialize Bitmap -#[derive(Debug, Clone)] -pub struct DeletedBitmap { - deleted: Bitmap, -} - -impl DeletedBitmap { - pub fn into_bitmap(self) -> Bitmap { - self.deleted - } - - pub fn bitmap(&self) -> &Bitmap { - &self.deleted - } - - pub(crate) fn bitmap_mut(&mut self) -> &mut Bitmap { - &mut self.deleted - } -} - -impl From for DeletedBitmap { - fn from(deleted: Bitmap) -> Self { - Self { deleted } - } -} - -impl Serialize for DeletedBitmap { - fn serialize(&self, serializer: S) -> Result<::Ok, ::Error> - where S: Serializer { - let mut s = serializer.serialize_struct("DeletedBitmap", 1)?; - s.serialize_field("deleted", &self.deleted.serialize())?; - s.end() - } -} - -impl<'de> Deserialize<'de> for DeletedBitmap { - fn deserialize(deserializer: D) -> Result>::Error> - where D: Deserializer<'de> { - const FIELDS: &[&str] = &["deleted"]; - - deserializer.deserialize_struct("DeletedBitmap", FIELDS, DeletedBitmapVisitor) - } -} - -struct DeletedBitmapVisitor; - 
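The DeletedBitmap serde machinery being removed here existed only because croaring's Bitmap is not serde-serializable. Under the new model the spent/unspent state lives in the output sparse Merkle tree itself: mining inserts a leaf keyed by the output commitment, spending deletes that leaf, and the tree root is what the header commits to. A minimal sketch of that lifecycle, assuming the tari_mmr sparse_merkle_tree API used elsewhere in this diff and that OutputSmt is an alias for SparseMerkleTree over Blake2b-256; the fixed byte arrays stand in for a real commitment and smt_hash value:

use blake2::Blake2b;
use digest::consts::U32;
use tari_mmr::sparse_merkle_tree::{DeleteResult, NodeKey, SparseMerkleTree, ValueHash};

// Assumed alias; tari_core defines OutputSmt along these lines.
type OutputSmt = SparseMerkleTree<Blake2b<U32>>;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut smt = OutputSmt::new();
    // Mining an output: key = 32-byte commitment, value = output.smt_hash(mined_height).
    let key = NodeKey::try_from([1u8; 32].as_slice())?;
    let value = ValueHash::try_from([2u8; 32].as_slice())?;
    smt.insert(key, value)?;
    // Spending it: deleting the leaf replaces flipping a bit in the deleted bitmap.
    let key = NodeKey::try_from([1u8; 32].as_slice())?;
    match smt.delete(&key)? {
        DeleteResult::Deleted(_value_hash) => {},
        DeleteResult::KeyNotFound => return Err("input not in the unspent set".into()),
    }
    // The root after all inserts and deletes is what header.output_mr commits to.
    println!("output_mr = {:x?}", smt.hash().as_slice());
    Ok(())
}

This is why output_mr in the header changes below are taken from output_smt.hash() rather than Hash(output MMR root || bitmap hash).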
-impl<'de> Visitor<'de> for DeletedBitmapVisitor { - type Value = DeletedBitmap; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("`deleted`") - } - - fn visit_seq(self, mut seq: V) -> Result - where V: SeqAccess<'de> { - let deleted: Vec = seq.next_element()?.ok_or_else(|| de::Error::invalid_length(2, &self))?; - Ok(DeletedBitmap { - deleted: Bitmap::deserialize(&deleted), - }) - } - - fn visit_map(self, mut map: V) -> Result - where V: MapAccess<'de> { - #[derive(Deserialize)] - #[serde(field_identifier, rename_all = "lowercase")] - enum Field { - Deleted, - } - let mut deleted = None; - while let Some(key) = map.next_key()? { - match key { - Field::Deleted => { - if deleted.is_some() { - return Err(de::Error::duplicate_field("deleted")); - } - deleted = Some(map.next_value()?); - }, - } - } - let deleted: Vec = deleted.ok_or_else(|| de::Error::missing_field("deleted"))?; - - Ok(DeletedBitmap { - deleted: Bitmap::deserialize(&deleted), - }) - } -} - -/// Wrapper struct to get a completed bitmap with the height it was created at -#[derive(Debug, Clone)] -pub struct CompleteDeletedBitmap { - deleted: Bitmap, - height: u64, - hash: HashOutput, -} - -impl CompleteDeletedBitmap { - pub fn new(deleted: Bitmap, height: u64, hash: HashOutput) -> CompleteDeletedBitmap { - CompleteDeletedBitmap { deleted, height, hash } - } - - pub fn into_bitmap(self) -> Bitmap { - self.deleted - } - - pub fn bitmap(&self) -> &Bitmap { - &self.deleted - } - - pub fn dissolve(self) -> (Bitmap, u64, HashOutput) { - (self.deleted, self.height, self.hash) - } - - pub fn into_bytes(self) -> Vec { - self.deleted.serialize() - } -} - pub struct BlockHeaderAccumulatedDataBuilder<'a> { previous_accum: &'a BlockHeaderAccumulatedData, hash: Option, diff --git a/base_layer/core/src/blocks/block.rs b/base_layer/core/src/blocks/block.rs index a33f751b72..4f7463d817 100644 --- a/base_layer/core/src/blocks/block.rs +++ b/base_layer/core/src/blocks/block.rs @@ -216,7 +216,7 @@ impl BlockBuilder { pub fn add_transaction(mut self, tx: Transaction) -> Self { let (inputs, outputs, kernels) = tx.body.dissolve(); self = self.add_inputs(inputs); - self.header.output_mmr_size += outputs.len() as u64; + self.header.output_smt_size += outputs.len() as u64; self = self.add_outputs(outputs); self.header.kernel_mmr_size += kernels.len() as u64; self = self.add_kernels(kernels); diff --git a/base_layer/core/src/blocks/block_header.rs b/base_layer/core/src/blocks/block_header.rs index eda64c7728..182878a1f7 100644 --- a/base_layer/core/src/blocks/block_header.rs +++ b/base_layer/core/src/blocks/block_header.rs @@ -100,7 +100,7 @@ pub struct BlockHeader { /// This is calculated as Hash (txo MMR root || roaring bitmap hash of UTXO indices) pub output_mr: FixedHash, /// The size (number of leaves) of the output and range proof MMRs at the time of this header - pub output_mmr_size: u64, + pub output_smt_size: u64, /// This is the MMR root of the kernels pub kernel_mr: FixedHash, /// The number of MMR leaves in the kernel MMR @@ -126,7 +126,7 @@ impl BlockHeader { prev_hash: FixedHash::zero(), timestamp: EpochTime::now(), output_mr: FixedHash::zero(), - output_mmr_size: 0, + output_smt_size: 0, kernel_mr: FixedHash::zero(), kernel_mmr_size: 0, input_mr: FixedHash::zero(), @@ -158,7 +158,7 @@ impl BlockHeader { prev_hash, timestamp: EpochTime::now(), output_mr: FixedHash::zero(), - output_mmr_size: prev.output_mmr_size, + output_smt_size: prev.output_smt_size, kernel_mr: FixedHash::zero(), 
kernel_mmr_size: prev.kernel_mmr_size, input_mr: FixedHash::zero(), @@ -223,7 +223,7 @@ impl BlockHeader { .chain(&self.timestamp) .chain(&self.input_mr) .chain(&self.output_mr) - .chain(&self.output_mmr_size) + .chain(&self.output_smt_size) .chain(&self.kernel_mr) .chain(&self.kernel_mmr_size) .chain(&self.total_kernel_offset) @@ -266,7 +266,7 @@ impl From for BlockHeader { prev_hash: header_template.prev_hash, timestamp: EpochTime::now(), output_mr: FixedHash::zero(), - output_mmr_size: 0, + output_smt_size: 0, kernel_mr: FixedHash::zero(), kernel_mmr_size: 0, input_mr: FixedHash::zero(), @@ -302,7 +302,7 @@ impl Display for BlockHeader { "Merkle roots:\nInputs: {},\nOutputs: {} ({})\n\nKernels: {} ({})", self.input_mr.to_hex(), self.output_mr.to_hex(), - self.output_mmr_size, + self.output_smt_size, self.kernel_mr.to_hex(), self.kernel_mmr_size )?; diff --git a/base_layer/core/src/blocks/genesis_block.rs b/base_layer/core/src/blocks/genesis_block.rs index 95d9e516f5..f0d0c89d78 100644 --- a/base_layer/core/src/blocks/genesis_block.rs +++ b/base_layer/core/src/blocks/genesis_block.rs @@ -26,12 +26,14 @@ use chrono::{DateTime, FixedOffset}; use tari_common::configuration::Network; use tari_common_types::types::{FixedHash, PrivateKey}; use tari_crypto::tari_utilities::hex::*; +use tari_mmr::sparse_merkle_tree::{NodeKey, ValueHash}; use tari_utilities::ByteArray; use crate::{ blocks::{block::Block, BlockHeader, BlockHeaderAccumulatedData, ChainBlock}, proof_of_work::{Difficulty, PowAlgorithm, ProofOfWork}, transactions::{aggregated_body::AggregateBody, transaction_components::TransactionOutput}, + OutputSmt, }; // This can be adjusted as required, but must be limited @@ -64,7 +66,7 @@ fn add_faucet_utxos_to_genesis_block(file: &str, block: &mut Block) { } counter += 1; } - block.header.output_mmr_size += utxos.len() as u64; + block.header.output_smt_size += utxos.len() as u64; block.body.add_outputs(utxos); block.body.sort(); } @@ -75,9 +77,7 @@ fn print_mr_values(block: &mut Block, print: bool) { } use std::convert::TryFrom; - use croaring::Bitmap; - - use crate::{chain_storage::calculate_validator_node_mr, KernelMmr, MutableOutputMmr}; + use crate::{chain_storage::calculate_validator_node_mr, KernelMmr}; let mut kernel_mmr = KernelMmr::new(Vec::new()); for k in block.body.kernels() { @@ -85,15 +85,17 @@ fn print_mr_values(block: &mut Block, print: bool) { kernel_mmr.push(k.hash().to_vec()).unwrap(); } - let mut output_mmr = MutableOutputMmr::new(Vec::new(), Bitmap::create()).unwrap(); + let mut output_smt = OutputSmt::new(); for o in block.body.outputs() { - output_mmr.push(o.hash().to_vec()).unwrap(); + let smt_key = NodeKey::try_from(o.commitment.as_bytes()).unwrap(); + let smt_node = ValueHash::try_from(o.smt_hash(block.header.height).as_slice()).unwrap(); + output_smt.insert(smt_key, smt_node).unwrap(); } let vn_mmr = calculate_validator_node_mr(&[]); block.header.kernel_mr = FixedHash::try_from(kernel_mmr.get_merkle_root().unwrap()).unwrap(); - block.header.output_mr = FixedHash::try_from(output_mmr.get_merkle_root().unwrap()).unwrap(); + block.header.output_mr = FixedHash::try_from(output_smt.hash().as_slice()).unwrap(); block.header.validator_node_mr = FixedHash::try_from(vn_mmr).unwrap(); println!(); println!("kernel mr: {}", block.header.kernel_mr.to_hex()); @@ -304,7 +306,7 @@ pub fn get_esmeralda_genesis_block() -> ChainBlock { fn get_esmeralda_genesis_block_raw() -> Block { // Set genesis timestamp - let genesis_timestamp = DateTime::parse_from_rfc2822("31 Aug 2023 08:01:00 
+0200").expect("parse may not fail"); + let genesis_timestamp = DateTime::parse_from_rfc2822("09 Oct 2023 08:01:00 +0200").expect("parse may not fail"); // Let us add a "not before" proof to the genesis block let not_before_proof = b"as I sip my drink, thoughts of esmeralda consume my mind, like a refreshing nourishing draught \ @@ -337,7 +339,7 @@ fn get_raw_block(genesis_timestamp: &DateTime, not_before_proof: &[ prev_hash: FixedHash::zero(), timestamp: timestamp.into(), output_mr: FixedHash::from_hex("daab077d6dadb830bf506cc55c82abc6c3563bec6ff1d5699806f8b13059b4c3").unwrap(), - output_mmr_size: 0, + output_smt_size: 0, kernel_mr: FixedHash::from_hex("c14803066909d6d22abf0d2d2782e8936afc3f713f2af3a4ef5c42e8400c1303").unwrap(), kernel_mmr_size: 0, validator_node_mr: FixedHash::from_hex("277da65c40b2cf99db86baedb903a3f0a38540f3a94d40c826eecac7e27d5dfc") @@ -363,7 +365,8 @@ fn get_raw_block(genesis_timestamp: &DateTime, not_before_proof: &[ #[cfg(test)] mod test { - use croaring::Bitmap; + use std::convert::TryFrom; + use tari_common_types::{epoch::VnEpoch, types::Commitment}; use tari_utilities::ByteArray; @@ -378,7 +381,6 @@ mod test { }, validation::{ChainBalanceValidator, FinalHorizonStateValidation}, KernelMmr, - MutableOutputMmr, }; #[test] @@ -430,7 +432,7 @@ mod test { ); assert_eq!( block.block().body.outputs().len() as u64, - block.header().output_mmr_size + block.header().output_smt_size ); for kernel in block.block().body.kernels() { @@ -449,12 +451,14 @@ mod test { for k in block.block().body.kernels() { kernel_mmr.push(k.hash().to_vec()).unwrap(); } + let mut output_smt = OutputSmt::new(); - let mut output_mmr = MutableOutputMmr::new(Vec::new(), Bitmap::create()).unwrap(); let mut vn_nodes = Vec::new(); for o in block.block().body.outputs() { + let smt_key = NodeKey::try_from(o.commitment.as_bytes()).unwrap(); + let smt_node = ValueHash::try_from(o.smt_hash(block.header().height).as_slice()).unwrap(); + output_smt.insert(smt_key, smt_node).unwrap(); o.verify_metadata_signature().unwrap(); - output_mmr.push(o.hash().to_vec()).unwrap(); if matches!(o.features.output_type, OutputType::ValidatorNodeRegistration) { let reg = o .features @@ -470,7 +474,10 @@ mod test { } assert_eq!(kernel_mmr.get_merkle_root().unwrap(), block.header().kernel_mr,); - assert_eq!(output_mmr.get_merkle_root().unwrap(), block.header().output_mr,); + assert_eq!( + FixedHash::try_from(output_smt.hash().as_slice()).unwrap(), + block.header().output_mr, + ); assert_eq!(calculate_validator_node_mr(&vn_nodes), block.header().validator_node_mr,); // Check that the faucet UTXOs balance (the faucet_value consensus constant is set correctly and faucet kernel diff --git a/base_layer/core/src/blocks/historical_block.rs b/base_layer/core/src/blocks/historical_block.rs index 18847da486..65524a3ec7 100644 --- a/base_layer/core/src/blocks/historical_block.rs +++ b/base_layer/core/src/blocks/historical_block.rs @@ -24,7 +24,6 @@ use std::{fmt, fmt::Display, sync::Arc}; use serde::{Deserialize, Serialize}; use tari_common_types::types::HashOutput; -use tari_utilities::hex::Hex; use crate::blocks::{error::BlockError, Block, BlockHeader, BlockHeaderAccumulatedData, ChainBlock}; @@ -39,24 +38,14 @@ pub struct HistoricalBlock { block: Block, /// Accumulated data in the block header accumulated_data: BlockHeaderAccumulatedData, - pruned_outputs: Vec, - pruned_input_count: u64, } impl HistoricalBlock { - pub fn new( - block: Block, - confirmations: u64, - accumulated_data: BlockHeaderAccumulatedData, - pruned_outputs: Vec, - 
pruned_input_count: u64, - ) -> Self { + pub fn new(block: Block, confirmations: u64, accumulated_data: BlockHeaderAccumulatedData) -> Self { HistoricalBlock { confirmations, block, accumulated_data, - pruned_outputs, - pruned_input_count, } } @@ -85,23 +74,7 @@ impl HistoricalBlock { &self.accumulated_data.hash } - pub fn contains_pruned_txos(&self) -> bool { - !self.pruned_outputs.is_empty() || self.pruned_input_count > 0 - } - - pub fn try_into_block(self) -> Result { - if self.contains_pruned_txos() { - Err(BlockError::HistoricalBlockContainsPrunedTxos) - } else { - Ok(self.block) - } - } - pub fn try_into_chain_block(self) -> Result { - if self.contains_pruned_txos() { - return Err(BlockError::HistoricalBlockContainsPrunedTxos); - } - let chain_block = ChainBlock::try_construct(Arc::new(self.block), self.accumulated_data).ok_or_else(|| { BlockError::ChainBlockInvariantError( "Unable to construct ChainBlock because of a hash mismatch".to_string(), @@ -111,30 +84,14 @@ impl HistoricalBlock { Ok(chain_block) } - pub fn pruned_outputs(&self) -> &[HashOutput] { - self.pruned_outputs.as_slice() - } - - pub fn dissolve(self) -> (Block, BlockHeaderAccumulatedData, u64, u64) { - ( - self.block, - self.accumulated_data, - self.confirmations, - self.pruned_input_count, - ) + pub fn dissolve(self) -> (Block, BlockHeaderAccumulatedData, u64) { + (self.block, self.accumulated_data, self.confirmations) } } impl Display for HistoricalBlock { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "{}", self.block())?; - - if !self.pruned_outputs.is_empty() { - writeln!(f, "Pruned outputs: ")?; - for output in &self.pruned_outputs { - writeln!(f, "Output hash: {}", output.to_hex())?; - } - } Ok(()) } } diff --git a/base_layer/core/src/blocks/mod.rs b/base_layer/core/src/blocks/mod.rs index 6df31971e8..e3f06abb9f 100644 --- a/base_layer/core/src/blocks/mod.rs +++ b/base_layer/core/src/blocks/mod.rs @@ -29,8 +29,6 @@ pub use accumulated_data::{ BlockHeaderAccumulatedData, ChainBlock, ChainHeader, - CompleteDeletedBitmap, - DeletedBitmap, UpdateBlockAccumulatedData, }; use tari_crypto::hash_domain; diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index f78e6e8f93..9ce241901a 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -21,12 +21,11 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
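One consequence of dropping the bitmap, worth keeping in mind while reading the async_db changes below: spent status is no longer looked up via fetch_deleted_bitmap, but by testing whether an output's commitment-derived key is still present in the tip SMT, exactly as the fetch_utxos change further down does. A hypothetical stand-alone helper (paths, signatures and the SMTError-to-ChainStorageError conversion are assumptions based on the imports in this diff):

use tari_core::{chain_storage::ChainStorageError, transactions::transaction_components::TransactionOutput, OutputSmt};
use tari_mmr::sparse_merkle_tree::NodeKey;
use tari_utilities::ByteArray;

fn is_spent(smt: &OutputSmt, output: &TransactionOutput) -> Result<bool, ChainStorageError> {
    // Unspent outputs are present in the tip SMT under their commitment-derived key;
    // a missing key means the output was spent (or never existed).
    let smt_key = NodeKey::try_from(output.commitment.as_bytes())?;
    Ok(!smt.contains(&smt_key))
}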
use std::{mem, ops::RangeBounds, sync::Arc, time::Instant}; -use croaring::Bitmap; use log::*; use rand::{rngs::OsRng, RngCore}; use tari_common_types::{ chain_metadata::ChainMetadata, - types::{BlockHash, Commitment, HashOutput, PublicKey, Signature}, + types::{BlockHash, Commitment, FixedHash, HashOutput, PublicKey, Signature}, }; use tari_utilities::epoch_time::EpochTime; @@ -39,15 +38,13 @@ use crate::{ BlockHeaderAccumulatedData, ChainBlock, ChainHeader, - CompleteDeletedBitmap, - DeletedBitmap, HistoricalBlock, NewBlockTemplate, UpdateBlockAccumulatedData, }, chain_storage::{ blockchain_database::MmrRoots, - utxo_mined_info::UtxoMinedInfo, + utxo_mined_info::{TxoMinedInfo, UtxoMinedInfo}, BlockAddResult, BlockchainBackend, BlockchainDatabase, @@ -57,7 +54,6 @@ use crate::{ DbTransaction, HorizonData, MmrTree, - PrunedOutput, TargetDifficulties, }, common::rolling_vec::RollingVec, @@ -155,15 +151,17 @@ impl AsyncBlockchainDb { make_async_fn!(fetch_horizon_data() -> HorizonData, "fetch_horizon_data"); //---------------------------------- TXO --------------------------------------------// - make_async_fn!(fetch_utxo(hash: HashOutput) -> Option, "fetch_utxo"); + make_async_fn!(fetch_utxo(hash: HashOutput) -> Option, "fetch_utxo"); - make_async_fn!(fetch_utxos(hashes: Vec) -> Vec>, "fetch_utxos"); + make_async_fn!(fetch_utxos(hashes: Vec) -> Vec>, "fetch_utxos"); make_async_fn!(fetch_utxos_and_mined_info(hashes: Vec) -> Vec>, "fetch_utxos_and_mined_info"); - make_async_fn!(fetch_utxos_in_block(hash: HashOutput, deleted: Option>) -> (Vec, Bitmap), "fetch_utxos_in_block"); + make_async_fn!(fetch_txos_and_mined_info(hashes: Vec) -> Vec>, "fetch_txos_and_mined_info"); - make_async_fn!(fetch_outputs_in_block(hash: HashOutput) -> Vec, "fetch_outputs_in_block"); + make_async_fn!(fetch_utxos_in_block(hash: HashOutput, spend_header: Option) -> Vec<(TransactionOutput, bool)>, "fetch_utxos_in_block"); + + make_async_fn!(fetch_outputs_in_block(hash: HashOutput) -> Vec, "fetch_outputs_in_block"); make_async_fn!(utxo_count() -> usize, "utxo_count"); @@ -194,8 +192,6 @@ impl AsyncBlockchainDb { make_async_fn!(fetch_header_containing_kernel_mmr(mmr_position: u64) -> ChainHeader, "fetch_header_containing_kernel_mmr"); - make_async_fn!(fetch_header_containing_utxo_mmr(mmr_position: u64) -> ChainHeader, "fetch_header_containing_utxo_mmr"); - make_async_fn!(fetch_chain_header_by_block_hash(hash: HashOutput) -> Option, "fetch_chain_header_by_block_hash"); make_async_fn!( @@ -259,12 +255,6 @@ impl AsyncBlockchainDb { make_async_fn!(fetch_block_hashes_from_header_tip(n: usize, offset: usize) -> Vec, "fetch_block_hashes_from_header_tip"); - make_async_fn!(fetch_complete_deleted_bitmap_at(hash: HashOutput) -> CompleteDeletedBitmap, "fetch_deleted_bitmap"); - - make_async_fn!(fetch_deleted_bitmap_at_tip() -> DeletedBitmap, "fetch_deleted_bitmap_at_tip"); - - make_async_fn!(fetch_header_hash_by_deleted_mmr_positions(mmr_positions: Vec) -> Vec>, "fetch_headers_of_deleted_positions"); - make_async_fn!(get_stats() -> DbBasicStats, "get_stats"); make_async_fn!(fetch_total_size_stats() -> DbTotalSizeStats, "fetch_total_size_stats"); @@ -346,24 +336,10 @@ impl<'a, B: BlockchainBackend + 'static> AsyncDbTransaction<'a, B> { output: TransactionOutput, header_hash: HashOutput, header_height: u64, - mmr_position: u32, timestamp: u64, ) -> &mut Self { self.transaction - .insert_utxo(output, header_hash, header_height, mmr_position, timestamp); - self - } - - pub fn insert_pruned_output_via_horizon_sync( - &mut self, - 
output_hash: HashOutput, - header_hash: HashOutput, - header_height: u64, - mmr_position: u32, - timestamp: u64, - ) -> &mut Self { - self.transaction - .insert_pruned_utxo(output_hash, header_hash, header_height, mmr_position, timestamp); + .insert_utxo(output, header_hash, header_height, timestamp); self } @@ -376,19 +352,13 @@ impl<'a, B: BlockchainBackend + 'static> AsyncDbTransaction<'a, B> { self } - /// Updates the deleted tip bitmap with the indexes of the given bitmap. - pub fn update_deleted_bitmap(&mut self, deleted: Bitmap) -> &mut Self { - self.transaction.update_deleted_bitmap(deleted); - self - } - pub fn insert_chain_header(&mut self, chain_header: ChainHeader) -> &mut Self { self.transaction.insert_chain_header(chain_header); self } - pub fn insert_block_body(&mut self, block: Arc) -> &mut Self { - self.transaction.insert_block_body(block); + pub fn insert_tip_block_body(&mut self, block: Arc) -> &mut Self { + self.transaction.insert_tip_block_body(block); self } @@ -402,8 +372,8 @@ impl<'a, B: BlockchainBackend + 'static> AsyncDbTransaction<'a, B> { self } - pub fn prune_outputs_at_positions(&mut self, positions: Vec) -> &mut Self { - self.transaction.prune_outputs_at_positions(positions); + pub fn prune_outputs_at_positions(&mut self, block_hash: BlockHash) -> &mut Self { + self.transaction.prune_outputs_spent_at_hash(block_hash); self } diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index c48e54cd49..56b634a4ca 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -1,25 +1,15 @@ // Copyright 2022 The Tari Project // SPDX-License-Identifier: BSD-3-Clause -use croaring::Bitmap; use tari_common_types::{ chain_metadata::ChainMetadata, - types::{Commitment, HashOutput, PublicKey, Signature}, + types::{Commitment, FixedHash, HashOutput, PublicKey, Signature}, }; use super::TemplateRegistrationEntry; use crate::{ - blocks::{ - Block, - BlockAccumulatedData, - BlockHeader, - BlockHeaderAccumulatedData, - ChainBlock, - ChainHeader, - DeletedBitmap, - }, + blocks::{Block, BlockAccumulatedData, BlockHeader, BlockHeaderAccumulatedData, ChainBlock, ChainHeader}, chain_storage::{ - pruned_output::PrunedOutput, ChainStorageError, DbBasicStats, DbKey, @@ -29,9 +19,11 @@ use crate::{ HorizonData, MmrTree, Reorg, + TxoMinedInfo, UtxoMinedInfo, }, - transactions::transaction_components::{TransactionInput, TransactionKernel}, + transactions::transaction_components::{TransactionInput, TransactionKernel, TransactionOutput}, + OutputSmt, }; /// Identify behaviour for Blockchain database backends. Implementations must support `Send` and `Sync` so that @@ -70,8 +62,6 @@ pub trait BlockchainBackend: Send + Sync { fn fetch_header_containing_kernel_mmr(&self, mmr_position: u64) -> Result; - fn fetch_header_containing_utxo_mmr(&self, mmr_position: u64) -> Result; - /// Used to determine if the database is empty, i.e. a brand new database. /// This is called to decide if the genesis block should be created. fn is_empty(&self) -> Result; @@ -101,12 +91,15 @@ pub trait BlockchainBackend: Send + Sync { fn fetch_utxos_in_block( &self, header_hash: &HashOutput, - deleted: Option<&Bitmap>, - ) -> Result<(Vec, Bitmap), ChainStorageError>; + spend_header: Option, + ) -> Result, ChainStorageError>; /// Fetch a specific output. 
Returns the output and the leaf index in the output MMR fn fetch_output(&self, output_hash: &HashOutput) -> Result<Option<UtxoMinedInfo>, ChainStorageError>; + /// Fetch a specific input. Returns the input and the block it was spent in, if known + fn fetch_input(&self, input_hash: &HashOutput) -> Result<Option<TxoMinedInfo>, ChainStorageError>; + /// Returns the unspent TransactionOutput output that matches the given commitment if it exists in the current UTXO /// set, otherwise None is returned. fn fetch_unspent_output_hash_by_commitment( &self, commitment: &Commitment, ) -> Result<Option<HashOutput>, ChainStorageError>; /// Fetch all outputs in a block - fn fetch_outputs_in_block(&self, header_hash: &HashOutput) -> Result<Vec<PrunedOutput>, ChainStorageError>; + fn fetch_outputs_in_block(&self, header_hash: &HashOutput) -> Result<Vec<TransactionOutput>, ChainStorageError>; /// Fetch all inputs in a block fn fetch_inputs_in_block(&self, header_hash: &HashOutput) -> Result<Vec<TransactionInput>, ChainStorageError>; @@ -123,9 +116,6 @@ /// Fetches the total merkle mountain range node count up to the specified height. fn fetch_mmr_size(&self, tree: MmrTree) -> Result<u64, ChainStorageError>; - /// Fetches the leaf index of the provided leaf node hash in the given MMR tree. - #[allow(clippy::ptr_arg)] - fn fetch_mmr_leaf_index(&self, tree: MmrTree, hash: &HashOutput) -> Result<Option<u32>, ChainStorageError>; /// Returns the number of blocks in the block orphan pool. fn orphan_count(&self) -> Result<usize, ChainStorageError>; /// Returns the stored header with the highest corresponding height. @@ -155,9 +145,6 @@ fn fetch_orphan_chain_block(&self, hash: HashOutput) -> Result<Option<ChainBlock>, ChainStorageError>; - /// Returns the full deleted bitmap at the current blockchain tip - fn fetch_deleted_bitmap(&self) -> Result<DeletedBitmap, ChainStorageError>; - /// Delete orphans according to age. Used to keep the orphan pool at a certain capacity fn delete_oldest_orphans( &mut self, @@ -177,12 +164,6 @@ /// lock for the duration.
fn fetch_total_size_stats(&self) -> Result; - /// Returns a (block height/hash) tuple for each mmr position of the height it was spent, or None if it is not spent - fn fetch_header_hash_by_deleted_mmr_positions( - &self, - mmr_positions: Vec, - ) -> Result>, ChainStorageError>; - /// Check if a block hash is in the bad block list fn bad_block_exists(&self, block_hash: HashOutput) -> Result; @@ -200,4 +181,5 @@ pub trait BlockchainBackend: Send + Sync { start_height: u64, end_height: u64, ) -> Result, ChainStorageError>; + fn fetch_tip_smt(&self) -> Result; } diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index eced39eb10..065bdf4b99 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -31,14 +31,16 @@ use std::{ time::Instant, }; -use croaring::Bitmap; use log::*; use serde::{Deserialize, Serialize}; use tari_common_types::{ chain_metadata::ChainMetadata, types::{BlockHash, Commitment, FixedHash, HashOutput, PublicKey, Signature}, }; -use tari_mmr::pruned_hashset::PrunedHashSet; +use tari_mmr::{ + pruned_hashset::PrunedHashSet, + sparse_merkle_tree::{DeleteResult, NodeKey, ValueHash}, +}; use tari_utilities::{epoch_time::EpochTime, hex::Hex, ByteArray}; use super::TemplateRegistrationEntry; @@ -51,8 +53,6 @@ use crate::{ BlockHeaderValidationError, ChainBlock, ChainHeader, - CompleteDeletedBitmap, - DeletedBitmap, HistoricalBlock, NewBlockTemplate, UpdateBlockAccumulatedData, @@ -65,7 +65,6 @@ use crate::{ }, db_transaction::{DbKey, DbTransaction, DbValue}, error::ChainStorageError, - pruned_output::PrunedOutput, utxo_mined_info::UtxoMinedInfo, BlockAddResult, BlockchainBackend, @@ -77,6 +76,7 @@ use crate::{ OrNotFound, Reorg, TargetDifficulties, + TxoMinedInfo, }, common::rolling_vec::RollingVec, consensus::{ @@ -87,7 +87,7 @@ use crate::{ }, proof_of_work::{monero_rx::MoneroPowData, PowAlgorithm, TargetDifficultyWindow}, transactions::{ - transaction_components::{TransactionInput, TransactionKernel}, + transaction_components::{TransactionInput, TransactionKernel, TransactionOutput}, TransactionHashDomain, }, validation::{ @@ -98,7 +98,7 @@ use crate::{ InternalConsistencyValidator, ValidationError, }, - MutablePrunedOutputMmr, + OutputSmt, PrunedInputMmr, PrunedKernelMmr, ValidatorNodeBMT, @@ -238,8 +238,12 @@ where B: BlockchainBackend "Blockchain db is empty. Adding genesis block {}.", genesis_block.block().body.to_counts_string() ); - blockchain_db.insert_block(genesis_block.clone())?; let mut txn = DbTransaction::new(); + let smt = OutputSmt::new(); + txn.insert_tip_smt(smt); + blockchain_db.write(txn)?; + txn = DbTransaction::new(); + blockchain_db.insert_block(genesis_block.clone())?; let body = &genesis_block.block().body; let utxo_sum = body.outputs().iter().map(|k| &k.commitment).sum::(); let kernel_sum = body.kernels().iter().map(|k| &k.excess).sum::(); @@ -377,7 +381,7 @@ where B: BlockchainBackend } // Fetch the utxo - pub fn fetch_utxo(&self, hash: HashOutput) -> Result, ChainStorageError> { + pub fn fetch_utxo(&self, hash: HashOutput) -> Result, ChainStorageError> { let db = self.db_read_access()?; Ok(db.fetch_output(&hash)?.map(|mined_info| mined_info.output)) } @@ -392,15 +396,22 @@ where B: BlockchainBackend /// Return a list of matching utxos, with each being `None` if not found. If found, the transaction /// output, and a boolean indicating if the UTXO was spent as of the current tip. 
- pub fn fetch_utxos(&self, hashes: Vec) -> Result>, ChainStorageError> { + pub fn fetch_utxos( + &self, + hashes: Vec, + ) -> Result>, ChainStorageError> { let db = self.db_read_access()?; - let deleted = db.fetch_deleted_bitmap()?; + let smt = db.fetch_tip_smt()?; let mut result = Vec::with_capacity(hashes.len()); for hash in hashes { let output = db.fetch_output(&hash)?; - result - .push(output.map(|mined_info| (mined_info.output, deleted.bitmap().contains(mined_info.mmr_position)))); + + result.push(output.map(|mined_info| { + let smt_key = NodeKey::try_from(mined_info.output.commitment.as_bytes()).unwrap(); + let spent = !smt.contains(&smt_key); + (mined_info.output, spent) + })); } Ok(result) } @@ -419,6 +430,20 @@ where B: BlockchainBackend Ok(result) } + pub fn fetch_txos_and_mined_info( + &self, + hashes: Vec, + ) -> Result>, ChainStorageError> { + let db = self.db_read_access()?; + + let mut result = Vec::with_capacity(hashes.len()); + for hash in hashes { + let input = db.fetch_input(&hash)?; + result.push(input); + } + Ok(result) + } + pub fn fetch_kernel_by_excess_sig( &self, excess_sig: Signature, @@ -435,13 +460,13 @@ where B: BlockchainBackend pub fn fetch_utxos_in_block( &self, hash: HashOutput, - deleted: Option>, - ) -> Result<(Vec, Bitmap), ChainStorageError> { + spend_header: Option, + ) -> Result, ChainStorageError> { let db = self.db_read_access()?; - db.fetch_utxos_in_block(&hash, deleted.as_deref()) + db.fetch_utxos_in_block(&hash, spend_header) } - pub fn fetch_outputs_in_block(&self, hash: HashOutput) -> Result, ChainStorageError> { + pub fn fetch_outputs_in_block(&self, hash: HashOutput) -> Result, ChainStorageError> { let db = self.db_read_access()?; db.fetch_outputs_in_block(&hash) } @@ -474,11 +499,6 @@ where B: BlockchainBackend db.fetch_header_containing_kernel_mmr(mmr_position) } - pub fn fetch_header_containing_utxo_mmr(&self, mmr_position: u64) -> Result { - let db = self.db_read_access()?; - db.fetch_header_containing_utxo_mmr(mmr_position) - } - /// Find the first matching header in a list of block hashes, returning the index of the match and the BlockHeader. /// Or None if not found. pub fn find_headers_after_hash>( @@ -826,7 +846,7 @@ where B: BlockchainBackend block.header.kernel_mmr_size = roots.kernel_mmr_size; block.header.input_mr = roots.input_mr; block.header.output_mr = roots.output_mr; - block.header.output_mmr_size = roots.output_mmr_size; + block.header.output_smt_size = roots.output_smt_size; block.header.validator_node_mr = roots.validator_node_mr; Ok(block) } @@ -1150,49 +1170,6 @@ where B: BlockchainBackend Ok(db.fetch_horizon_data()?.unwrap_or_default()) } - pub fn fetch_complete_deleted_bitmap_at( - &self, - hash: HashOutput, - ) -> Result { - let db = self.db_read_access()?; - let mut deleted = db.fetch_deleted_bitmap()?.into_bitmap(); - - let end_header = - fetch_header_by_block_hash(&*db, hash).or_not_found("BlockHeader", "start_hash", hash.to_hex())?; - let chain_metadata = db.fetch_chain_metadata()?; - let height = chain_metadata.height_of_longest_chain(); - for i in end_header.height..height { - // order here does not matter, we dont have to go in reverse - deleted.xor_inplace( - db.fetch_block_accumulated_data_by_height(i + 1) - .or_not_found("BlockAccumulatedData", "height", height.to_string())? 
- .deleted(), - ); - } - Ok(CompleteDeletedBitmap::new( - deleted, - height, - *chain_metadata.best_block(), - )) - } - - pub fn fetch_deleted_bitmap_at_tip(&self) -> Result { - let db = self.db_read_access()?; - db.fetch_deleted_bitmap() - } - - pub fn fetch_header_hash_by_deleted_mmr_positions( - &self, - mmr_positions: Vec, - ) -> Result>, ChainStorageError> { - if mmr_positions.is_empty() { - return Ok(Vec::new()); - } - - let db = self.db_read_access()?; - db.fetch_header_hash_by_deleted_mmr_positions(mmr_positions) - } - pub fn get_stats(&self) -> Result { let lock = self.db_read_access()?; lock.get_stats() @@ -1253,7 +1230,7 @@ pub struct MmrRoots { pub kernel_mmr_size: u64, pub input_mr: FixedHash, pub output_mr: FixedHash, - pub output_mmr_size: u64, + pub output_smt_size: u64, pub validator_node_mr: FixedHash, } @@ -1264,7 +1241,7 @@ impl std::fmt::Display for MmrRoots { writeln!(f, "Kernel MR : {}", self.kernel_mr)?; writeln!(f, "Kernel MMR Size : {}", self.kernel_mmr_size)?; writeln!(f, "Output MR : {}", self.output_mr)?; - writeln!(f, "Output MMR Size : {}", self.output_mmr_size)?; + writeln!(f, "Output SMT Size : {}", self.output_smt_size)?; writeln!(f, "Validator MR : {}", self.validator_node_mr)?; Ok(()) } @@ -1290,39 +1267,28 @@ pub fn calculate_mmr_roots( metadata.best_block(), ))); } - let deleted = db.fetch_deleted_bitmap()?.into_bitmap(); - let BlockAccumulatedData { kernels, outputs, .. } = db - .fetch_block_accumulated_data(&header.prev_hash)? - .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockAccumulatedData", - field: "header_hash", - value: header.prev_hash.to_hex(), - })?; + let BlockAccumulatedData { kernels, .. } = + db.fetch_block_accumulated_data(&header.prev_hash)? + .ok_or_else(|| ChainStorageError::ValueNotFound { + entity: "BlockAccumulatedData", + field: "header_hash", + value: header.prev_hash.to_hex(), + })?; let mut kernel_mmr = PrunedKernelMmr::new(kernels); - let mut output_mmr = MutablePrunedOutputMmr::new(outputs, deleted)?; + let mut output_smt = db.fetch_tip_smt()?; let mut input_mmr = PrunedInputMmr::new(PrunedHashSet::default()); - let mut deleted_outputs = Vec::new(); for kernel in body.kernels().iter() { kernel_mmr.push(kernel.hash().to_vec())?; } for output in body.outputs().iter() { - let output_hash = output.hash(); - let output_mmr_hash = output_hash.to_vec(); - output_mmr.push(output_mmr_hash.clone())?; - if output.is_burned() { - let index = - output_mmr - .find_leaf_index(&output_mmr_hash)? - .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "UTXO", - field: "hash", - value: output_hash.to_hex(), - })?; - deleted_outputs.push((index, output_hash)); + if !output.is_burned() { + let smt_key = NodeKey::try_from(output.commitment.as_bytes())?; + let smt_node = ValueHash::try_from(output.smt_hash(header.height).as_slice())?; + output_smt.insert(smt_key, smt_node)?; } } @@ -1331,41 +1297,11 @@ pub fn calculate_mmr_roots( // Search the DB for the output leaf index so that it can be marked as spent/deleted. // If the output hash is not found, check the current output_mmr. This allows zero-conf transactions - let output_hash = input.output_hash(); - let index = match db.fetch_mmr_leaf_index(MmrTree::Utxo, &output_hash)? 
{ - Some(index) => index, - None => { - let index = output_mmr.find_leaf_index(&output_hash.to_vec())?.ok_or_else(|| { - ChainStorageError::ValueNotFound { - entity: "UTXO", - field: "hash", - value: output_hash.to_hex(), - } - })?; - debug!( - target: LOG_TARGET, - "0-conf spend detected when calculating MMR roots for UTXO index {} ({})", index, output_hash, - ); - index - }, + let smt_key = NodeKey::try_from(input.commitment()?.as_bytes())?; + match output_smt.delete(&smt_key)? { + DeleteResult::Deleted(_value_hash) => {}, + DeleteResult::KeyNotFound => return Err(ChainStorageError::UnspendableInput), }; - deleted_outputs.push((index, output_hash)); - } - for (index, output_hash) in deleted_outputs { - if !output_mmr.delete(index) { - let num_leaves = u32::try_from(output_mmr.get_leaf_count()) - .map_err(|_| ChainStorageError::CriticalError("UTXO MMR leaf count overflows u32".to_string()))?; - if index < num_leaves && output_mmr.deleted().contains(index) { - return Err(ChainStorageError::InvalidOperation(format!( - "UTXO {} was already marked as deleted.", - output_hash, - ))); - } - return Err(ChainStorageError::InvalidOperation(format!( - "Could not delete index {} from the output MMR ({} leaves)", - index, num_leaves - ))); - } } let block_height = block.header.height; @@ -1384,8 +1320,8 @@ pub fn calculate_mmr_roots( kernel_mr: FixedHash::try_from(kernel_mmr.get_merkle_root()?)?, kernel_mmr_size: kernel_mmr.get_leaf_count()? as u64, input_mr: FixedHash::try_from(input_mmr.get_merkle_root()?)?, - output_mr: FixedHash::try_from(output_mmr.get_merkle_root()?)?, - output_mmr_size: output_mmr.get_leaf_count() as u64, + output_mr: FixedHash::try_from(output_smt.hash().as_slice())?, + output_smt_size: output_smt.size(), validator_node_mr, }; Ok(mmr_roots) @@ -1421,7 +1357,7 @@ pub fn fetch_headers( // Allow the headers to be returned in reverse order #[allow(clippy::cast_possible_truncation)] - let mut headers = Vec::with_capacity((end_inclusive - start) as usize); + let mut headers = Vec::with_capacity(end_inclusive.saturating_sub(start) as usize); for h in start..=end_inclusive { match db.fetch(&DbKey::HeaderHeight(h))? { Some(DbValue::HeaderHeight(header)) => { @@ -1528,7 +1464,7 @@ fn insert_best_block(txn: &mut DbTransaction, block: Arc) -> Result< let accumulated_difficulty = block.accumulated_data().total_accumulated_difficulty; let expected_prev_best_block = block.block().header.prev_hash; txn.insert_chain_header(block.to_chain_header()) - .insert_block_body(block) + .insert_tip_block_body(block) .set_best_block( height, block_hash, @@ -1576,7 +1512,7 @@ pub fn fetch_target_difficulty_for_next_block( fn fetch_block(db: &T, height: u64, compact: bool) -> Result { let mark = Instant::now(); - let (tip_height, is_pruned) = check_for_valid_height(db, height)?; + let (tip_height, _is_pruned) = check_for_valid_height(db, height)?; let chain_header = db.fetch_chain_header_by_height(height)?; let (header, accumulated_data) = chain_header.into_parts(); let kernels = db.fetch_kernels_in_block(&accumulated_data.hash)?; @@ -1599,66 +1535,32 @@ fn fetch_block(db: &T, height: u64, compact: bool) -> Resu Err(e) => return Err(e), }; - match utxo_mined_info.output { - PrunedOutput::Pruned { .. 
} => Ok(compact_input), - PrunedOutput::NotPruned { output } => { - let rp_hash = match output.proof { - Some(proof) => proof.hash(), - None => FixedHash::zero(), - }; - compact_input.add_output_data( - output.version, - output.features, - output.commitment, - output.script, - output.sender_offset_public_key, - output.covenant, - output.encrypted_data, - output.metadata_signature, - rp_hash, - output.minimum_value_promise, - ); - Ok(compact_input) - }, - } + let rp_hash = match utxo_mined_info.output.proof { + Some(proof) => proof.hash(), + None => FixedHash::zero(), + }; + compact_input.add_output_data( + utxo_mined_info.output.version, + utxo_mined_info.output.features, + utxo_mined_info.output.commitment, + utxo_mined_info.output.script, + utxo_mined_info.output.sender_offset_public_key, + utxo_mined_info.output.covenant, + utxo_mined_info.output.encrypted_data, + utxo_mined_info.output.metadata_signature, + rp_hash, + utxo_mined_info.output.minimum_value_promise, + ); + Ok(compact_input) }) .collect::, _>>()?; - let mut unpruned = vec![]; - let mut pruned = vec![]; - for output in outputs { - match output { - PrunedOutput::Pruned { output_hash } => { - pruned.push(output_hash); - }, - PrunedOutput::NotPruned { output } => unpruned.push(output), - } - } - - let mut pruned_input_count = 0; - - if is_pruned { - let mut deleted = db - .fetch_block_accumulated_data_by_height(height) - .or_not_found("BlockAccumulatedData", "height", height.to_string())? - .deleted() - .clone(); - if height > 0 { - let prev = db - .fetch_block_accumulated_data_by_height(height - 1) - .or_not_found("BlockAccumulatedData", "height", (height - 1).to_string())? - .deleted() - .clone(); - deleted -= prev; - } - - pruned_input_count = deleted.cardinality(); - } + // let inputs = db.fetch_inputs_in_block(&accumulated_data.hash)?; let block = header .into_builder() .add_inputs(inputs) - .add_outputs(unpruned) + .add_outputs(outputs) .add_kernels(kernels) .build(); trace!( @@ -1667,13 +1569,7 @@ fn fetch_block(db: &T, height: u64, compact: bool) -> Resu height, mark.elapsed() ); - Ok(HistoricalBlock::new( - block, - tip_height - height + 1, - accumulated_data, - pruned, - pruned_input_count, - )) + Ok(HistoricalBlock::new(block, tip_height - height + 1, accumulated_data)) } fn fetch_blocks( @@ -2469,25 +2365,14 @@ fn prune_to_height(db: &mut T, target_horizon_height: u64) target: LOG_TARGET, "Pruning blockchain database at height {} (was={})", target_horizon_height, last_pruned, ); - let mut last_block = db.fetch_block_accumulated_data_by_height(last_pruned).or_not_found( - "BlockAccumulatedData", - "height", - last_pruned.to_string(), - )?; + let mut txn = DbTransaction::new(); for block_to_prune in (last_pruned + 1)..=target_horizon_height { let header = db.fetch_chain_header_by_height(block_to_prune)?; - let curr_block = db.fetch_block_accumulated_data_by_height(block_to_prune).or_not_found( - "BlockAccumulatedData", - "height", - block_to_prune.to_string(), - )?; // Note, this could actually be done in one step instead of each block, since deleted is // accumulated - let output_mmr_positions = curr_block.deleted() - last_block.deleted(); - last_block = curr_block; - txn.prune_outputs_at_positions(output_mmr_positions.to_vec()); + txn.prune_outputs_spent_at_hash(*header.hash()); txn.delete_all_inputs_in_block(*header.hash()); if txn.operations().len() >= 100 { txn.set_pruned_height(block_to_prune); diff --git a/base_layer/core/src/chain_storage/db_transaction.rs b/base_layer/core/src/chain_storage/db_transaction.rs 
index 03403b61d5..a480db63af 100644 --- a/base_layer/core/src/chain_storage/db_transaction.rs +++ b/base_layer/core/src/chain_storage/db_transaction.rs @@ -26,7 +26,6 @@ use std::{ sync::Arc, }; -use croaring::Bitmap; use tari_common_types::types::{BlockHash, Commitment, HashOutput}; use tari_utilities::hex::Hex; @@ -34,6 +33,7 @@ use crate::{ blocks::{Block, BlockHeader, BlockHeaderAccumulatedData, ChainBlock, ChainHeader, UpdateBlockAccumulatedData}, chain_storage::{error::ChainStorageError, HorizonData, Reorg}, transactions::transaction_components::{TransactionKernel, TransactionOutput}, + OutputSmt, }; #[derive(Debug)] @@ -114,41 +114,20 @@ impl DbTransaction { utxo: TransactionOutput, header_hash: HashOutput, header_height: u64, - mmr_leaf_index: u32, timestamp: u64, ) -> &mut Self { self.operations.push(WriteOperation::InsertOutput { header_hash, header_height, - output: Box::new(utxo), - mmr_position: mmr_leaf_index, - timestamp, - }); - self - } - - pub fn insert_pruned_utxo( - &mut self, - output_hash: HashOutput, - header_hash: HashOutput, - header_height: u64, - mmr_leaf_index: u32, - timestamp: u64, - ) -> &mut Self { - self.operations.push(WriteOperation::InsertPrunedOutput { - header_hash, - header_height, - output_hash, - mmr_position: mmr_leaf_index, timestamp, + output: Box::new(utxo), }); self } - pub fn prune_outputs_at_positions(&mut self, output_mmr_positions: Vec) -> &mut Self { - self.operations.push(WriteOperation::PruneOutputsAtMmrPositions { - output_positions: output_mmr_positions, - }); + pub fn prune_outputs_spent_at_hash(&mut self, block_hash: BlockHash) -> &mut Self { + self.operations + .push(WriteOperation::PruneOutputsSpentAtHash { block_hash }); self } @@ -168,17 +147,11 @@ impl DbTransaction { self } - /// Updates the deleted tip bitmap with the indexes of the given bitmap. - pub fn update_deleted_bitmap(&mut self, deleted: Bitmap) -> &mut Self { - self.operations.push(WriteOperation::UpdateDeletedBitmap { deleted }); - self - } - /// Add the BlockHeader and contents of a `Block` (i.e. inputs, outputs and kernels) to the database. /// If the `BlockHeader` already exists, then just the contents are updated along with the relevant accumulated /// data. 
- pub fn insert_block_body(&mut self, block: Arc) -> &mut Self { - self.operations.push(WriteOperation::InsertBlockBody { block }); + pub fn insert_tip_block_body(&mut self, block: Arc) -> &mut Self { + self.operations.push(WriteOperation::InsertTipBlockBody { block }); self } @@ -284,6 +257,11 @@ impl DbTransaction { self.operations.push(WriteOperation::ClearAllReorgs); self } + + pub fn insert_tip_smt(&mut self, smt: OutputSmt) -> &mut Self { + self.operations.push(WriteOperation::InsertTipSmt { smt }); + self + } } #[derive(Debug)] @@ -294,7 +272,7 @@ pub enum WriteOperation { InsertChainHeader { header: Box, }, - InsertBlockBody { + InsertTipBlockBody { block: Arc, }, InsertKernel { @@ -305,16 +283,8 @@ pub enum WriteOperation { InsertOutput { header_hash: HashOutput, header_height: u64, - output: Box, - mmr_position: u32, - timestamp: u64, - }, - InsertPrunedOutput { - header_hash: HashOutput, - header_height: u64, - output_hash: HashOutput, - mmr_position: u32, timestamp: u64, + output: Box, }, InsertBadBlock { hash: HashOutput, @@ -330,11 +300,8 @@ pub enum WriteOperation { header_hash: HashOutput, values: UpdateBlockAccumulatedData, }, - UpdateDeletedBitmap { - deleted: Bitmap, - }, - PruneOutputsAtMmrPositions { - output_positions: Vec, + PruneOutputsSpentAtHash { + block_hash: BlockHash, }, DeleteAllInputsInBlock { block_hash: BlockHash, @@ -358,6 +325,9 @@ pub enum WriteOperation { reorg: Reorg, }, ClearAllReorgs, + InsertTipSmt { + smt: OutputSmt, + }, } impl fmt::Display for WriteOperation { @@ -374,9 +344,9 @@ impl fmt::Display for WriteOperation { InsertChainHeader { header } => { write!(f, "InsertChainHeader(#{} {})", header.height(), header.hash().to_hex()) }, - InsertBlockBody { block } => write!( + InsertTipBlockBody { block } => write!( f, - "InsertBlockBody({}, {})", + "InsertTipBlockBody({}, {})", block.accumulated_data().hash.to_hex(), block.block().body.to_counts_string(), ), @@ -395,16 +365,13 @@ impl fmt::Display for WriteOperation { header_hash, header_height, output, - mmr_position, - timestamp, + .. } => write!( f, - "Insert output {} in block:{},#{} position: {}, timestamp: {}", + "Insert output {} in block({}):{},", output.hash().to_hex(), - header_hash.to_hex(), header_height, - mmr_position, - timestamp + header_hash.to_hex(), ), DeleteOrphanChainTip(hash) => write!(f, "DeleteOrphanChainTip({})", hash.to_hex()), InsertOrphanChainTip(hash, total_accumulated_difficulty) => write!( @@ -418,20 +385,10 @@ impl fmt::Display for WriteOperation { write!(f, "Insert Monero seed string {} for height: {}", data.to_hex(), height) }, InsertChainOrphanBlock(block) => write!(f, "InsertChainOrphanBlock({})", block.hash().to_hex()), - InsertPrunedOutput { - header_hash: _, - header_height: _, - output_hash: _, - mmr_position: _, - timestamp: _, - } => write!(f, "Insert pruned output"), UpdateBlockAccumulatedData { header_hash, .. 
} => { write!(f, "Update Block data for block {}", header_hash.to_hex()) }, - UpdateDeletedBitmap { deleted } => { - write!(f, "Merge deleted bitmap at tip ({} new indexes)", deleted.cardinality()) - }, - PruneOutputsAtMmrPositions { output_positions } => write!(f, "Prune {} output(s)", output_positions.len()), + PruneOutputsSpentAtHash { block_hash } => write!(f, "Prune output(s) at hash: {}", block_hash.to_hex()), DeleteAllInputsInBlock { block_hash } => write!(f, "Delete outputs in block {}", block_hash.to_hex()), SetAccumulatedDataForOrphan(accumulated_data) => { write!(f, "Set accumulated data for orphan {}", accumulated_data) @@ -458,6 +415,13 @@ impl fmt::Display for WriteOperation { SetHorizonData { .. } => write!(f, "Set horizon data"), InsertReorg { .. } => write!(f, "Insert reorg"), ClearAllReorgs => write!(f, "Clear all reorgs"), + InsertTipSmt { smt: output_smt } => { + write!( + f, + "Inserting sparse merkle tree with root: {}", + output_smt.unsafe_hash() + ) + }, } } } diff --git a/base_layer/core/src/chain_storage/error.rs b/base_layer/core/src/chain_storage/error.rs index 240d7725ed..75e19a12fc 100644 --- a/base_layer/core/src/chain_storage/error.rs +++ b/base_layer/core/src/chain_storage/error.rs @@ -22,7 +22,7 @@ use lmdb_zero::error; use tari_common_types::types::FixedHashSizeError; -use tari_mmr::{error::MerkleMountainRangeError, MerkleProofError}; +use tari_mmr::{error::MerkleMountainRangeError, sparse_merkle_tree::SMTError, MerkleProofError}; use tari_storage::lmdb_store::LMDBError; use thiserror::Error; use tokio::task; @@ -138,6 +138,8 @@ pub enum ChainStorageError { CompositeKeyLengthExceeded, #[error("Failed to decode key bytes: {0}")] FromKeyBytesFailed(String), + #[error("Sparse Merkle Tree error: {0}")] + SMTError(#[from] SMTError), } impl ChainStorageError { diff --git a/base_layer/core/src/chain_storage/lmdb_db/composite_key.rs b/base_layer/core/src/chain_storage/lmdb_db/composite_key.rs index 38eff4d464..d613a0f87e 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/composite_key.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/composite_key.rs @@ -26,6 +26,7 @@ use std::{ }; use lmdb_zero::traits::AsLmdbBytes; +use tari_common_types::types::FixedHash; use tari_utilities::hex::to_hex; use crate::chain_storage::ChainStorageError; @@ -110,3 +111,31 @@ impl AsLmdbBytes for CompositeKey { self.as_bytes() } } + +#[derive(Debug, Clone)] +pub(super) struct OutputKey(pub(super) CompositeKey<68>); + +impl OutputKey { + pub fn new(header_hash: &FixedHash, utxo_hash: &FixedHash) -> Result { + let com_key = CompositeKey::try_from_parts(&[header_hash.as_slice(), utxo_hash.as_slice()])?; + Ok(Self(com_key)) + } + + pub(super) fn to_comp_key(self) -> CompositeKey<68> { + self.0 + } +} + +#[derive(Debug, Clone)] +pub(super) struct InputKey(pub(super) CompositeKey<68>); + +impl InputKey { + pub fn new(header_hash: &FixedHash, txo_hash: &FixedHash) -> Result { + let com_key = CompositeKey::try_from_parts(&[header_hash.as_slice(), txo_hash.as_slice()])?; + Ok(Self(com_key)) + } + + pub(super) fn to_comp_key(self) -> CompositeKey<68> { + self.0 + } +} diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index d823c87938..9cfbc692e9 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -22,7 +22,6 @@ use std::{convert::TryFrom, fmt, fs, fs::File, ops::Deref, path::Path, sync::Arc, time::Instant}; -use croaring::Bitmap; use 
fs2::FileExt; use lmdb_zero::{open, ConstTransaction, Database, Environment, ReadTransaction, WriteTransaction}; use log::*; @@ -32,6 +31,7 @@ use tari_common_types::{ epoch::VnEpoch, types::{BlockHash, Commitment, FixedHash, HashOutput, PublicKey, Signature}, }; +use tari_mmr::sparse_merkle_tree::{DeleteResult, NodeKey, ValueHash}; use tari_storage::lmdb_store::{db, LMDBBuilder, LMDBConfig, LMDBStore}; use tari_utilities::{ hex::{to_hex, Hex}, @@ -47,14 +47,13 @@ use crate::{ BlockHeaderAccumulatedData, ChainBlock, ChainHeader, - DeletedBitmap, UpdateBlockAccumulatedData, }, chain_storage::{ db_transaction::{DbKey, DbTransaction, DbValue, WriteOperation}, error::{ChainStorageError, OrNotFound}, lmdb_db::{ - composite_key::CompositeKey, + composite_key::{CompositeKey, InputKey, OutputKey}, lmdb::{ fetch_db_entry_sizes, lmdb_clear, @@ -88,9 +87,9 @@ DbSize, HorizonData, MmrTree, - PrunedOutput, Reorg, TemplateRegistrationEntry, + TxoMinedInfo, ValidatorNodeEntry, }, consensus::{ConsensusConstants, ConsensusManager}, @@ -98,7 +97,7 @@ aggregated_body::AggregateBody, transaction_components::{TransactionInput, TransactionKernel, TransactionOutput, ValidatorNodeRegistration}, }, - MutablePrunedOutputMmr, + OutputSmt, PrunedKernelMmr, }; @@ -118,8 +117,7 @@ const LMDB_DB_KERNELS: &str = "kernels"; const LMDB_DB_KERNEL_EXCESS_INDEX: &str = "kernel_excess_index"; const LMDB_DB_KERNEL_EXCESS_SIG_INDEX: &str = "kernel_excess_sig_index"; const LMDB_DB_KERNEL_MMR_SIZE_INDEX: &str = "kernel_mmr_size_index"; -const LMDB_DB_UTXO_MMR_SIZE_INDEX: &str = "utxo_mmr_size_index"; -const LMDB_DB_DELETED_TXO_MMR_POSITION_TO_HEIGHT_INDEX: &str = "deleted_txo_mmr_position_to_height_index"; +const LMDB_DB_DELETED_TXO_HASH_TO_HEADER_INDEX: &str = "deleted_txo_hash_to_header_index"; const LMDB_DB_UTXO_COMMITMENT_INDEX: &str = "utxo_commitment_index"; const LMDB_DB_UNIQUE_ID_INDEX: &str = "unique_id_index"; const LMDB_DB_CONTRACT_ID_INDEX: &str = "contract_index"; @@ -133,13 +131,10 @@ const LMDB_DB_REORGS: &str = "reorgs"; const LMDB_DB_VALIDATOR_NODES: &str = "validator_nodes"; const LMDB_DB_VALIDATOR_NODES_MAPPING: &str = "validator_nodes_mapping"; const LMDB_DB_TEMPLATE_REGISTRATIONS: &str = "template_registrations"; +const LMDB_DB_TIP_UTXO_SMT: &str = "tip_utxo_smt"; -/// HeaderHash(32), mmr_pos(4), hash(32) type InputKey = CompositeKey<68>; /// HeaderHash(32), mmr_pos(4), hash(32) type KernelKey = CompositeKey<68>; -/// HeaderHash(32), mmr_pos(4), hash(32) type OutputKey = CompositeKey<68>; /// Height(8), Hash(32) type ValidatorNodeRegistrationKey = CompositeKey<40>; @@ -172,11 +167,10 @@ .add_database(LMDB_DB_KERNEL_EXCESS_INDEX, flags) .add_database(LMDB_DB_KERNEL_EXCESS_SIG_INDEX, flags) .add_database(LMDB_DB_KERNEL_MMR_SIZE_INDEX, flags) - .add_database(LMDB_DB_UTXO_MMR_SIZE_INDEX, flags) .add_database(LMDB_DB_UTXO_COMMITMENT_INDEX, flags) .add_database(LMDB_DB_UNIQUE_ID_INDEX, flags) .add_database(LMDB_DB_CONTRACT_ID_INDEX, flags) - .add_database(LMDB_DB_DELETED_TXO_MMR_POSITION_TO_HEIGHT_INDEX, flags | db::INTEGERKEY) + .add_database(LMDB_DB_DELETED_TXO_HASH_TO_HEADER_INDEX, flags) .add_database(LMDB_DB_ORPHANS, flags) .add_database(LMDB_DB_ORPHAN_HEADER_ACCUMULATED_DATA, flags) .add_database(LMDB_DB_MONERO_SEED_HEIGHT, flags) @@ -187,6 +181,7 @@ .add_database(LMDB_DB_VALIDATOR_NODES, flags) .add_database(LMDB_DB_VALIDATOR_NODES_MAPPING, flags) .add_database(LMDB_DB_TEMPLATE_REGISTRATIONS, flags |
db::DUPSORT) + .add_database(LMDB_DB_TIP_UTXO_SMT, flags) .build() .map_err(|err| ChainStorageError::CriticalError(format!("Could not create LMDB store:{}", err)))?; debug!(target: LOG_TARGET, "LMDB database creation successful"); @@ -220,8 +215,6 @@ pub struct LMDBDatabase { kernel_excess_sig_index: DatabaseRef, /// Maps kernel_mmr_size -> height kernel_mmr_size_index: DatabaseRef, - /// Maps output_mmr_size -> height - output_mmr_size_index: DatabaseRef, /// Maps commitment -> output_hash utxo_commitment_index: DatabaseRef, /// Maps unique_id -> output_hash @@ -230,7 +223,7 @@ /// and -> output_hash contract_index: DatabaseRef, - /// Maps output_mmr_pos -> - deleted_txo_mmr_position_to_height_index: DatabaseRef, + /// Maps input_hash -> (height, header_hash) of the block that spent the TXO + deleted_txo_hash_to_header_index: DatabaseRef, /// Maps block_hash -> Block orphans_db: DatabaseRef, /// Maps randomx_seed -> height monero_seed_height_db: DatabaseRef, @@ -247,6 +240,8 @@ reorgs: DatabaseRef, /// Maps -> ActiveValidatorNode validator_nodes: DatabaseRef, + /// Stores the sparse Merkle tree of the UTXO set at the tip + tip_utxo_smt: DatabaseRef, /// Maps -> VN Shard Key validator_nodes_mapping: DatabaseRef, /// Maps CodeTemplateRegistration -> TemplateRegistration @@ -276,14 +271,10 @@ kernel_excess_index: get_database(store, LMDB_DB_KERNEL_EXCESS_INDEX)?, kernel_excess_sig_index: get_database(store, LMDB_DB_KERNEL_EXCESS_SIG_INDEX)?, kernel_mmr_size_index: get_database(store, LMDB_DB_KERNEL_MMR_SIZE_INDEX)?, - output_mmr_size_index: get_database(store, LMDB_DB_UTXO_MMR_SIZE_INDEX)?, utxo_commitment_index: get_database(store, LMDB_DB_UTXO_COMMITMENT_INDEX)?, unique_id_index: get_database(store, LMDB_DB_UNIQUE_ID_INDEX)?, contract_index: get_database(store, LMDB_DB_CONTRACT_ID_INDEX)?, - deleted_txo_mmr_position_to_height_index: get_database( - store, - LMDB_DB_DELETED_TXO_MMR_POSITION_TO_HEIGHT_INDEX, - )?, + deleted_txo_hash_to_header_index: get_database(store, LMDB_DB_DELETED_TXO_HASH_TO_HEADER_INDEX)?, orphans_db: get_database(store, LMDB_DB_ORPHANS)?, orphan_header_accumulated_data_db: get_database(store, LMDB_DB_ORPHAN_HEADER_ACCUMULATED_DATA)?, monero_seed_height_db: get_database(store, LMDB_DB_MONERO_SEED_HEIGHT)?, @@ -293,6 +284,7 @@ reorgs: get_database(store, LMDB_DB_REORGS)?, validator_nodes: get_database(store, LMDB_DB_VALIDATOR_NODES)?, validator_nodes_mapping: get_database(store, LMDB_DB_VALIDATOR_NODES_MAPPING)?, + tip_utxo_smt: get_database(store, LMDB_DB_TIP_UTXO_SMT)?, template_registrations: get_database(store, LMDB_DB_TEMPLATE_REGISTRATIONS)?, env, env_config: store.env_config(), @@ -329,8 +321,8 @@ InsertChainHeader { header } => { self.insert_header(&write_txn, header.header(), header.accumulated_data())?; }, - InsertBlockBody { block } => { - self.insert_block_body(&write_txn, block.header(), block.block().body.clone())?; + InsertTipBlockBody { block } => { + self.insert_tip_block_body(&write_txn, block.header(), block.block().body.clone())?; }, InsertKernel { header_hash, @@ -342,34 +334,10 @@ InsertOutput { header_hash, header_height, - output, - mmr_position, - timestamp, - } => { - self.insert_output( - &write_txn, - header_hash, - *header_height, - output, - *mmr_position, - *timestamp, - )?; - }, - InsertPrunedOutput { - header_hash, - header_height, - output_hash, - mmr_position, timestamp, + output, } => { - self.insert_pruned_output( - &write_txn, - header_hash, - *header_height, - output_hash, - *mmr_position, - *timestamp, - )?; +
self.insert_output(&write_txn, header_hash, *header_height, *timestamp, output)?; }, DeleteHeader(height) => { self.delete_header(&write_txn, *height)?; @@ -413,13 +381,8 @@ impl LMDBDatabase { UpdateBlockAccumulatedData { header_hash, values } => { self.update_block_accumulated_data(&write_txn, header_hash, values.clone())?; }, - UpdateDeletedBitmap { deleted } => { - let mut bitmap = self.load_deleted_bitmap_model(&write_txn)?; - bitmap.merge(deleted)?; - bitmap.save()?; - }, - PruneOutputsAtMmrPositions { output_positions } => { - self.prune_outputs_at_positions(&write_txn, output_positions)?; + PruneOutputsSpentAtHash { block_hash } => { + self.prune_outputs_spent_at_hash(&write_txn, block_hash)?; }, DeleteAllInputsInBlock { block_hash } => { self.delete_all_inputs_in_block(&write_txn, block_hash)?; @@ -499,6 +462,9 @@ impl LMDBDatabase { ClearAllReorgs => { lmdb_clear(&write_txn, &self.reorgs)?; }, + InsertTipSmt { smt } => { + self.insert_tip_smt(&write_txn, smt)?; + }, } } write_txn.commit()?; @@ -506,7 +472,7 @@ impl LMDBDatabase { Ok(()) } - fn all_dbs(&self) -> [(&'static str, &DatabaseRef); 27] { + fn all_dbs(&self) -> [(&'static str, &DatabaseRef); 26] { [ ("metadata_db", &self.metadata_db), ("headers_db", &self.headers_db), @@ -520,13 +486,12 @@ impl LMDBDatabase { ("kernel_excess_index", &self.kernel_excess_index), ("kernel_excess_sig_index", &self.kernel_excess_sig_index), ("kernel_mmr_size_index", &self.kernel_mmr_size_index), - ("output_mmr_size_index", &self.output_mmr_size_index), ("utxo_commitment_index", &self.utxo_commitment_index), ("contract_index", &self.contract_index), ("unique_id_index", &self.unique_id_index), ( - "deleted_txo_mmr_position_to_height_index", - &self.deleted_txo_mmr_position_to_height_index, + "deleted_txo_hash_to_header_index", + &self.deleted_txo_hash_to_header_index, ), ("orphans_db", &self.orphans_db), ( @@ -544,23 +509,9 @@ impl LMDBDatabase { ] } - fn prune_output( - &self, - txn: &WriteTransaction<'_>, - key: &OutputKey, - ) -> Result { - let mut output: TransactionOutputRowData = - lmdb_get(txn, &self.utxos_db, key).or_not_found("TransactionOutput", "key", key.to_string())?; - let pruned_output = output - .output - .take() - .ok_or_else(|| ChainStorageError::DataInconsistencyDetected { - function: "prune_output", - details: format!("Attempt to prune output that has already been pruned for key {}", key), - })?; - // output.output is None - lmdb_replace(txn, &self.utxos_db, key, &output)?; - Ok(pruned_output) + fn prune_output(&self, txn: &WriteTransaction<'_>, key: OutputKey) -> Result<(), ChainStorageError> { + lmdb_delete(txn, &self.utxos_db, &key.to_comp_key(), "utxos_db")?; + Ok(()) } fn insert_output( @@ -568,40 +519,40 @@ impl LMDBDatabase { txn: &WriteTransaction<'_>, header_hash: &HashOutput, header_height: u64, + header_timestamp: u64, output: &TransactionOutput, - mmr_position: u32, - timestamp: u64, ) -> Result<(), ChainStorageError> { let output_hash = output.hash(); - let output_key = OutputKey::try_from_parts(&[header_hash.as_slice(), mmr_position.to_be_bytes().as_slice()])?; + let output_key = OutputKey::new(header_hash, &output_hash)?; - lmdb_insert( - txn, - &self.utxo_commitment_index, - output.commitment.as_bytes(), - &output_hash, - "utxo_commitment_index", - )?; + if !output.is_burned() { + lmdb_insert( + txn, + &self.utxo_commitment_index, + output.commitment.as_bytes(), + &output_hash, + "utxo_commitment_index", + )?; + } lmdb_insert( txn, &self.txos_hash_to_index_db, output_hash.as_slice(), - &(mmr_position, 
output_key.to_vec()), + &(output_key.clone().to_comp_key().to_vec()), "txos_hash_to_index_db", )?; lmdb_insert( txn, &self.utxos_db, - &output_key, + &output_key.to_comp_key(), &TransactionOutputRowData { - output: Some(output.clone()), + output: output.clone(), header_hash: *header_hash, - mmr_position, hash: output_hash, mined_height: header_height, - mined_timestamp: timestamp, + mined_timestamp: header_timestamp, }, "utxos_db", )?; @@ -609,46 +560,6 @@ impl LMDBDatabase { Ok(()) } - fn insert_pruned_output( - &self, - txn: &WriteTransaction<'_>, - header_hash: &HashOutput, - header_height: u64, - output_hash: &HashOutput, - mmr_position: u32, - timestamp: u64, - ) -> Result<(), ChainStorageError> { - if !lmdb_exists(txn, &self.block_hashes_db, header_hash.as_slice())? { - return Err(ChainStorageError::InvalidOperation(format!( - "Unable to insert pruned output because header {} does not exist", - header_hash.to_hex(), - ))); - } - let key = OutputKey::try_from_parts(&[header_hash.as_slice(), mmr_position.to_be_bytes().as_slice()])?; - lmdb_insert( - txn, - &self.txos_hash_to_index_db, - output_hash.as_slice(), - &(mmr_position, key.to_vec()), - "txos_hash_to_index_db", - )?; - lmdb_insert( - txn, - &self.utxos_db, - &key, - &TransactionOutputRowData { - output: None, - header_hash: *header_hash, - mmr_position, - hash: *output_hash, - mined_height: header_height, - mined_timestamp: timestamp, - }, - "utxos_db", - )?; - Ok(()) - } - fn insert_kernel( &self, txn: &WriteTransaction<'_>, @@ -700,9 +611,9 @@ impl LMDBDatabase { &self, txn: &WriteTransaction<'_>, height: u64, + header_timestamp: u64, header_hash: &HashOutput, input: &TransactionInput, - mmr_position: u32, ) -> Result<(), ChainStorageError> { lmdb_delete( txn, @@ -715,28 +626,28 @@ impl LMDBDatabase { ChainStorageError::ValueNotFound { .. } => Ok(()), _ => Err(err), })?; + // This index now maps the input's canonical hash directly to the spending block's (height, header hash), + // so the key no longer needs to carry an MMR position. + let hash = input.canonical_hash(); lmdb_insert( txn, - &self.deleted_txo_mmr_position_to_height_index, - &mmr_position, + &self.deleted_txo_hash_to_header_index, + &hash.to_vec(), &(height, header_hash), - "deleted_txo_mmr_position_to_height_index", + "deleted_txo_hash_to_header_index", )?; - let hash = input.canonical_hash(); - let key = InputKey::try_from_parts(&[ - header_hash.as_slice(), - mmr_position.to_be_bytes().as_slice(), - hash.as_slice(), - ])?; + let key = InputKey::new(header_hash, &hash)?; lmdb_insert( txn, &self.inputs_db, - &key, + &key.to_comp_key(), &TransactionInputRowDataRef { input: &input.to_compact(), header_hash, - mmr_position, + mined_timestamp: header_timestamp, + height, hash: &hash, }, "inputs_db", @@ -860,13 +771,6 @@ impl LMDBDatabase { &header.height, "kernel_mmr_size_index", )?; - lmdb_insert( - txn, - &self.output_mmr_size_index, - &header.output_mmr_size.to_be_bytes(), - &(header.height, header.hash().as_slice()), - "output_mmr_size_index", - )?; Ok(()) } @@ -921,12 +825,6 @@ impl LMDBDatabase { &header.kernel_mmr_size.to_be_bytes(), "kernel_mmr_size_index", )?; - lmdb_delete( - txn, - &self.output_mmr_size_index, - &header.output_mmr_size.to_be_bytes(), - "output_mmr_size_index", - )?; Ok(()) } @@ -942,16 +840,6 @@ impl LMDBDatabase { let height = self .fetch_height_from_hash(write_txn, block_hash) .or_not_found("Block", "hash", hash_hex)?; - let block_accum_data = - self.fetch_block_accumulated_data(write_txn, height)?
- .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockAccumulatedData", - field: "height", - value: height.to_string(), - })?; - let mut bitmap = self.load_deleted_bitmap_model(write_txn)?; - bitmap.remove(block_accum_data.deleted())?; - bitmap.save()?; lmdb_delete( write_txn, @@ -984,24 +872,23 @@ impl LMDBDatabase { utxo.hash.as_slice(), "txos_hash_to_index_db", )?; - if let Some(ref output) = utxo.output { - let output_hash = output.hash(); - // if an output was already spent in the block, it was never created as unspent, so dont delete it as it - // does not exist here - if inputs.iter().any(|r| r.input.output_hash() == output_hash) { - continue; - } - // if an output was burned, it was never created as an unspent utxo - if output.is_burned() { - continue; - } - lmdb_delete( - txn, - &self.utxo_commitment_index, - output.commitment.as_bytes(), - "utxo_commitment_index", - )?; + + let output_hash = utxo.output.hash(); + // if an output was already spent in the block, it was never created as unspent, so dont delete it as it + // does not exist here + if inputs.iter().any(|r| r.input.output_hash() == output_hash) { + continue; } + // if an output was burned, it was never created as an unspent utxo + if utxo.output.is_burned() { + continue; + } + lmdb_delete( + txn, + &self.utxo_commitment_index, + utxo.output.commitment.as_bytes(), + "utxo_commitment_index", + )?; } // Move inputs in this block back into the unspent set, any outputs spent within this block they will be removed // by deleting all the block's outputs below @@ -1011,9 +898,9 @@ impl LMDBDatabase { lmdb_delete( txn, - &self.deleted_txo_mmr_position_to_height_index, - &row.mmr_position, - "deleted_txo_mmr_position_to_height_index", + &self.deleted_txo_hash_to_header_index, + &row.hash.to_vec(), + "deleted_txo_hash_to_header_index", )?; if output_rows.iter().any(|r| r.hash == output_hash) { continue; @@ -1029,33 +916,22 @@ impl LMDBDatabase { } })?; - match utxo_mined_info.output { - PrunedOutput::Pruned { output_hash } => { - debug!(target: LOG_TARGET, "Output Transaction Input is spending is pruned"); - return Err(ChainStorageError::InvalidOperation(format!( - "Output Transaction Input: {} is spending is pruned", - output_hash, - ))); - }, - PrunedOutput::NotPruned { output } => { - let rp_hash = match output.proof { - Some(proof) => proof.hash(), - None => FixedHash::zero(), - }; - input.add_output_data( - output.version, - output.features, - output.commitment, - output.script, - output.sender_offset_public_key, - output.covenant, - output.encrypted_data, - output.metadata_signature, - rp_hash, - output.minimum_value_promise, - ); - }, - } + let rp_hash = match utxo_mined_info.output.proof { + Some(proof) => proof.hash(), + None => FixedHash::zero(), + }; + input.add_output_data( + utxo_mined_info.output.version, + utxo_mined_info.output.features, + utxo_mined_info.output.commitment, + utxo_mined_info.output.script, + utxo_mined_info.output.sender_offset_public_key, + utxo_mined_info.output.covenant, + utxo_mined_info.output.encrypted_data, + utxo_mined_info.output.metadata_signature, + rp_hash, + utxo_mined_info.output.minimum_value_promise, + ); trace!(target: LOG_TARGET, "Input moved to UTXO set: {}", input); lmdb_insert( @@ -1184,7 +1060,7 @@ impl LMDBDatabase { // Break function up into smaller pieces #[allow(clippy::too_many_lines)] - fn insert_block_body( + fn insert_tip_block_body( &self, txn: &WriteTransaction<'_>, header: &BlockHeader, @@ -1232,7 +1108,6 @@ impl LMDBDatabase { let mut total_kernel_sum 
= Commitment::default(); let BlockAccumulatedData { kernels: pruned_kernel_set, - outputs: pruned_output_set, .. } = data; @@ -1251,100 +1126,26 @@ impl LMDBDatabase { })?; self.insert_kernel(txn, &block_hash, &kernel, pos)?; } - let mut output_mmr = MutablePrunedOutputMmr::new(pruned_output_set, Bitmap::create())?; - - let leaf_count = output_mmr.get_leaf_count(); - - // Output hashes added before inputs so that inputs can spend outputs in this transaction (0-conf and combined) - let mut burned_outputs = Vec::new(); - let outputs = outputs - .into_iter() - .enumerate() - .map(|(i, output)| { - output_mmr.push(output.hash().to_vec())?; - // lets check burn - if output.is_burned() { - let index = match output_mmr.find_leaf_index(output.hash().as_slice())? { - Some(index) => { - debug!(target: LOG_TARGET, "Output {} burned in current block", output); - burned_outputs.push(output.commitment.clone()); - index - }, - None => { - return Err(ChainStorageError::UnexpectedResult( - "Output MMR did not contain the expected output".to_string(), - )) - }, - }; - // We need to mark this as spent as well. - if !output_mmr.delete(index) { - return Err(ChainStorageError::InvalidOperation(format!( - "Could not delete index {} from the output MMR", - index - ))); - } - }; - Ok((output, leaf_count + i + 1)) - }) - .collect::<Result<Vec<_>, ChainStorageError>>()?; - - let mut spent_zero_conf_commitments = Vec::new(); - // unique_id_index expects inputs to be inserted before outputs - for input in &inputs { - let output_hash = input.output_hash(); - let index = match self.fetch_mmr_leaf_index(txn, MmrTree::Utxo, &output_hash)? { - Some(index) => index, - None => match output_mmr.find_leaf_index(output_hash.as_slice())? { - Some(index) => { - debug!( - target: LOG_TARGET, - "Input {} spends output from current block (0-conf)", input - ); - spent_zero_conf_commitments.push(input.commitment()?); - index - }, - None => return Err(ChainStorageError::UnspendableInput), - }, - }; - - let features = input.features()?; - if let Some(vn_reg) = features - .sidechain_feature - .as_ref() - .and_then(|f| f.validator_node_registration()) - { - self.validator_node_store(txn) - .delete(header.height, vn_reg.public_key(), input.commitment()?)?; - } - - if !output_mmr.delete(index) { - return Err(ChainStorageError::InvalidOperation(format!( - "Could not delete index {} from the output MMR", - index - ))); - } - trace!( - target: LOG_TARGET, - "Inserting input (`{}`, `{}`)", - input.commitment()?.to_hex(), - input.output_hash().to_hex() - ); - self.insert_input(txn, current_header_at_height.height, &block_hash, input, index)?; - } + let k = MetadataKey::TipSmt; + let mut output_smt: OutputSmt = + lmdb_get(txn, &self.tip_utxo_smt, &k.as_u32())?.ok_or_else(|| ChainStorageError::ValueNotFound { + entity: "OutputSmt", + field: "tip", + value: "".to_string(), + })?; - for (output, mmr_count) in outputs { + for output in outputs { trace!( target: LOG_TARGET, "Inserting output (`{}`, `{}`)", output.commitment.to_hex(), output.hash() ); - let mmr_count = u32::try_from(mmr_count).map(|c| c - 1).map_err(|_| { - ChainStorageError::InvalidOperation(format!( - "Output MMR node count ({}) is greater than u32::MAX", - mmr_count - )) - })?; + if !output.is_burned() { + let smt_key = NodeKey::try_from(output.commitment.as_bytes())?; + let smt_node = ValueHash::try_from(output.smt_hash(header.height).as_slice())?; + output_smt.insert(smt_key, smt_node)?; + } let output_hash = output.hash(); if let Some(vn_reg) = output
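The hunk above replaces the mutable output MMR with a sparse Merkle tree keyed by output commitment: unspent outputs are leaves, spending is a delete, and burned outputs are never inserted (hence the is_burned() guard). A minimal sketch of that flow, assuming only the tari_mmr sparse_merkle_tree API already imported at the top of this file (NodeKey/ValueHash::try_from, insert, delete and DeleteResult); the LMDB plumbing and validator-node bookkeeping are omitted:

use blake2::Blake2b;
use digest::consts::U32;
use tari_mmr::sparse_merkle_tree::{DeleteResult, NodeKey, SparseMerkleTree, ValueHash};

type OutputSmt = SparseMerkleTree<Blake2b<U32>>;

// Apply one block to the tip UTXO set: new outputs are inserted under their
// commitment; spends are deletes, and a missing key is the SMT analogue of
// the old UnspendableInput check.
fn apply_block(
    smt: &mut OutputSmt,
    new_outputs: &[([u8; 32], [u8; 32])], // (commitment bytes, smt leaf hash)
    spent_commitments: &[[u8; 32]],
) -> Result<(), String> {
    for (commitment, leaf) in new_outputs {
        let key = NodeKey::try_from(commitment.as_slice()).map_err(|e| e.to_string())?;
        let value = ValueHash::try_from(leaf.as_slice()).map_err(|e| e.to_string())?;
        smt.insert(key, value).map_err(|e| e.to_string())?;
    }
    for commitment in spent_commitments {
        let key = NodeKey::try_from(commitment.as_slice()).map_err(|e| e.to_string())?;
        match smt.delete(&key).map_err(|e| e.to_string())? {
            DeleteResult::Deleted(_) => {},
            DeleteResult::KeyNotFound => return Err("input spends an unknown output".to_string()),
        }
    }
    Ok(())
}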
@@ -1370,55 +1171,47 @@ impl LMDBDatabase { self.insert_template_registration(txn, &record)?; } - self.insert_output( - txn, - &block_hash, - header.height, - &output, - mmr_count, - header.timestamp().as_u64(), - )?; + self.insert_output(txn, &block_hash, header.height, header.timestamp().as_u64(), &output)?; } - for commitment in spent_zero_conf_commitments { - lmdb_delete( - txn, - &self.utxo_commitment_index, - commitment.as_bytes(), - "utxo_commitment_index", - )?; - } - for commitment in burned_outputs { - lmdb_delete( + // unique_id_index expects inputs to be inserted before outputs + for input in &inputs { + let smt_key = NodeKey::try_from(input.commitment()?.as_bytes())?; + match output_smt.delete(&smt_key)? { + DeleteResult::Deleted(_value_hash) => {}, + DeleteResult::KeyNotFound => return Err(ChainStorageError::UnspendableInput), + }; + + let features = input.features()?; + if let Some(vn_reg) = features + .sidechain_feature + .as_ref() + .and_then(|f| f.validator_node_registration()) + { + self.validator_node_store(txn) + .delete(header.height, vn_reg.public_key(), input.commitment()?)?; + } + trace!( + target: LOG_TARGET, + "Inserting input (`{}`, `{}`)", + input.commitment()?.to_hex(), + input.output_hash().to_hex() + ); + self.insert_input( txn, - &self.utxo_commitment_index, - commitment.as_bytes(), - "utxo_commitment_index", + current_header_at_height.height, + current_header_at_height.timestamp.as_u64(), + &block_hash, + input, )?; } - // Merge current deletions with the tip bitmap - let deleted_at_current_height = output_mmr.deleted().clone(); - // Merge the new indexes with the blockchain deleted bitmap - let mut deleted_bitmap = self.load_deleted_bitmap_model(txn)?; - deleted_bitmap.merge(&deleted_at_current_height)?; - - // Set the output MMR to the complete map so that the complete state can be committed to in the final MR - output_mmr.set_deleted(deleted_bitmap.get().clone().into_bitmap()); - output_mmr.compress(); - - // Save the bitmap - deleted_bitmap.save()?; self.insert_block_accumulated_data( txn, header.height, - &BlockAccumulatedData::new( - kernel_mmr.get_pruned_hash_set()?, - output_mmr.mmr().get_pruned_hash_set()?, - deleted_at_current_height, - total_kernel_sum, - ), + &BlockAccumulatedData::new(kernel_mmr.get_pruned_hash_set()?, total_kernel_sum), )?; + self.insert_tip_smt(txn, &output_smt)?; Ok(()) } @@ -1485,6 +1278,11 @@ impl LMDBDatabase { ) } + fn insert_tip_smt(&self, txn: &WriteTransaction<'_>, smt: &OutputSmt) -> Result<(), ChainStorageError> { + let k = MetadataKey::TipSmt; + lmdb_replace(txn, &self.tip_utxo_smt, &k.as_u32(), smt) + } + fn update_block_accumulated_data( &self, write_txn: &WriteTransaction<'_>, @@ -1501,33 +1299,17 @@ impl LMDBDatabase { .fetch_block_accumulated_data(write_txn, height)?
.unwrap_or_default(); - if let Some(deleted_diff) = values.deleted_diff { - block_accum_data.deleted = deleted_diff; - } if let Some(kernel_sum) = values.kernel_sum { block_accum_data.kernel_sum = kernel_sum; } if let Some(kernel_hash_set) = values.kernel_hash_set { block_accum_data.kernels = kernel_hash_set; } - if let Some(utxo_hash_set) = values.utxo_hash_set { - block_accum_data.outputs = utxo_hash_set; - } lmdb_replace(write_txn, &self.block_accumulated_data_db, &height, &block_accum_data)?; Ok(()) } - fn load_deleted_bitmap_model<'a, 'b, T>( - &'a self, - txn: &'a T, - ) -> Result<DeletedBitmapModel<'a, T>, ChainStorageError> - where - T: Deref<Target = ConstTransaction<'b>>, - { - DeletedBitmapModel::load(txn, &self.metadata_db) - } - fn insert_monero_seed_height( &self, write_txn: &WriteTransaction<'_>, @@ -1551,41 +1333,29 @@ impl LMDBDatabase { Ok(()) } - fn prune_outputs_at_positions( + fn prune_outputs_spent_at_hash( &self, write_txn: &WriteTransaction<'_>, - output_positions: &[u32], + block_hash: &HashOutput, ) -> Result<(), ChainStorageError> { - for pos in output_positions { - let (_height, hash) = lmdb_first_after::<_, (u64, Vec<u8>)>( + let inputs = + lmdb_fetch_matching_after::<TransactionInputRowData>(write_txn, &self.inputs_db, block_hash.as_slice())?; + + for input in inputs { + lmdb_delete( write_txn, - &self.output_mmr_size_index, - &u64::from(pos + 1).to_be_bytes(), - ) - .or_not_found("BlockHeader", "mmr_position", pos.to_string())?; - let key = OutputKey::try_from_parts(&[hash.as_slice(), pos.to_be_bytes().as_slice()])?; - debug!(target: LOG_TARGET, "Pruning output: {}", key); - self.prune_output(write_txn, &key)?; + &self.txos_hash_to_index_db, + input.hash.as_slice(), + "txos_hash_to_index_db", + )?; + let key = OutputKey::new(block_hash, &input.hash)?; + debug!(target: LOG_TARGET, "Pruning output: {:?}", key); + self.prune_output(write_txn, key)?; } Ok(()) } - #[allow(clippy::ptr_arg)] - fn fetch_mmr_leaf_index( - &self, - txn: &ConstTransaction<'_>, - tree: MmrTree, - hash: &HashOutput, - ) -> Result<Option<u32>, ChainStorageError> { - match tree { - MmrTree::Utxo => Ok( - lmdb_get::<_, (u32, Vec<u8>)>(txn, &self.txos_hash_to_index_db, hash.deref())?.map(|(index, _)| index), - ), - _ => unimplemented!(), - } - } - #[allow(clippy::ptr_arg)] fn fetch_orphan(&self, txn: &ConstTransaction<'_>, hash: &HashOutput) -> Result<Option<Block>, ChainStorageError> { let val: Option<Block> = lmdb_get(txn, &self.orphans_db, hash.deref())?; @@ -1673,51 +1443,72 @@ impl LMDBDatabase { txn: &ConstTransaction<'_>, output_hash: &[u8], ) -> Result<Option<UtxoMinedInfo>, ChainStorageError> { - if let Some((index, key)) = lmdb_get::<_, (u32, Vec<u8>)>(txn, &self.txos_hash_to_index_db, output_hash)? { + if let Some(key) = lmdb_get::<_, Vec<u8>>(txn, &self.txos_hash_to_index_db, output_hash)? { debug!( target: LOG_TARGET, - "Fetch output: {} Found ({}, {})", + "Fetch output: {} Found ({})", to_hex(output_hash), - index, key.to_hex() ); match lmdb_get::<_, TransactionOutputRowData>(txn, &self.utxos_db, &key)? { Some(TransactionOutputRowData { - output: Some(o), - mmr_position, + output: o, mined_height, header_hash, mined_timestamp, ..
}) => Ok(Some(UtxoMinedInfo { - output: PrunedOutput::NotPruned { output: o }, - mmr_position, + output: o, mined_height, header_hash, mined_timestamp, })), - Some(TransactionOutputRowData { - output: None, - mmr_position, - mined_height, - hash, + + _ => Ok(None), + } + } else { + debug!( + target: LOG_TARGET, + "Fetch output: {} NOT found in index", + to_hex(output_hash) + ); + Ok(None) + } + } + + fn fetch_input_in_txn( + &self, + txn: &ConstTransaction<'_>, + input_hash: &[u8], + ) -> Result<Option<TxoMinedInfo>, ChainStorageError> { + if let Some(key) = lmdb_get::<_, Vec<u8>>(txn, &self.deleted_txo_hash_to_header_index, input_hash)? { + debug!( + target: LOG_TARGET, + "Fetch input: {} Found ({})", + to_hex(input_hash), + key.to_hex() + ); + match lmdb_get::<_, TransactionInputRowData>(txn, &self.inputs_db, &key)? { + Some(TransactionInputRowData { + input: i, + height, header_hash, mined_timestamp, .. - }) => Ok(Some(UtxoMinedInfo { - output: PrunedOutput::Pruned { output_hash: hash }, - mmr_position, - mined_height, + }) => Ok(Some(TxoMinedInfo { + input: i, + spent_height: height, header_hash, - mined_timestamp, + spent_timestamp: mined_timestamp, })), + _ => Ok(None), } } else { debug!( target: LOG_TARGET, - "Fetch output: {} NOT found in index", - to_hex(output_hash) + "Fetch input: {} NOT found in index", + to_hex(input_hash) ); Ok(None) } @@ -1966,43 +1757,6 @@ impl BlockchainBackend for LMDBDatabase { Ok(chain_header) } - fn fetch_header_containing_utxo_mmr(&self, mmr_position: u64) -> Result<ChainHeader, ChainStorageError> { - let txn = self.read_transaction()?; - // LMDB returns the height at the position, so we have to offset the position by 1 so that the mmr_position arg - // is an index starting from 0 - let mmr_position = mmr_position + 1; - - let (height, _hash) = - lmdb_first_after::<_, (u64, Vec<u8>)>(&txn, &self.output_mmr_size_index, &mmr_position.to_be_bytes())? - .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "output_mmr_size_index", - field: "mmr_position", - value: mmr_position.to_string(), - })?; - - let header: BlockHeader = - lmdb_get(&txn, &self.headers_db, &height)?.ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeader", - field: "height", - value: height.to_string(), - })?; - let accum_data = self - .fetch_header_accumulated_data_by_height(&txn, height)? - .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeaderAccumulatedData", - field: "height", - value: height.to_string(), - })?; - - let chain_header = ChainHeader::try_construct(header, accum_data).ok_or_else(|| { - ChainStorageError::DataInconsistencyDetected { - function: "fetch_header_containing_utxo_mmr", - details: format!("Accumulated data mismatch at height #{}", height), - } - })?; - Ok(chain_header) - } - fn is_empty(&self) -> Result<bool, ChainStorageError> { let txn = self.read_transaction()?; Ok(lmdb_len(&txn, &self.headers_db)? == 0) } @@ -2062,45 +1816,38 @@ impl BlockchainBackend for LMDBDatabase { fn fetch_utxos_in_block( &self, header_hash: &HashOutput, - deleted: Option<&Bitmap>, - ) -> Result<(Vec<PrunedOutput>, Bitmap), ChainStorageError> { + spend_header: Option<FixedHash>, + ) -> Result<Vec<(TransactionOutput, bool)>, ChainStorageError> { let txn = self.read_transaction()?; - let utxos = lmdb_fetch_matching_after::<TransactionOutputRowData>(&txn, &self.utxos_db, header_hash.deref())?
- .into_iter() - .map(|row| { - if deleted.map(|b| b.contains(row.mmr_position)).unwrap_or(false) { - return PrunedOutput::Pruned { output_hash: row.hash }; - } - if let Some(output) = row.output { - PrunedOutput::NotPruned { output } - } else { - PrunedOutput::Pruned { output_hash: row.hash } + let mut utxos: Vec<(TransactionOutput, bool)> = + lmdb_fetch_matching_after::<TransactionOutputRowData>(&txn, &self.utxos_db, header_hash.deref())? + .into_iter() + .map(|row| (row.output, false)) + .collect(); + if let Some(header) = spend_header { + let header_height = + self.fetch_height_from_hash(&txn, &header)? + .ok_or(ChainStorageError::ValueNotFound { + entity: "Header", + field: "hash", + value: header.to_hex(), + })?; + for utxo in &mut utxos { + let hash = utxo.0.hash(); + match lmdb_get::<_, (u64, Vec<u8>)>(&txn, &self.deleted_txo_hash_to_header_index, hash.as_slice())? { + Some((height, _)) => { + if height <= header_height { + // the output was spent at or below the height of the requested spend header + utxo.1 = true; + } + }, + _ => {}, } - }) - .collect(); - - let height = - self.fetch_height_from_hash(&txn, header_hash)? - .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeader", - field: "hash", - value: header_hash.to_hex(), - })?; - - // Builds a BitMap of the deleted UTXO MMR indexes that occurred at the current height - let acc_data = - self.fetch_block_accumulated_data(&txn, height)? - .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockAccumulatedData", - field: "height", - value: height.to_string(), - })?; - - let mut difference_bitmap = Bitmap::create(); - difference_bitmap.or_inplace(acc_data.deleted()); + } + } - Ok((utxos, difference_bitmap)) + Ok(utxos) } fn fetch_output(&self, output_hash: &HashOutput) -> Result<Option<UtxoMinedInfo>, ChainStorageError> { @@ -2109,6 +1856,12 @@ impl BlockchainBackend for LMDBDatabase { self.fetch_output_in_txn(&txn, output_hash.as_slice()) } + fn fetch_input(&self, input_hash: &HashOutput) -> Result<Option<TxoMinedInfo>, ChainStorageError> { + debug!(target: LOG_TARGET, "Fetch input: {}", input_hash.to_hex()); + let txn = self.read_transaction()?; + self.fetch_input_in_txn(&txn, input_hash.as_slice()) + } + fn fetch_unspent_output_hash_by_commitment( &self, commitment: &Commitment, @@ -2117,15 +1870,9 @@ impl BlockchainBackend for LMDBDatabase { lmdb_get::<_, HashOutput>(&txn, &self.utxo_commitment_index, commitment.as_bytes()) } - fn fetch_outputs_in_block(&self, header_hash: &HashOutput) -> Result<Vec<PrunedOutput>, ChainStorageError> { + fn fetch_outputs_in_block(&self, header_hash: &HashOutput) -> Result<Vec<TransactionOutputRowData>, ChainStorageError> { let txn = self.read_transaction()?; - Ok(lmdb_fetch_matching_after(&txn, &self.utxos_db, header_hash.as_slice())? - .into_iter() - .map(|f: TransactionOutputRowData| match f.output { - Some(o) => PrunedOutput::NotPruned { output: o }, - None => PrunedOutput::Pruned { output_hash: f.hash }, - }) - .collect()) + Ok(lmdb_fetch_matching_after(&txn, &self.utxos_db, header_hash.as_slice())?) } fn fetch_inputs_in_block(&self, header_hash: &HashOutput) -> Result<Vec<TransactionInput>, ChainStorageError> { @@ -2142,15 +1889,9 @@ impl BlockchainBackend for LMDBDatabase { let txn = self.read_transaction()?; match tree { MmrTree::Kernel => Ok(lmdb_len(&txn, &self.kernels_db)? as u64), - MmrTree::Utxo => Ok(lmdb_len(&txn, &self.utxos_db)? as u64), } } - fn fetch_mmr_leaf_index(&self, tree: MmrTree, hash: &HashOutput) -> Result<Option<u32>, ChainStorageError> { - let txn = self.read_transaction()?; - self.fetch_mmr_leaf_index(&txn, tree, hash) - } -
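With the deleted bitmap gone, spent status is now resolved per output: fetch_utxos_in_block above looks each output hash up in deleted_txo_hash_to_header_index and marks it spent only when the recorded spend height is at or below the height of the requested spend header. A self-contained sketch of that rule, using a plain HashMap as a hypothetical stand-in for the LMDB index:

use std::collections::HashMap;

type Hash = [u8; 32];

// Stand-in for deleted_txo_hash_to_header_index:
// TXO hash -> (height of the spending block, hash of the spending header).
struct SpendIndex(HashMap<Hash, (u64, Hash)>);

impl SpendIndex {
    // Spends that happen above `query_height` are invisible at that height,
    // which is what lets the same UTXO set be viewed as of an older header.
    fn is_spent_at(&self, txo_hash: &Hash, query_height: u64) -> bool {
        matches!(self.0.get(txo_hash), Some((spent_height, _)) if *spent_height <= query_height)
    }
}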
/// Returns the number of blocks in the block orphan pool. fn orphan_count(&self) -> Result<usize, ChainStorageError> { let txn = self.read_transaction()?; @@ -2364,24 +2105,6 @@ impl BlockchainBackend for LMDBDatabase { } } - fn fetch_deleted_bitmap(&self) -> Result<DeletedBitmap, ChainStorageError> { - let txn = self.read_transaction()?; - let deleted_bitmap = self.load_deleted_bitmap_model(&txn)?; - Ok(deleted_bitmap.get().clone()) - } - - fn fetch_header_hash_by_deleted_mmr_positions( - &self, - mmr_positions: Vec<u32>, - ) -> Result<Vec<Option<(u64, HashOutput)>>, ChainStorageError> { - let txn = self.read_transaction()?; - - mmr_positions - .iter() - .map(|pos| lmdb_get(&txn, &self.deleted_txo_mmr_position_to_height_index, pos)) - .collect() - } - fn delete_oldest_orphans( &mut self, horizon_height: u64, @@ -2549,6 +2272,20 @@ impl BlockchainBackend for LMDBDatabase { } Ok(result) } + + fn fetch_tip_smt(&self) -> Result<OutputSmt, ChainStorageError> { + let txn = self.read_transaction()?; + let k = MetadataKey::TipSmt; + let val: Option<OutputSmt> = lmdb_get(&txn, &self.tip_utxo_smt, &k.as_u32())?; + match val { + Some(smt) => Ok(smt), + _ => Err(ChainStorageError::ValueNotFound { + entity: "TipSmt", + field: "TipSmt", + value: "".to_string(), + }), + } + } } // Fetch the chain metadata @@ -2646,21 +2383,6 @@ fn fetch_accumulated_work(txn: &ConstTransaction<'_>, db: &Database) -fn fetch_deleted_bitmap(txn: &ConstTransaction<'_>, db: &Database) -> Result<DeletedBitmap, ChainStorageError> { - let k = MetadataKey::DeletedBitmap.as_u32(); - let val: Option<MetadataValue> = lmdb_get(txn, db, &k)?; - match val { - Some(MetadataValue::DeletedBitmap(bitmap)) => Ok(bitmap), - None => Ok(Bitmap::create().into()), - _ => Err(ChainStorageError::ValueNotFound { - entity: "ChainMetadata", - field: "DeletedBitmap", - value: "".to_string(), - }), - } -} - // Fetches the pruning horizon from the provided metadata db. fn fetch_pruning_horizon(txn: &ConstTransaction<'_>, db: &Database) -> Result<u64, ChainStorageError> { let k = MetadataKey::PruningHorizon; @@ -2686,9 +2408,9 @@ enum MetadataKey { PruningHorizon, PrunedHeight, HorizonData, - DeletedBitmap, BestBlockTimestamp, MigrationVersion, + TipSmt, } impl MetadataKey { @@ -2707,9 +2429,9 @@ impl fmt::Display for MetadataKey { MetadataKey::PrunedHeight => write!(f, "Effective pruned height"), MetadataKey::BestBlock => write!(f, "Chain tip block hash"), MetadataKey::HorizonData => write!(f, "Database info"), - MetadataKey::DeletedBitmap => write!(f, "Deleted bitmap"), MetadataKey::BestBlockTimestamp => write!(f, "Chain tip block timestamp"), MetadataKey::MigrationVersion => write!(f, "Migration version"), + MetadataKey::TipSmt => write!(f, "Chain tip sparse Merkle tree"), } } } @@ -2723,7 +2445,6 @@ enum MetadataValue { PruningHorizon(u64), PrunedHeight(u64), HorizonData(HorizonData), - DeletedBitmap(DeletedBitmap), BestBlockTimestamp(u64), MigrationVersion(u64), } @@ -2737,78 +2458,12 @@ impl fmt::Display for MetadataValue { MetadataValue::PrunedHeight(height) => write!(f, "Effective pruned height is {}", height), MetadataValue::BestBlock(hash) => write!(f, "Chain tip block hash is {}", hash.to_hex()), MetadataValue::HorizonData(_) => write!(f, "Horizon data"), - MetadataValue::DeletedBitmap(deleted) => { - write!(f, "Deleted Bitmap ({} indexes)", deleted.bitmap().cardinality()) - }, MetadataValue::BestBlockTimestamp(timestamp) => write!(f, "Chain tip block timestamp is {}", timestamp), MetadataValue::MigrationVersion(n) => write!(f, "Migration version {}", n), } } } -/// A struct that wraps a LMDB transaction and provides an interface to valid operations that can be performed -/// on the current deleted bitmap state of the blockchain. -/// A deleted bitmap contains the MMR leaf indexes of spent TXOs.
-struct DeletedBitmapModel<'a, T> { - txn: &'a T, - db: &'a Database<'static>, - bitmap: DeletedBitmap, - is_dirty: bool, -} - -impl<'a, 'b, T> DeletedBitmapModel<'a, T> -where T: Deref> -{ - pub fn load(txn: &'a T, db: &'a Database<'static>) -> Result { - let bitmap = fetch_deleted_bitmap(txn, db)?; - Ok(Self { - txn, - db, - bitmap, - is_dirty: false, - }) - } - - /// Returns a reference to the `DeletedBitmap` - pub fn get(&self) -> &DeletedBitmap { - &self.bitmap - } -} - -impl<'a, 'b> DeletedBitmapModel<'a, WriteTransaction<'b>> { - /// Merge (union) the given bitmap into this instance. - /// `finish` must be called to persist the bitmap. - pub fn merge(&mut self, deleted: &Bitmap) -> Result<&mut Self, ChainStorageError> { - self.bitmap.bitmap_mut().or_inplace(deleted); - self.is_dirty = true; - Ok(self) - } - - /// Remove (difference) the given bitmap from this instance. - /// `finish` must be called to persist the bitmap. - pub fn remove(&mut self, deleted: &Bitmap) -> Result<&mut Self, ChainStorageError> { - self.bitmap.bitmap_mut().andnot_inplace(deleted); - self.is_dirty = true; - Ok(self) - } - - /// Persist the bitmap if required. This is a no-op if the bitmap has not been modified. - pub fn save(mut self) -> Result<(), ChainStorageError> { - if !self.is_dirty { - return Ok(()); - } - - self.bitmap.bitmap_mut().run_optimize(); - lmdb_replace( - self.txn, - self.db, - &MetadataKey::DeletedBitmap.as_u32(), - &MetadataValue::DeletedBitmap(self.bitmap), - )?; - Ok(()) - } -} - fn run_migrations(db: &LMDBDatabase) -> Result<(), ChainStorageError> { const MIGRATION_VERSION: u64 = 1; let txn = db.read_transaction()?; diff --git a/base_layer/core/src/chain_storage/lmdb_db/mod.rs b/base_layer/core/src/chain_storage/lmdb_db/mod.rs index 971015b386..b2136f2c0b 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/mod.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/mod.rs @@ -37,9 +37,8 @@ mod validator_node_store; #[derive(Serialize, Deserialize, Debug)] pub(crate) struct TransactionOutputRowData { - pub output: Option, + pub output: TransactionOutput, pub header_hash: HashOutput, - pub mmr_position: u32, pub hash: HashOutput, pub mined_height: u64, pub mined_timestamp: u64, @@ -52,7 +51,8 @@ pub(crate) struct TransactionInputRowDataRef<'a> { pub input: &'a TransactionInput, #[allow(clippy::ptr_arg)] pub header_hash: &'a HashOutput, - pub mmr_position: u32, + pub mined_timestamp: u64, + pub height: u64, #[allow(clippy::ptr_arg)] pub hash: &'a HashOutput, } @@ -61,7 +61,8 @@ pub(crate) struct TransactionInputRowDataRef<'a> { pub(crate) struct TransactionInputRowData { pub input: TransactionInput, pub header_hash: HashOutput, - pub mmr_position: u32, + pub mined_timestamp: u64, + pub height: u64, pub hash: HashOutput, } diff --git a/base_layer/core/src/chain_storage/mmr_tree.rs b/base_layer/core/src/chain_storage/mmr_tree.rs index c236624410..0e103ca6a7 100644 --- a/base_layer/core/src/chain_storage/mmr_tree.rs +++ b/base_layer/core/src/chain_storage/mmr_tree.rs @@ -26,14 +26,12 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Copy)] pub enum MmrTree { - Utxo, Kernel, } impl Display for MmrTree { fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { match self { - MmrTree::Utxo => f.write_str("UTXO"), MmrTree::Kernel => f.write_str("Kernel"), } } diff --git a/base_layer/core/src/chain_storage/mod.rs b/base_layer/core/src/chain_storage/mod.rs index be300dac4b..de617c1799 100644 --- a/base_layer/core/src/chain_storage/mod.rs +++ 
b/base_layer/core/src/chain_storage/mod.rs @@ -66,9 +66,6 @@ pub use error::{ChainStorageError, Optional, OrNotFound}; mod horizon_data; pub use horizon_data::HorizonData; -mod pruned_output; -pub use pruned_output::PrunedOutput; - mod reorg; pub use reorg::Reorg; diff --git a/base_layer/core/src/chain_storage/pruned_output.rs b/base_layer/core/src/chain_storage/pruned_output.rs deleted file mode 100644 index 4ff2c0a142..0000000000 --- a/base_layer/core/src/chain_storage/pruned_output.rs +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2021, The Tari Project -// -// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the -// following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following -// disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -// following disclaimer in the documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote -// products derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -use serde::{Deserialize, Serialize}; -use tari_common_types::types::{FixedHash, HashOutput}; - -use crate::transactions::transaction_components::TransactionOutput; - -#[allow(clippy::large_enum_variant)] -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub enum PrunedOutput { - Pruned { output_hash: HashOutput }, - NotPruned { output: TransactionOutput }, -} - -impl PrunedOutput { - pub fn is_pruned(&self) -> bool { - matches!(self, PrunedOutput::Pruned { .. }) - } - - pub fn hash(&self) -> FixedHash { - match self { - PrunedOutput::Pruned { output_hash } => *output_hash, - PrunedOutput::NotPruned { output } => output.hash(), - } - } - - pub fn as_transaction_output(&self) -> Option<&TransactionOutput> { - match self { - PrunedOutput::Pruned { .. } => None, - PrunedOutput::NotPruned { output } => Some(output), - } - } - - pub fn into_unpruned_output(self) -> Option { - match self { - PrunedOutput::Pruned { .. 
} => None, - PrunedOutput::NotPruned { output } => Some(output), - } - } -} - -#[cfg(test)] -mod test { - use super::*; - - impl PrunedOutput { - pub fn sample() -> Self { - Self::Pruned { - output_hash: FixedHash::zero(), - } - } - } - - #[test] - fn coverage_pruned_output() { - let obj = PrunedOutput::sample(); - assert!(obj.is_pruned()); - drop(obj.clone()); - format!("{:?}", obj); - obj.hash(); - obj.as_transaction_output(); - obj.into_unpruned_output(); - } -} diff --git a/base_layer/core/src/chain_storage/tests/blockchain_database.rs b/base_layer/core/src/chain_storage/tests/blockchain_database.rs index 103a67d2ef..d9b12a547d 100644 --- a/base_layer/core/src/chain_storage/tests/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/tests/blockchain_database.rs @@ -67,7 +67,7 @@ fn apply_mmr_to_block(db: &BlockchainDatabase, block: Block) -> Bl let (mut block, mmr_roots) = db.calculate_mmr_roots(block).unwrap(); block.header.input_mr = mmr_roots.input_mr; block.header.output_mr = mmr_roots.output_mr; - block.header.output_mmr_size = mmr_roots.output_mmr_size; + block.header.output_smt_size = mmr_roots.output_smt_size; block.header.kernel_mr = mmr_roots.kernel_mr; block.header.kernel_mmr_size = mmr_roots.kernel_mmr_size; block.header.validator_node_mr = mmr_roots.validator_node_mr; @@ -80,12 +80,7 @@ async fn add_many_chained_blocks( key_manager: &TestKeyManager, ) -> (Vec>, Vec) { let last_header = db.fetch_last_header().unwrap(); - let mut prev_block = db - .fetch_block(last_header.height, true) - .unwrap() - .try_into_block() - .map(Arc::new) - .unwrap(); + let mut prev_block = Arc::new(db.fetch_block(last_header.height, true).unwrap().into_block()); let mut blocks = Vec::with_capacity(size); let mut outputs = Vec::with_capacity(size); for _ in 1..=size { @@ -405,7 +400,7 @@ mod fetch_total_size_stats { #[tokio::test] async fn it_measures_the_number_of_entries() { let db = setup(); - let genesis_output_count = db.fetch_header(0).unwrap().unwrap().output_mmr_size; + let genesis_output_count = db.fetch_header(0).unwrap().unwrap().output_smt_size; let key_manager = create_test_core_key_manager_with_memory_db(); let _block_and_outputs = add_many_chained_blocks(2, &db, &key_manager).await; let stats = db.fetch_total_size_stats().unwrap(); @@ -456,47 +451,6 @@ mod prepare_new_block { } } -mod fetch_header_containing_utxo_mmr { - use super::*; - - #[test] - fn it_returns_genesis() { - let db = setup(); - let genesis = db.fetch_block(0, true).unwrap(); - assert!(!genesis.block().body.outputs().is_empty()); - let mut mmr_position = 0; - genesis.block().body.outputs().iter().for_each(|_| { - let header = db.fetch_header_containing_utxo_mmr(mmr_position).unwrap(); - assert_eq!(header.height(), 0); - mmr_position += 1; - }); - let err = db.fetch_header_containing_utxo_mmr(mmr_position).unwrap_err(); - matches!(err, ChainStorageError::ValueNotFound { .. 
}); - } - - #[tokio::test] - async fn it_returns_corresponding_header() { - let db = setup(); - let genesis = db.fetch_block(0, true).unwrap(); - let key_manager = create_test_core_key_manager_with_memory_db(); - let _block_and_outputs = add_many_chained_blocks(5, &db, &key_manager).await; - let num_genesis_outputs = genesis.block().body.outputs().len() as u64; - - let header = db.fetch_header_containing_utxo_mmr(num_genesis_outputs - 1).unwrap(); - assert_eq!(header.height(), 0); - - for i in 1..=5 { - let index = num_genesis_outputs + i - 1; - let header = db.fetch_header_containing_utxo_mmr(index).unwrap(); - assert_eq!(header.height(), i, "Incorrect header for MMR index = {}", index); - } - let err = db - .fetch_header_containing_utxo_mmr(num_genesis_outputs + 5) - .unwrap_err(); - matches!(err, ChainStorageError::ValueNotFound { .. }); - } -} - mod fetch_header_containing_kernel_mmr { use super::*; @@ -580,7 +534,7 @@ mod clear_all_pending_headers { .map(|_| { let mut header = BlockHeader::from_previous(prev_header.header()); header.kernel_mmr_size += 1; - header.output_mmr_size += 1; + header.output_smt_size += 1; let accum = BlockHeaderAccumulatedData::builder(&prev_accum) .with_hash(header.hash()) .with_achieved_target_difficulty( diff --git a/base_layer/core/src/chain_storage/utxo_mined_info.rs b/base_layer/core/src/chain_storage/utxo_mined_info.rs index f7757cd37f..c8777d13d7 100644 --- a/base_layer/core/src/chain_storage/utxo_mined_info.rs +++ b/base_layer/core/src/chain_storage/utxo_mined_info.rs @@ -23,39 +23,20 @@ use serde::{Deserialize, Serialize}; use tari_common_types::types::BlockHash; -use crate::chain_storage::PrunedOutput; +use crate::transactions::transaction_components::{TransactionInput, TransactionOutput}; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct UtxoMinedInfo { - pub output: PrunedOutput, - pub mmr_position: u32, + pub output: TransactionOutput, pub mined_height: u64, pub header_hash: BlockHash, pub mined_timestamp: u64, } -#[cfg(test)] -mod test { - use tari_common_types::types::FixedHash; - - use super::*; - - impl UtxoMinedInfo { - pub fn sample() -> Self { - Self { - output: PrunedOutput::sample(), - mmr_position: 0, - mined_height: 0, - header_hash: FixedHash::zero(), - mined_timestamp: 0, - } - } - } - - #[test] - fn coverage_utxo_mined_info() { - let obj = UtxoMinedInfo::sample(); - drop(obj.clone()); - format!("{:?}", obj); - } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TxoMinedInfo { + pub input: TransactionInput, + pub spent_height: u64, + pub header_hash: BlockHash, + pub spent_timestamp: u64, } diff --git a/base_layer/core/src/lib.rs b/base_layer/core/src/lib.rs index b9b85910bb..cf4ca9672a 100644 --- a/base_layer/core/src/lib.rs +++ b/base_layer/core/src/lib.rs @@ -74,7 +74,13 @@ mod domain_hashing { use blake2::Blake2b; use digest::consts::U32; use tari_crypto::{hash_domain, hashing::DomainSeparatedHasher}; - use tari_mmr::{pruned_hashset::PrunedHashSet, BalancedBinaryMerkleTree, Hash, MerkleMountainRange, MutableMmr}; + use tari_mmr::{ + pruned_hashset::PrunedHashSet, + sparse_merkle_tree::SparseMerkleTree, + BalancedBinaryMerkleTree, + Hash, + MerkleMountainRange, + }; hash_domain!(KernelMmrHashDomain, "com.tari.base_layer.core.kernel_mmr", 1); @@ -82,16 +88,15 @@ mod domain_hashing { pub type KernelMmr = MerkleMountainRange>; pub type PrunedKernelMmr = MerkleMountainRange; - hash_domain!(OutputMmrHashDomain, "com.tari.base_layer.core.output_mmr", 1); - pub type OutputMmrHasherBlake256 = DomainSeparatedHasher, 
OutputMmrHashDomain>; - pub type MutableOutputMmr = MutableMmr<OutputMmrHasherBlake256, Vec<Hash>>; - pub type PrunedOutputMmr = MerkleMountainRange<OutputMmrHasherBlake256, PrunedHashSet>; - pub type MutablePrunedOutputMmr = MutableMmr<OutputMmrHasherBlake256, PrunedHashSet>; + hash_domain!(OutputSmtHashDomain, "com.tari.base_layer.core.output_smt", 1); + pub type OutputSmtHasherBlake256 = DomainSeparatedHasher<Blake2b<U32>, OutputSmtHashDomain>; hash_domain!(InputMmrHashDomain, "com.tari.base_layer.core.input_mmr", 1); pub type InputMmrHasherBlake256 = DomainSeparatedHasher<Blake2b<U32>, InputMmrHashDomain>; pub type PrunedInputMmr = MerkleMountainRange<InputMmrHasherBlake256, PrunedHashSet>; + pub type OutputSmt = SparseMerkleTree<Blake2b<U32>>; + hash_domain!( ValidatorNodeBmtHashDomain, "com.tari.base_layer.core.validator_node_mmr", diff --git a/base_layer/core/src/proof_of_work/monero_rx/helpers.rs b/base_layer/core/src/proof_of_work/monero_rx/helpers.rs index a79ae6604f..8cddc47cba 100644 --- a/base_layer/core/src/proof_of_work/monero_rx/helpers.rs +++ b/base_layer/core/src/proof_of_work/monero_rx/helpers.rs @@ -326,7 +326,7 @@ mod test { prev_hash: FixedHash::zero(), timestamp: EpochTime::now(), output_mr: FixedHash::zero(), - output_mmr_size: 0, + output_smt_size: 0, kernel_mr: FixedHash::zero(), kernel_mmr_size: 0, input_mr: FixedHash::zero(), @@ -382,7 +382,7 @@ mod test { prev_hash: FixedHash::zero(), timestamp: EpochTime::now(), output_mr: FixedHash::zero(), - output_mmr_size: 0, + output_smt_size: 0, kernel_mr: FixedHash::zero(), kernel_mmr_size: 0, input_mr: FixedHash::zero(), @@ -434,7 +434,7 @@ mod test { prev_hash: FixedHash::zero(), timestamp: EpochTime::now(), output_mr: FixedHash::zero(), - output_mmr_size: 0, + output_smt_size: 0, kernel_mr: FixedHash::zero(), kernel_mmr_size: 0, input_mr: FixedHash::zero(), @@ -485,7 +485,7 @@ mod test { prev_hash: FixedHash::zero(), timestamp: EpochTime::now(), output_mr: FixedHash::zero(), - output_mmr_size: 0, + output_smt_size: 0, kernel_mr: FixedHash::zero(), kernel_mmr_size: 0, input_mr: FixedHash::zero(), @@ -540,7 +540,7 @@ mod test { prev_hash: FixedHash::zero(), timestamp: EpochTime::now(), output_mr: FixedHash::zero(), - output_mmr_size: 0, + output_smt_size: 0, kernel_mr: FixedHash::zero(), kernel_mmr_size: 0, input_mr: FixedHash::zero(), @@ -599,7 +599,7 @@ mod test { prev_hash: FixedHash::zero(), timestamp: EpochTime::now(), output_mr: FixedHash::zero(), - output_mmr_size: 0, + output_smt_size: 0, kernel_mr: FixedHash::zero(), kernel_mmr_size: 0, input_mr: FixedHash::zero(), @@ -650,7 +650,7 @@ mod test { prev_hash: FixedHash::zero(), timestamp: EpochTime::now(), output_mr: FixedHash::zero(), - output_mmr_size: 0, + output_smt_size: 0, kernel_mr: FixedHash::zero(), kernel_mmr_size: 0, input_mr: FixedHash::zero(), @@ -692,7 +692,7 @@ mod test { prev_hash: FixedHash::zero(), timestamp: EpochTime::now(), output_mr: FixedHash::zero(), - output_mmr_size: 0, + output_smt_size: 0, kernel_mr: FixedHash::zero(), kernel_mmr_size: 0, input_mr: FixedHash::zero(), diff --git a/base_layer/core/src/proto/block.proto b/base_layer/core/src/proto/block.proto index 96a1ab1b5c..7b5d6f6f70 100644 --- a/base_layer/core/src/proto/block.proto +++ b/base_layer/core/src/proto/block.proto @@ -88,8 +88,6 @@ message HistoricalBlock { Block block = 3; // Accumulated and other pertinent data in the block header acting as a "condensed blockchain snapshot" for the block BlockHeaderAccumulatedData accumulated_data = 4; - repeated bytes pruned_output_hashes = 5; - uint64 pruned_input_count = 7; } // Accumulated and other pertinent data in the block header acting as a "condensed blockchain snapshot" for the block
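The OutputSmt alias above is what a header's output_mr now commits to: the root of the sparse Merkle tree over all unspent outputs. Each leaf value is a domain-separated hash binding the output hash to its mined height (the smt_hash method added further down in this diff). A sketch of that leaf derivation; the real code uses DomainSeparatedConsensusHasher, so the plain Blake2b framing here is an assumption for illustration only:

use blake2::{Blake2b, Digest};
use digest::consts::U32;

// Hypothetical stand-in for the consensus hasher: hash the label, the
// canonical output hash and the mined height into a 32-byte leaf value.
fn smt_leaf_hash(utxo_hash: &[u8; 32], mined_height: u64) -> [u8; 32] {
    let mut hasher = Blake2b::<U32>::new();
    hasher.update(b"smt_hash");
    hasher.update(utxo_hash);
    hasher.update(mined_height.to_le_bytes());
    hasher.finalize().into()
}

Binding the height into the leaf means the same output re-mined at a different height after a reorg yields a different root, so output_mr commits to placement as well as membership.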
diff --git a/base_layer/core/src/proto/block.rs b/base_layer/core/src/proto/block.rs index 256f23f279..f1d4b96637 100644 --- a/base_layer/core/src/proto/block.rs +++ b/base_layer/core/src/proto/block.rs @@ -22,7 +22,7 @@ use std::convert::{TryFrom, TryInto}; -use tari_common_types::types::{FixedHash, PrivateKey}; +use tari_common_types::types::PrivateKey; use tari_utilities::ByteArray; use super::core as proto; @@ -78,18 +78,10 @@ impl TryFrom<proto::HistoricalBlock> for HistoricalBlock { .map(TryInto::try_into) .ok_or_else(|| "accumulated_data in historical block not provided".to_string())??; - let output_hashes: Vec<FixedHash> = historical_block - .pruned_output_hashes - .into_iter() - .map(|hash| hash.try_into().map_err(|_| "Malformed pruned hash".to_string())) - .collect::<Result<_, _>>()?; - Ok(HistoricalBlock::new( block, historical_block.confirmations, accumulated_data, - output_hashes, - historical_block.pruned_input_count, )) } } @@ -98,14 +90,11 @@ impl TryFrom<HistoricalBlock> for proto::HistoricalBlock { type Error = String; fn try_from(block: HistoricalBlock) -> Result<Self, Self::Error> { - let pruned_output_hashes = block.pruned_outputs().iter().map(|x| x.to_vec()).collect(); - let (block, accumulated_data, confirmations, pruned_input_count) = block.dissolve(); + let (block, accumulated_data, confirmations) = block.dissolve(); Ok(Self { confirmations, accumulated_data: Some(accumulated_data.into()), block: Some(block.try_into()?), - pruned_output_hashes, - pruned_input_count, }) } } diff --git a/base_layer/core/src/proto/block_header.rs b/base_layer/core/src/proto/block_header.rs index 05cb5ecea1..6cafd8b278 100644 --- a/base_layer/core/src/proto/block_header.rs +++ b/base_layer/core/src/proto/block_header.rs @@ -51,7 +51,7 @@ impl TryFrom<proto::BlockHeader> for BlockHeader { prev_hash: FixedHash::try_from(header.prev_hash).map_err(|err| err.to_string())?, timestamp: EpochTime::from(header.timestamp), output_mr: FixedHash::try_from(header.output_mr).map_err(|err| err.to_string())?, - output_mmr_size: header.output_mmr_size, + output_smt_size: header.output_mmr_size, kernel_mr: FixedHash::try_from(header.kernel_mr).map_err(|err| err.to_string())?, kernel_mmr_size: header.kernel_mmr_size, input_mr: FixedHash::try_from(header.input_mr).map_err(|err| err.to_string())?, @@ -79,7 +79,7 @@ impl From<BlockHeader> for proto::BlockHeader { nonce: header.nonce, pow: Some(proto::ProofOfWork::from(header.pow)), kernel_mmr_size: header.kernel_mmr_size, - output_mmr_size: header.output_mmr_size, + output_mmr_size: header.output_smt_size, validator_node_merkle_root: header.validator_node_mr.to_vec(), } } diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index b47f3bdadd..c1836f3dd8 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -28,26 +28,17 @@ use std::{ sync::Arc, }; -use croaring::Bitmap; use tari_common::configuration::Network; use tari_common_types::{ chain_metadata::ChainMetadata, - types::{Commitment, HashOutput, PublicKey, Signature}, + types::{Commitment, FixedHash, HashOutput, PublicKey, Signature}, }; use tari_storage::lmdb_store::LMDBConfig; use tari_test_utils::paths::create_temporary_data_path; use super::{create_block, mine_to_difficulty}; use crate::{ - blocks::{ - Block, - BlockAccumulatedData, - BlockHeader, - BlockHeaderAccumulatedData, - ChainBlock, - ChainHeader, - DeletedBitmap, - }, + blocks::{Block, BlockAccumulatedData, BlockHeader, BlockHeaderAccumulatedData, ChainBlock, ChainHeader}, chain_storage::{ create_lmdb_database, BlockAddResult, @@ -63,9 +54,9 @@ use
crate::{ HorizonData, LMDBDatabase, MmrTree, - PrunedOutput, Reorg, TemplateRegistrationEntry, + TxoMinedInfo, UtxoMinedInfo, Validators, }, @@ -74,7 +65,7 @@ use crate::{ test_helpers::{block_spec::BlockSpecs, create_consensus_rules, BlockSpec}, transactions::{ test_helpers::{create_test_core_key_manager_with_memory_db, TestKeyManager}, - transaction_components::{TransactionInput, TransactionKernel, WalletOutput}, + transaction_components::{TransactionInput, TransactionKernel, TransactionOutput, WalletOutput}, CryptoFactories, }, validation::{ @@ -82,6 +73,7 @@ use crate::{ mocks::MockValidator, DifficultyCalculator, }, + OutputSmt, }; /// Create a new blockchain database containing the genesis block @@ -246,10 +238,6 @@ impl BlockchainBackend for TempDatabase { .fetch_header_containing_kernel_mmr(mmr_position) } - fn fetch_header_containing_utxo_mmr(&self, mmr_position: u64) -> Result<ChainHeader, ChainStorageError> { - self.db.as_ref().unwrap().fetch_header_containing_utxo_mmr(mmr_position) - } - fn is_empty(&self) -> Result<bool, ChainStorageError> { self.db.as_ref().unwrap().is_empty() } @@ -282,15 +270,22 @@ impl BlockchainBackend for TempDatabase { fn fetch_utxos_in_block( &self, header_hash: &HashOutput, - deleted: Option<&Bitmap>, - ) -> Result<(Vec<PrunedOutput>, Bitmap), ChainStorageError> { - self.db.as_ref().unwrap().fetch_utxos_in_block(header_hash, deleted) + spend_header: Option<FixedHash>, + ) -> Result<Vec<(TransactionOutput, bool)>, ChainStorageError> { + self.db + .as_ref() + .unwrap() + .fetch_utxos_in_block(header_hash, spend_header) } fn fetch_output(&self, output_hash: &HashOutput) -> Result<Option<UtxoMinedInfo>, ChainStorageError> { self.db.as_ref().unwrap().fetch_output(output_hash) } + fn fetch_input(&self, input_hash: &HashOutput) -> Result<Option<TxoMinedInfo>, ChainStorageError> { + self.db.as_ref().unwrap().fetch_input(input_hash) + } + fn fetch_unspent_output_hash_by_commitment( &self, commitment: &Commitment, @@ -301,7 +296,7 @@ impl BlockchainBackend for TempDatabase { .fetch_unspent_output_hash_by_commitment(commitment) } - fn fetch_outputs_in_block(&self, header_hash: &HashOutput) -> Result<Vec<PrunedOutput>, ChainStorageError> { + fn fetch_outputs_in_block(&self, header_hash: &HashOutput) -> Result<Vec<TransactionOutputRowData>, ChainStorageError> { self.db.as_ref().unwrap().fetch_outputs_in_block(header_hash) } @@ -313,10 +308,6 @@ impl BlockchainBackend for TempDatabase { self.db.as_ref().unwrap().fetch_mmr_size(tree) } - fn fetch_mmr_leaf_index(&self, tree: MmrTree, hash: &HashOutput) -> Result<Option<u32>, ChainStorageError> { - self.db.as_ref().unwrap().fetch_mmr_leaf_index(tree, hash) - } - fn orphan_count(&self) -> Result<usize, ChainStorageError> { self.db.as_ref().unwrap().orphan_count() } @@ -365,10 +356,6 @@ impl BlockchainBackend for TempDatabase { self.db.as_ref().unwrap().fetch_orphan_chain_block(hash) } - fn fetch_deleted_bitmap(&self) -> Result<DeletedBitmap, ChainStorageError> { - self.db.as_ref().unwrap().fetch_deleted_bitmap() - } - fn delete_oldest_orphans( &mut self, horizon_height: u64, @@ -396,16 +383,6 @@ impl BlockchainBackend for TempDatabase { self.db.as_ref().unwrap().fetch_total_size_stats() } - fn fetch_header_hash_by_deleted_mmr_positions( - &self, - mmr_positions: Vec<u32>, - ) -> Result<Vec<Option<(u64, HashOutput)>>, ChainStorageError> { - self.db - .as_ref() - .unwrap() - .fetch_header_hash_by_deleted_mmr_positions(mmr_positions) - } - fn bad_block_exists(&self, block_hash: HashOutput) -> Result<bool, ChainStorageError> { self.db.as_ref().unwrap().bad_block_exists(block_hash) } @@ -432,6 +409,10 @@ impl BlockchainBackend for TempDatabase { .unwrap() .fetch_template_registrations(start_height, end_height) } + + fn fetch_tip_smt(&self) -> Result<OutputSmt, ChainStorageError> { + self.db.as_ref().unwrap().fetch_tip_smt() + } } pub async fn create_chained_blocks<T: Into<BlockSpecs>>( diff --git
a/base_layer/core/src/test_helpers/mod.rs b/base_layer/core/src/test_helpers/mod.rs index 2eeef7afda..5cce52aae0 100644 --- a/base_layer/core/src/test_helpers/mod.rs +++ b/base_layer/core/src/test_helpers/mod.rs @@ -117,7 +117,7 @@ pub async fn create_block( // Keep times constant in case we need a particular target difficulty block.header.timestamp = prev_block.header.timestamp.increase(spec.block_time); - block.header.output_mmr_size = prev_block.header.output_mmr_size + block.body.outputs().len() as u64; + block.header.output_smt_size = prev_block.header.output_smt_size + block.body.outputs().len() as u64; block.header.kernel_mmr_size = prev_block.header.kernel_mmr_size + block.body.kernels().len() as u64; (block, coinbase_output) diff --git a/base_layer/core/src/transactions/transaction_components/transaction_output.rs b/base_layer/core/src/transactions/transaction_components/transaction_output.rs index 2dcf623a34..98a75955d8 100644 --- a/base_layer/core/src/transactions/transaction_components/transaction_output.rs +++ b/base_layer/core/src/transactions/transaction_components/transaction_output.rs @@ -221,6 +221,17 @@ impl TransactionOutput { ) } + pub fn smt_hash(&self, mined_height: u64) -> FixedHash { + let utxo_hash = self.hash(); + let smt_hash = DomainSeparatedConsensusHasher::<TransactionHashDomain, Blake2b<U32>>::new("smt_hash") + .chain(&utxo_hash) + .chain(&mined_height); + + match self.version { + TransactionOutputVersion::V0 | TransactionOutputVersion::V1 => smt_hash.finalize().into(), + } + } + /// Verify that range proof is valid pub fn verify_range_proof(&self, prover: &RangeProofService) -> Result<(), TransactionError> { match self.features.range_proof_type { diff --git a/base_layer/core/src/validation/aggregate_body/aggregate_body_chain_validator.rs b/base_layer/core/src/validation/aggregate_body/aggregate_body_chain_validator.rs index a533a754ec..3f0a3b593f 100644 --- a/base_layer/core/src/validation/aggregate_body/aggregate_body_chain_validator.rs +++ b/base_layer/core/src/validation/aggregate_body/aggregate_body_chain_validator.rs @@ -27,7 +27,7 @@ use tari_common_types::types::FixedHash; use tari_utilities::hex::Hex; use crate::{ - chain_storage::{BlockchainBackend, PrunedOutput}, + chain_storage::BlockchainBackend, consensus::{ConsensusConstants, ConsensusManager}, transactions::{ aggregated_body::AggregateBody, @@ -116,29 +116,22 @@ fn validate_input_not_pruned( .fetch_output(&input.output_hash())? .ok_or(ValidationError::UnknownInput)?; - match output_mined_info.output { - PrunedOutput::Pruned { ..
} => { - return Err(ValidationError::ContainsSTxO); - }, - PrunedOutput::NotPruned { output } => { - let rp_hash = match output.proof { - Some(proof) => proof.hash(), - None => FixedHash::zero(), - }; - input.add_output_data( - output.version, - output.features, - output.commitment, - output.script, - output.sender_offset_public_key, - output.covenant, - output.encrypted_data, - output.metadata_signature, - rp_hash, - output.minimum_value_promise, - ); - }, - } + let rp_hash = match output_mined_info.output.proof { + Some(proof) => proof.hash(), + None => FixedHash::zero(), + }; + input.add_output_data( + output_mined_info.output.version, + output_mined_info.output.features, + output_mined_info.output.commitment, + output_mined_info.output.script, + output_mined_info.output.sender_offset_public_key, + output_mined_info.output.covenant, + output_mined_info.output.encrypted_data, + output_mined_info.output.metadata_signature, + rp_hash, + output_mined_info.output.minimum_value_promise, + ); } } diff --git a/base_layer/core/src/validation/block_body/test.rs b/base_layer/core/src/validation/block_body/test.rs index d2feef8c70..76b3874c16 100644 --- a/base_layer/core/src/validation/block_body/test.rs +++ b/base_layer/core/src/validation/block_body/test.rs @@ -90,7 +90,7 @@ async fn it_passes_if_large_output_block_is_valid() { .unwrap(); block.header.input_mr = mmr_roots.input_mr; block.header.output_mr = mmr_roots.output_mr; - block.header.output_mmr_size = mmr_roots.output_mmr_size; + block.header.output_smt_size = mmr_roots.output_smt_size; block.header.kernel_mr = mmr_roots.kernel_mr; block.header.kernel_mmr_size = mmr_roots.kernel_mmr_size; block.header.validator_node_mr = mmr_roots.validator_node_mr; @@ -136,7 +136,7 @@ async fn it_passes_if_large_block_is_valid() { .unwrap(); block.header.input_mr = mmr_roots.input_mr; block.header.output_mr = mmr_roots.output_mr; - block.header.output_mmr_size = mmr_roots.output_mmr_size; + block.header.output_smt_size = mmr_roots.output_smt_size; block.header.kernel_mr = mmr_roots.kernel_mr; block.header.kernel_mmr_size = mmr_roots.kernel_mmr_size; block.header.validator_node_mr = mmr_roots.validator_node_mr; @@ -163,7 +163,7 @@ async fn it_passes_if_block_is_valid() { .unwrap(); block.header.input_mr = mmr_roots.input_mr; block.header.output_mr = mmr_roots.output_mr; - block.header.output_mmr_size = mmr_roots.output_mmr_size; + block.header.output_smt_size = mmr_roots.output_smt_size; block.header.kernel_mr = mmr_roots.kernel_mr; block.header.kernel_mmr_size = mmr_roots.kernel_mmr_size; block.header.validator_node_mr = mmr_roots.validator_node_mr; diff --git a/base_layer/core/src/validation/helpers.rs b/base_layer/core/src/validation/helpers.rs index 3e5dfabeb7..e9c0e0cf59 100644 --- a/base_layer/core/src/validation/helpers.rs +++ b/base_layer/core/src/validation/helpers.rs @@ -214,13 +214,6 @@ pub fn check_not_duplicate_txo( db: &B, output: &TransactionOutput, ) -> Result<(), ValidationError> { - if let Some(index) = db.fetch_mmr_leaf_index(MmrTree::Utxo, &output.hash())? { - warn!( - target: LOG_TARGET, - "Validation failed due to previously spent output: {} (MMR index = {})", output, index - ); - return Err(ValidationError::ContainsTxO); - } if db .fetch_unspent_output_hash_by_commitment(&output.commitment)? 
.is_some() @@ -277,18 +270,18 @@ pub fn check_mmr_roots(header: &BlockHeader, mmr_roots: &MmrRoots) -> Result<(), kind: "Utxo", })); }; - if header.output_mmr_size != mmr_roots.output_mmr_size { + if header.output_smt_size != mmr_roots.output_smt_size { warn!( target: LOG_TARGET, "Block header output MMR size in {} does not match. Expected: {}, Actual: {}", header.hash().to_hex(), - header.output_mmr_size, - mmr_roots.output_mmr_size + header.output_smt_size, + mmr_roots.output_smt_size ); return Err(ValidationError::BlockError(BlockValidationError::MismatchedMmrSize { - mmr_tree: MmrTree::Utxo.to_string(), - expected: mmr_roots.output_mmr_size, - actual: header.output_mmr_size, + mmr_tree: "Utxo".to_string(), + expected: mmr_roots.output_smt_size, + actual: header.output_smt_size, })); } if header.input_mr != mmr_roots.input_mr { diff --git a/base_layer/core/src/validation/test.rs b/base_layer/core/src/validation/test.rs index fc6297cc4b..2a8166aeb4 100644 --- a/base_layer/core/src/validation/test.rs +++ b/base_layer/core/src/validation/test.rs @@ -89,7 +89,7 @@ mod header_validators { header.prev_hash = *prev.hash(); // These have to be unique header.kernel_mmr_size = 2 + i; - header.output_mmr_size = 4001 + i; + header.output_smt_size = 4001 + i; let chain_header = create_chain_header(header, prev.accumulated_data()); acc.push(chain_header); @@ -265,7 +265,7 @@ async fn chain_balance_validation() { let mut header1 = BlockHeader::from_previous(genesis.header()); header1.kernel_mmr_size += 1; - header1.output_mmr_size += 1; + header1.output_smt_size += 1; let achieved_difficulty = AchievedTargetDifficulty::try_construct( genesis.header().pow_algo(), genesis.accumulated_data().target_difficulty, @@ -282,10 +282,9 @@ async fn chain_balance_validation() { txn.insert_chain_header(header1.clone()); let mut mmr_position = 4; - let mut mmr_leaf_index = 4; txn.insert_kernel(kernel.clone(), *header1.hash(), mmr_position); - txn.insert_utxo(coinbase.clone(), *header1.hash(), 1, mmr_leaf_index, 0); + txn.insert_utxo(coinbase.clone(), *header1.hash(), 1, 0); db.commit(txn).unwrap(); utxo_sum = &coinbase.commitment + &utxo_sum; @@ -326,7 +325,7 @@ async fn chain_balance_validation() { let mut header2 = BlockHeader::from_previous(header1.header()); header2.kernel_mmr_size += 1; - header2.output_mmr_size += 1; + header2.output_smt_size += 1; let achieved_difficulty = AchievedTargetDifficulty::try_construct( genesis.header().pow_algo(), genesis.accumulated_data().target_difficulty, @@ -343,8 +342,7 @@ async fn chain_balance_validation() { txn.insert_chain_header(header2.clone()); utxo_sum = &coinbase.commitment + &utxo_sum; kernel_sum = &kernel.excess + &kernel_sum; - mmr_leaf_index += 1; - txn.insert_utxo(coinbase, *header2.hash(), 2, mmr_leaf_index, 0); + txn.insert_utxo(coinbase, *header2.hash(), 2, 0); mmr_position += 1; txn.insert_kernel(kernel, *header2.hash(), mmr_position); @@ -477,7 +475,7 @@ async fn chain_balance_validation_burned() { burned_sum = &burned_sum + kernel2.get_burn_commitment().unwrap(); let mut header1 = BlockHeader::from_previous(genesis.header()); header1.kernel_mmr_size += 2; - header1.output_mmr_size += 2; + header1.output_smt_size += 2; let achieved_difficulty = AchievedTargetDifficulty::try_construct( genesis.header().pow_algo(), genesis.accumulated_data().target_difficulty, @@ -494,16 +492,14 @@ async fn chain_balance_validation_burned() { txn.insert_chain_header(header1.clone()); let mut mmr_position = 4; - let mut mmr_leaf_index = 4; txn.insert_kernel(kernel.clone(), 
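The renamed size check keeps its shape: the header commits to a leaf count, validation recomputes it, and any mismatch is a hard failure. Condensed, with the error type simplified:

// The header's declared output SMT size must equal the size recomputed from
// chain state after applying the block.
#[derive(Debug)]
struct MismatchedMmrSize {
    mmr_tree: String,
    expected: u64,
    actual: u64,
}

fn check_output_smt_size(header_size: u64, computed_size: u64) -> Result<(), MismatchedMmrSize> {
    if header_size != computed_size {
        return Err(MismatchedMmrSize {
            mmr_tree: "Utxo".to_string(),
            expected: computed_size,
            actual: header_size,
        });
    }
    Ok(())
}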
*header1.hash(), mmr_position); - txn.insert_utxo(coinbase.clone(), *header1.hash(), 1, mmr_leaf_index, 0); + txn.insert_utxo(coinbase.clone(), *header1.hash(), 1, 0); mmr_position = 5; - mmr_leaf_index = 5; txn.insert_kernel(kernel2.clone(), *header1.hash(), mmr_position); - txn.insert_pruned_utxo(burned.hash(), *header1.hash(), header1.height(), mmr_leaf_index, 0); + // txn.insert_pruned_utxo(burned.hash(), *header1.hash(), header1.height(), 0); db.commit(txn).unwrap(); utxo_sum = &coinbase.commitment + &utxo_sum; diff --git a/base_layer/core/tests/chain_storage_tests/chain_backend.rs b/base_layer/core/tests/chain_storage_tests/chain_backend.rs index 3c07f67221..596655f74e 100644 --- a/base_layer/core/tests/chain_storage_tests/chain_backend.rs +++ b/base_layer/core/tests/chain_storage_tests/chain_backend.rs @@ -176,7 +176,7 @@ fn test_utxo_order() { db.write(tx).unwrap(); - let read_utxos = db.fetch_utxos_in_block(&block_hash, None).unwrap().0; + let read_utxos = db.fetch_utxos_in_block(&block_hash).unwrap(); assert_eq!(utxos.len(), read_utxos.len()); for i in 0..2000 { assert_eq!(&utxos[i], read_utxos[i].as_transaction_output().unwrap()); diff --git a/base_layer/core/tests/helpers/block_builders.rs b/base_layer/core/tests/helpers/block_builders.rs index a08ece6d7a..3f497d7378 100644 --- a/base_layer/core/tests/helpers/block_builders.rs +++ b/base_layer/core/tests/helpers/block_builders.rs @@ -19,10 +19,8 @@ // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - use std::{convert::TryFrom, sync::Arc}; -use croaring::Bitmap; use rand::{rngs::OsRng, RngCore}; use tari_common_types::types::{Commitment, FixedHash}; use tari_core::{ @@ -52,13 +50,12 @@ use tari_core::{ }, }, KernelMmr, - KernelMmrHasherBlake256, - MutableOutputMmr, + OutputSmt, }; -use tari_crypto::tari_utilities::hex::Hex; use tari_key_manager::key_manager_service::KeyManagerInterface; -use tari_mmr::{Hash, MutableMmr}; +use tari_mmr::sparse_merkle_tree::{NodeKey, ValueHash}; use tari_script::script; +use tari_utilities::{hex::Hex, ByteArray}; pub async fn create_coinbase( value: MicroMinotari, @@ -146,10 +143,7 @@ fn print_new_genesis_block_values() { // Note: An empty MMR will have a root of `MerkleMountainRange::::null_hash()` let kernel_mr = KernelMmr::new(Vec::new()).get_merkle_root().unwrap(); - let output_mr = MutableOutputMmr::new(Vec::new(), Bitmap::create()) - .unwrap() - .get_merkle_root() - .unwrap(); + let output_mr = FixedHash::try_from(OutputSmt::new().hash().as_slice()).unwrap(); // Note: This is printed in the same order as needed for 'fn get_xxxx_genesis_block_raw()' println!(); @@ -182,27 +176,22 @@ pub async fn create_genesis_block( // Calculate the MMR Merkle roots for the genesis block template and update the header. fn update_genesis_block_mmr_roots(template: NewBlockTemplate) -> Result { - type BaseLayerKernelMutableMmr = MutableMmr>; - let NewBlockTemplate { header, mut body, .. } = template; // Make sure the body components are sorted. If they already are, this is a very cheap call.
body.sort(); let kernel_hashes: Vec<Vec<u8>> = body.kernels().iter().map(|k| k.hash().to_vec()).collect(); - let out_hashes: Vec<Vec<u8>> = body.outputs().iter().map(|out| out.hash().to_vec()).collect(); let mut header = BlockHeader::from(header); - header.kernel_mr = FixedHash::try_from( - BaseLayerKernelMutableMmr::new(kernel_hashes, Bitmap::create()) - .unwrap() - .get_merkle_root()?, - ) - .unwrap(); - let mut mmr = MutableOutputMmr::new(Vec::<Vec<u8>>::new(), Bitmap::create()).unwrap(); - for output in out_hashes { - let _ = mmr.push(output).unwrap(); + let kernel_mmr = KernelMmr::new(kernel_hashes); + header.kernel_mr = FixedHash::try_from(kernel_mmr.get_merkle_root()?).unwrap(); + let mut mmr = OutputSmt::new(); + for output in body.outputs() { + let smt_key = NodeKey::try_from(output.commitment.as_bytes())?; + let smt_node = ValueHash::try_from(output.smt_hash(header.height).as_slice())?; + mmr.insert(smt_key, smt_node).unwrap(); } - header.output_mr = FixedHash::try_from(mmr.get_merkle_root()?).unwrap(); + header.output_mr = FixedHash::try_from(mmr.hash().as_slice()).unwrap(); Ok(Block { header, body }) } diff --git a/base_layer/core/tests/tests/async_db.rs b/base_layer/core/tests/tests/async_db.rs index a762ee3dfd..5b95613a4e 100644 --- a/base_layer/core/tests/tests/async_db.rs +++ b/base_layer/core/tests/tests/async_db.rs @@ -25,7 +25,7 @@ use std::ops::Deref; use tari_common::configuration::Network; use tari_core::{ blocks::Block, - chain_storage::{async_db::AsyncBlockchainDb, BlockAddResult, PrunedOutput}, + chain_storage::{async_db::AsyncBlockchainDb, BlockAddResult}, transactions::{ key_manager::TransactionKeyManagerInterface, tari_amount::T, @@ -108,9 +108,9 @@ fn fetch_async_utxo() { let db2 = AsyncBlockchainDb::new(adb); let utxo_check = db.fetch_utxo(utxo.hash()).await.unwrap().unwrap(); - assert_eq!(utxo_check, PrunedOutput::NotPruned { output: utxo }); + assert_eq!(utxo_check, utxo); let stxo_check = db2.fetch_utxo(stxo.hash()).await.unwrap().unwrap(); - assert_eq!(stxo_check, PrunedOutput::NotPruned { output: stxo }); + assert_eq!(stxo_check, stxo); }); }); } diff --git a/base_layer/core/tests/tests/base_node_rpc.rs b/base_layer/core/tests/tests/base_node_rpc.rs index e42e970fb9..863842e4b4 100644 --- a/base_layer/core/tests/tests/base_node_rpc.rs +++ b/base_layer/core/tests/tests/base_node_rpc.rs @@ -230,7 +230,7 @@ async fn test_base_node_wallet_rpc() { .prepare_new_block(chain_block(&block1, vec![], &consensus_manager, &key_manager).await) .unwrap(); - block2.header.output_mmr_size += 1; + block2.header.output_smt_size += 1; block2.header.kernel_mmr_size += 1; base_node.local_nci.submit_block(block2).await.unwrap(); diff --git a/base_layer/core/tests/tests/node_comms_interface.rs b/base_layer/core/tests/tests/node_comms_interface.rs index d3be85f339..dc8da87d17 100644 --- a/base_layer/core/tests/tests/node_comms_interface.rs +++ b/base_layer/core/tests/tests/node_comms_interface.rs @@ -20,8 +20,6 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
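The pattern in `update_genesis_block_mmr_roots` generalizes to any block: key the SMT by commitment, value each leaf with the height-bound `smt_hash`, then read the root. A sketch against the `tari_mmr` sparse Merkle tree API as it appears in this diff, assuming `OutputSmt` is an alias for `SparseMerkleTree<Blake2b<U32>>`:

use blake2::Blake2b;
use digest::consts::U32;
use tari_mmr::sparse_merkle_tree::{NodeKey, SparseMerkleTree, ValueHash};

// Each output contributes one leaf: commitment bytes as the key, the
// height-bound smt_hash as the value. The resulting root commits to the
// entire unspent output set.
fn output_root(outputs: &[([u8; 32], [u8; 32])]) -> [u8; 32] {
    let mut smt = SparseMerkleTree::<Blake2b<U32>>::new();
    for (commitment, value_hash) in outputs {
        let key = NodeKey::try_from(commitment.as_slice()).expect("32 bytes is a valid key");
        let value = ValueHash::try_from(value_hash.as_slice()).expect("32 bytes is a valid value hash");
        smt.insert(key, value).expect("insert into in-memory SMT");
    }
    let mut root = [0u8; 32];
    root.copy_from_slice(smt.hash().as_slice());
    root
}

Keying by commitment rather than by insertion order is what removes the need for the `mmr_leaf_index` bookkeeping throughout the rest of this diff.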
-use std::convert::TryFrom; - use tari_common::configuration::Network; use tari_comms::test_utils::mocks::create_connectivity_mock; use tari_core::{ @@ -482,13 +480,7 @@ async fn inbound_fetch_blocks_before_horizon_height() { ) .await; let mut txn = DbTransaction::new(); - txn.insert_utxo( - utxo.clone(), - *block0.hash(), - 0, - u32::try_from(block0.header().output_mmr_size).unwrap(), - 0, - ); + txn.insert_utxo(utxo.clone(), *block0.hash(), 0, 0); if let Err(e) = store.commit(txn) { panic!("{}", e); } @@ -574,7 +566,6 @@ async fn inbound_fetch_blocks_before_horizon_height() { .await { assert_eq!(received_blocks.len(), 1); - assert_eq!(received_blocks[0].pruned_outputs().len(), 1) } else { panic!(); } diff --git a/base_layer/core/tests/tests/node_service.rs b/base_layer/core/tests/tests/node_service.rs index 435c0fb758..4a710f0c72 100644 --- a/base_layer/core/tests/tests/node_service.rs +++ b/base_layer/core/tests/tests/node_service.rs @@ -727,7 +727,7 @@ async fn local_submit_block() { .prepare_new_block(chain_block(&block0, vec![], &consensus_manager, &key_manager).await) .unwrap(); block1.header.kernel_mmr_size += 1; - block1.header.output_mmr_size += 1; + block1.header.output_smt_size += 1; node.local_nci.submit_block(block1.clone()).await.unwrap(); let event = event_stream_next(&mut event_stream, Duration::from_millis(20000)).await; diff --git a/base_layer/core/tests/tests/node_state_machine.rs b/base_layer/core/tests/tests/node_state_machine.rs index ea5508cc52..8224a253a9 100644 --- a/base_layer/core/tests/tests/node_state_machine.rs +++ b/base_layer/core/tests/tests/node_state_machine.rs @@ -131,7 +131,7 @@ async fn test_listening_lagging() { let mut prev_block = bob_db .prepare_new_block(chain_block(prev_block.block(), vec![], &consensus_manager, &key_manager).await) .unwrap(); - prev_block.header.output_mmr_size += 1; + prev_block.header.output_smt_size += 1; prev_block.header.kernel_mmr_size += 1; bob_local_nci.submit_block(prev_block).await.unwrap(); assert_eq!(bob_db.get_height().unwrap(), 2); diff --git a/base_layer/mmr/Cargo.toml b/base_layer/mmr/Cargo.toml index 305d5ce47d..da42e7b35a 100644 --- a/base_layer/mmr/Cargo.toml +++ b/base_layer/mmr/Cargo.toml @@ -20,6 +20,7 @@ borsh = "0.10" digest = "0.10" log = "0.4" serde = { version = "1.0", features = ["derive"] } +serde_derive = "1.0.119" croaring = { version = "0.9", optional = true } [dev-dependencies] diff --git a/base_layer/mmr/src/lib.rs b/base_layer/mmr/src/lib.rs index 4d82a6e59d..7e897324fd 100644 --- a/base_layer/mmr/src/lib.rs +++ b/base_layer/mmr/src/lib.rs @@ -131,7 +131,6 @@ //! 0 1 2 3 4 5 6 7 8 9 10 11 12 <-- Leaf node indices //! ---------------------------------- //! 
``` - pub type Hash = Vec<u8>; pub type HashSlice = [u8]; diff --git a/base_layer/mmr/src/sparse_merkle_tree/mod.rs b/base_layer/mmr/src/sparse_merkle_tree/mod.rs index 5cd6ab7281..28a367c492 100644 --- a/base_layer/mmr/src/sparse_merkle_tree/mod.rs +++ b/base_layer/mmr/src/sparse_merkle_tree/mod.rs @@ -85,4 +85,4 @@ mod tree; pub use error::SMTError; pub use node::{BranchNode, EmptyNode, LeafNode, Node, NodeHash, NodeKey, ValueHash, EMPTY_NODE_HASH}; pub use proofs::{ExclusionProof, InclusionProof}; -pub use tree::{SparseMerkleTree, UpdateResult}; +pub use tree::{DeleteResult, SparseMerkleTree, UpdateResult}; diff --git a/base_layer/mmr/src/sparse_merkle_tree/node.rs b/base_layer/mmr/src/sparse_merkle_tree/node.rs index 71f6fa4151..1a7467b669 100644 --- a/base_layer/mmr/src/sparse_merkle_tree/node.rs +++ b/base_layer/mmr/src/sparse_merkle_tree/node.rs @@ -1,13 +1,14 @@ // Copyright 2023. The Tari Project // SPDX-License-Identifier: BSD-3-Clause +use core::marker::PhantomData; use std::{ convert::TryFrom, fmt::{Debug, Formatter}, - marker::PhantomData, }; use digest::{consts::U32, Digest}; +use serde::{Deserialize, Serialize}; use crate::sparse_merkle_tree::{ bit_utils::{bit_to_dir, count_common_prefix, get_bit, height_key, TraverseDirection}, @@ -20,7 +21,7 @@ pub const KEY_LENGTH: usize = 32; macro_rules! hash_type { ($name: ident) => { /// A wrapper around a 32-byte hash value. Provides convenience functions to display as hex or binary - #[derive(Clone, Debug, PartialEq, Eq, PartialOrd)] + #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Deserialize, Serialize)] pub struct $name([u8; KEY_LENGTH]); #[allow(clippy::len_without_is_empty)] @@ -176,7 +177,9 @@ impl<'a> ExactSizeIterator for PathIterator<'a> { } } -#[derive(Debug)] +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound(deserialize = "H:"))] +#[serde(bound(serialize = "H:"))] pub enum Node { Empty(EmptyNode), Leaf(LeafNode), @@ -270,7 +273,7 @@ impl> Node { //------------------------------------- Empty Node ----------------------------------------------------------- /// An empty node. All empty nodes have the same hash, which acts as a marker value for truncated portions of the tree. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct EmptyNode {} impl EmptyNode { @@ -279,6 +282,7 @@ impl EmptyNode { } } +#[derive(Serialize, Deserialize)] //------------------------------------- Leaf Node ----------------------------------------------------------- pub struct LeafNode { key: NodeKey, @@ -382,6 +386,9 @@ impl> LeafNode { } //------------------------------------- Branch Node ---------------------------------------------------------- +#[derive(Clone, Serialize, Deserialize)] +#[serde(bound(deserialize = "H:"))] +#[serde(bound(serialize = "H:"))] pub struct BranchNode { // The height of the branch. It is also the number of bits that all keys below this branch share.
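The explicit `#[serde(bound(...))]` attributes are needed because the hasher parameter `H` appears only inside `PhantomData`: left to itself, the derive would emit `H: Serialize` and `H: Deserialize` bounds that no concrete hasher needs to satisfy. A self-contained illustration of the same trick (the type here is invented for the example):

use std::marker::PhantomData;

use serde::{Deserialize, Serialize};

// The empty where-predicate "H:" strips the `H: Serialize`/`H: Deserialize`
// bounds the derive would otherwise require, since `H` is never serialized.
#[derive(Serialize, Deserialize)]
#[serde(bound(serialize = "H:", deserialize = "H:"))]
struct SizedRoot<H> {
    size: u64,
    hasher: PhantomData<H>,
}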
height: usize, diff --git a/base_layer/mmr/src/sparse_merkle_tree/tree.rs b/base_layer/mmr/src/sparse_merkle_tree/tree.rs index 3335e7cf36..caa2b38102 100644 --- a/base_layer/mmr/src/sparse_merkle_tree/tree.rs +++ b/base_layer/mmr/src/sparse_merkle_tree/tree.rs @@ -4,6 +4,7 @@ use std::{fmt::Debug, mem}; use digest::{consts::U32, Digest}; +use serde::{Deserialize, Serialize}; use crate::sparse_merkle_tree::{ bit_utils::{traverse_direction, TraverseDirection}, @@ -30,6 +31,9 @@ pub enum DeleteResult { KeyNotFound, } +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound(deserialize = "H:"))] +#[serde(bound(serialize = "H:"))] pub struct SparseMerkleTree { size: u64, root: Node, diff --git a/base_layer/tari_mining_helper_ffi/src/lib.rs b/base_layer/tari_mining_helper_ffi/src/lib.rs index ae08d9f87f..906c99efd7 100644 --- a/base_layer/tari_mining_helper_ffi/src/lib.rs +++ b/base_layer/tari_mining_helper_ffi/src/lib.rs @@ -386,8 +386,8 @@ mod tests { #[test] fn detect_change_in_consensus_encoding() { - const NONCE: u64 = 4038183075833317878; - let difficulty = Difficulty::from_u64(1197).expect("Failed to create difficulty"); + const NONCE: u64 = 16239591568418635215; + let difficulty = Difficulty::from_u64(2382).expect("Failed to create difficulty"); unsafe { let mut error = -1; let error_ptr = &mut error as *mut c_int; diff --git a/base_layer/wallet/migrations/2023-10-06-10000_drop_mmr/down.sql b/base_layer/wallet/migrations/2023-10-06-10000_drop_mmr/down.sql new file mode 100644 index 0000000000..291a97c5ce --- /dev/null +++ b/base_layer/wallet/migrations/2023-10-06-10000_drop_mmr/down.sql @@ -0,0 +1 @@ +-- This file should undo anything in `up.sql` \ No newline at end of file diff --git a/base_layer/wallet/migrations/2023-10-06-10000_drop_mmr/up.sql b/base_layer/wallet/migrations/2023-10-06-10000_drop_mmr/up.sql new file mode 100644 index 0000000000..61486ca689 --- /dev/null +++ b/base_layer/wallet/migrations/2023-10-06-10000_drop_mmr/up.sql @@ -0,0 +1 @@ +ALTER TABLE outputs DROP mined_mmr_position; \ No newline at end of file diff --git a/base_layer/wallet/src/output_manager_service/storage/database/backend.rs b/base_layer/wallet/src/output_manager_service/storage/database/backend.rs index 6d92f3ec39..c14deb2986 100644 --- a/base_layer/wallet/src/output_manager_service/storage/database/backend.rs +++ b/base_layer/wallet/src/output_manager_service/storage/database/backend.rs @@ -43,7 +43,6 @@ pub trait OutputManagerBackend: Send + Sync + Clone { hash: FixedHash, mined_height: u64, mined_in_block: FixedHash, - mmr_position: u64, confirmed: bool, mined_timestamp: u64, ) -> Result<(), OutputManagerStorageError>; diff --git a/base_layer/wallet/src/output_manager_service/storage/database/mod.rs b/base_layer/wallet/src/output_manager_service/storage/database/mod.rs index 0b4170acdd..0f7d78b478 100644 --- a/base_layer/wallet/src/output_manager_service/storage/database/mod.rs +++ b/base_layer/wallet/src/output_manager_service/storage/database/mod.rs @@ -401,19 +401,11 @@ where T: OutputManagerBackend + 'static hash: HashOutput, mined_height: u64, mined_in_block: HashOutput, - mmr_position: u64, confirmed: bool, mined_timestamp: u64, ) -> Result<(), OutputManagerStorageError> { let db = self.db.clone(); - db.set_received_output_mined_height_and_status( - hash, - mined_height, - mined_in_block, - mmr_position, - confirmed, - mined_timestamp, - )?; + db.set_received_output_mined_height_and_status(hash, mined_height, mined_in_block, confirmed, mined_timestamp)?; Ok(()) } diff --git 
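With `SparseMerkleTree` now deriving `Serialize`/`Deserialize`, the output SMT can be persisted and restored rather than rebuilt from scratch. A minimal round-trip sketch (serde_json is an arbitrary choice here; any serde format works):

use blake2::Blake2b;
use digest::consts::U32;
use tari_mmr::sparse_merkle_tree::SparseMerkleTree;

// Round-trip the tree through a serde format; the restored tree carries the
// same size, node structure and root hash as the original.
fn roundtrip(smt: &SparseMerkleTree<Blake2b<U32>>) -> SparseMerkleTree<Blake2b<U32>> {
    let bytes = serde_json::to_vec(smt).expect("tree serializes");
    serde_json::from_slice(&bytes).expect("tree deserializes")
}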
a/base_layer/wallet/src/output_manager_service/storage/models.rs b/base_layer/wallet/src/output_manager_service/storage/models.rs index 64ac0cbf77..2a77e54b82 100644 --- a/base_layer/wallet/src/output_manager_service/storage/models.rs +++ b/base_layer/wallet/src/output_manager_service/storage/models.rs @@ -49,7 +49,6 @@ pub struct DbWalletOutput { pub status: OutputStatus, pub mined_height: Option, pub mined_in_block: Option, - pub mined_mmr_position: Option, pub mined_timestamp: Option, pub marked_deleted_at_height: Option, pub marked_deleted_in_block: Option, @@ -76,7 +75,6 @@ impl DbWalletOutput { status: OutputStatus::NotStored, mined_height: None, mined_in_block: None, - mined_mmr_position: None, mined_timestamp: None, marked_deleted_at_height: None, marked_deleted_in_block: None, diff --git a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs index d7349bef63..a393588d71 100644 --- a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs +++ b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/mod.rs @@ -425,7 +425,6 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { hash: FixedHash, mined_height: u64, mined_in_block: FixedHash, - mmr_position: u64, confirmed: bool, mined_timestamp: u64, ) -> Result<(), OutputManagerStorageError> { @@ -452,7 +451,6 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { .set(( outputs::mined_height.eq(mined_height as i64), outputs::mined_in_block.eq(mined_in_block), - outputs::mined_mmr_position.eq(mmr_position as i64), outputs::status.eq(status), outputs::mined_timestamp.eq(timestamp), outputs::marked_deleted_at_height.eq::>(None), @@ -483,7 +481,6 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { .set(( outputs::mined_height.eq::>(None), outputs::mined_in_block.eq::>>(None), - outputs::mined_mmr_position.eq::>(None), outputs::status.eq(OutputStatus::Invalid as i32), outputs::mined_timestamp.eq::>(None), outputs::marked_deleted_at_height.eq::>(None), @@ -512,7 +509,6 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { .set(( outputs::mined_height.eq::>(None), outputs::mined_in_block.eq::>>(None), - outputs::mined_mmr_position.eq::>(None), outputs::status.eq(OutputStatus::Invalid as i32), outputs::mined_timestamp.eq::>(None), outputs::marked_deleted_at_height.eq::>(None), @@ -837,9 +833,8 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { if output.received_in_tx_id == Some(tx_id.as_i64_wrapped()) { info!( target: LOG_TARGET, - "Cancelling pending inbound output with Commitment: {} - MMR Position: {:?} from TxId: {}", + "Cancelling pending inbound output with Commitment: {} - from TxId: {}", output.commitment.to_hex(), - output.mined_mmr_position, tx_id ); output.update( @@ -855,9 +850,8 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { } else if output.spent_in_tx_id == Some(tx_id.as_i64_wrapped()) { info!( target: LOG_TARGET, - "Cancelling pending outbound output with Commitment: {} - MMR Position: {:?} from TxId: {}", + "Cancelling pending outbound output with Commitment: {} - from TxId: {}", output.commitment.to_hex(), - output.mined_mmr_position, tx_id ); output.update( diff --git a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/output_sql.rs b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/output_sql.rs index 265eebdcdb..63658982fb 100644 --- a/base_layer/wallet/src/output_manager_service/storage/sqlite_db/output_sql.rs +++ 
b/base_layer/wallet/src/output_manager_service/storage/sqlite_db/output_sql.rs @@ -87,7 +87,6 @@ pub struct OutputSql { pub metadata_signature_u_y: Vec, pub mined_height: Option, pub mined_in_block: Option>, - pub mined_mmr_position: Option, pub marked_deleted_at_height: Option, pub marked_deleted_in_block: Option>, pub received_in_tx_id: Option, @@ -777,7 +776,6 @@ impl OutputSql { status: self.status.try_into()?, mined_height: self.mined_height.map(|mh| mh as u64), mined_in_block, - mined_mmr_position: self.mined_mmr_position.map(|mp| mp as u64), mined_timestamp: self.mined_timestamp, marked_deleted_at_height: self.marked_deleted_at_height.map(|d| d as u64), marked_deleted_in_block, diff --git a/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs b/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs index 1291cb6641..bab8c13217 100644 --- a/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs +++ b/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs @@ -151,7 +151,7 @@ where unmined.len(), self.operation_id ); - for (output, mined_height, mined_in_block, mmr_position, mined_timestamp) in &mined { + for (output, mined_height, mined_in_block, mined_timestamp) in &mined { info!( target: LOG_TARGET, "Updating output comm:{}: hash {} as mined at height {} with current tip at {} (Operation ID: @@ -162,15 +162,8 @@ where tip_height, self.operation_id ); - self.update_output_as_mined( - output, - mined_in_block, - *mined_height, - *mmr_position, - tip_height, - *mined_timestamp, - ) - .await?; + self.update_output_as_mined(output, mined_in_block, *mined_height, tip_height, *mined_timestamp) + .await?; } for output in unmined { self.db @@ -196,7 +189,7 @@ where for batch in mined_outputs.chunks(self.config.tx_validator_batch_size) { debug!( target: LOG_TARGET, - "Asking base node for status of {} mmr_positions (Operation ID: {})", + "Asking base node for status of {} commitments (Operation ID: {})", batch.len(), self.operation_id ); @@ -204,105 +197,68 @@ where // We have to send positions to the base node because if the base node cannot find the hash of the output // we can't tell if the output ever existed, as opposed to existing and was spent. // This assumes that the base node has not reorged since the last time we asked. - let deleted_bitmap_response = wallet_client + let response = wallet_client .query_deleted(QueryDeletedRequest { chain_must_include_header: last_mined_header_hash.map(|v| v.to_vec()).unwrap_or_default(), - mmr_positions: batch.iter().filter_map(|ub| ub.mined_mmr_position).collect(), - include_deleted_block_data: true, + hashes: batch.iter().map(|o| o.hash.to_vec()).collect(), }) .await .for_protocol(self.operation_id)?; - for output in batch { - let mined_mmr_position = if let Some(pos) = output.mined_mmr_position { - pos - } else { - warn!( - target: LOG_TARGET, - "Mined Unspent output {} should have `mined_mmr_position`, setting as unmined to revalidate \ - (Operation ID: {})", - output.commitment.to_hex(), - self.operation_id - ); + if response.data.len() != batch.len() { + return Err(OutputManagerProtocolError::new( + self.operation_id, + OutputManagerError::InconsistentBaseNodeDataError( + "Base node did not send back information for all utxos", + ), + )); + } + + for (output, data) in batch.iter().zip(response.data.iter()) { + if data.mined_height == 0 { + // base node thinks this is unmined or does not know of it. 
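The reworked `query_deleted` drops positional bitmaps in favour of one `QueryDeletedData` record per queried hash, returned in request order, so the wallet's only consistency requirement is that the lengths match before zipping. The guard, distilled (generic over element types, names illustrative):

// The base node must answer with exactly one record per queried output, in
// order; a length mismatch means the response cannot be paired up safely.
fn paired<'a, O, D>(batch: &'a [O], data: &'a [D]) -> Result<Vec<(&'a O, &'a D)>, &'static str> {
    if batch.len() != data.len() {
        return Err("Base node did not send back information for all utxos");
    }
    Ok(batch.iter().zip(data.iter()).collect())
}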
self.db .set_output_to_unmined_and_invalid(output.hash) .for_protocol(self.operation_id)?; continue; }; - - if deleted_bitmap_response.deleted_positions.len() != deleted_bitmap_response.blocks_deleted_in.len() || - deleted_bitmap_response.deleted_positions.len() != - deleted_bitmap_response.heights_deleted_at.len() - { - return Err(OutputManagerProtocolError::new( - self.operation_id, - OutputManagerError::InconsistentBaseNodeDataError( - "`deleted_positions`, `blocks_deleted_in` and `heights_deleted_at` should be the same \ - length", - ), - )); - } - - if deleted_bitmap_response.deleted_positions.contains(&mined_mmr_position) { - let position = if let Some(pos) = deleted_bitmap_response - .deleted_positions - .iter() - .position(|dp| dp == &mined_mmr_position) - { - pos - } else { - warn!( - target: LOG_TARGET, - "Deleted positions for Mined Unspent output {} should include the `mined_mmr_position`. \ - setting as unmined to revalidate (Operation ID: {})", - output.commitment.to_hex(), - self.operation_id - ); - self.db - .set_output_to_unmined_and_invalid(output.hash) - .for_protocol(self.operation_id)?; - continue; - }; - - let deleted_height = deleted_bitmap_response.heights_deleted_at[position]; - let deleted_block = match deleted_bitmap_response.blocks_deleted_in[position].clone().try_into() { - Ok(v) => v, - Err(_) => { - debug!(target: LOG_TARGET, "Received malformed deleted_block"); - continue; - }, - }; - - let confirmed = (deleted_bitmap_response.height_of_longest_chain - deleted_height) >= - self.config.num_confirmations_required; - + if data.height_deleted_at == 0 && output.marked_deleted_at_height.is_some() { + // this is mined but not yet spent self.db - .mark_output_as_spent(output.hash, deleted_height, deleted_block, confirmed) + .mark_output_as_unspent(output.hash) .for_protocol(self.operation_id)?; info!( target: LOG_TARGET, - "Updating output comm:{}: hash {} as spent at tip height {} (Operation ID: {})", + "Updating output comm:{}: hash {} as unspent at tip height {} (Operation ID: {})", output.commitment.to_hex(), output.hash.to_hex(), - deleted_bitmap_response.height_of_longest_chain, + response.height_of_longest_chain, self.operation_id ); - } - - if deleted_bitmap_response - .not_deleted_positions - .contains(&mined_mmr_position) && - output.marked_deleted_at_height.is_some() - { + continue; + }; + if data.height_deleted_at > 0 { + let confirmed = (response.height_of_longest_chain.saturating_sub(data.height_deleted_at)) >= + self.config.num_confirmations_required; self.db - .mark_output_as_unspent(output.hash) + .mark_output_as_spent( + output.hash, + data.mined_height, + data.block_deleted_in.clone().try_into().map_err(|_| { + OutputManagerProtocolError::new( + self.operation_id, + OutputManagerError::InconsistentBaseNodeDataError("Base node sent malformed hash"), + ) + })?, + confirmed, + ) .for_protocol(self.operation_id)?; info!( target: LOG_TARGET, - "Updating output comm:{}: hash {} as unspent at tip height {} (Operation ID: {})", + "Updating output comm:{}: hash {} as spent at tip height {} (Operation ID: {})", output.commitment.to_hex(), output.hash.to_hex(), - deleted_bitmap_response.height_of_longest_chain, + response.height_of_longest_chain, self.operation_id ); } @@ -336,7 +292,7 @@ where unmined.len(), self.operation_id ); - for (output, mined_height, mined_in_block, mmr_position, mined_timestamp) in &mined { + for (output, mined_height, mined_in_block, mined_timestamp) in &mined { info!( target: LOG_TARGET, "Updating output comm:{}: hash {} as mined 
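The branching above is a small state machine over two sentinel fields: `mined_height == 0` means the base node does not know the output, `height_deleted_at == 0` means mined but still unspent, and anything else means spent, with confirmation depth measured from the tip. Distilled below (the struct mirrors the proto fields used in this diff; the enum is illustrative and omits the wallet's extra `marked_deleted_at_height` check):

struct QueryDeletedData {
    mined_height: u64,
    height_deleted_at: u64,
}

enum TxoState {
    UnminedOrUnknown,
    MinedUnspent,
    Spent { height: u64, confirmed: bool },
}

// Interpret one per-output record relative to the current tip height.
fn classify(data: &QueryDeletedData, tip_height: u64, confirmations_required: u64) -> TxoState {
    if data.mined_height == 0 {
        TxoState::UnminedOrUnknown
    } else if data.height_deleted_at == 0 {
        TxoState::MinedUnspent
    } else {
        let depth = tip_height.saturating_sub(data.height_deleted_at);
        TxoState::Spent {
            height: data.height_deleted_at,
            confirmed: depth >= confirmations_required,
        }
    }
}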
at height {} with current tip at {} (Operation ID: {})", @@ -346,15 +302,8 @@ where tip_height, self.operation_id ); - self.update_output_as_mined( - output, - mined_in_block, - *mined_height, - *mmr_position, - tip_height, - *mined_timestamp, - ) - .await?; + self.update_output_as_mined(output, mined_in_block, *mined_height, tip_height, *mined_timestamp) + .await?; } } @@ -513,14 +462,7 @@ where &self, batch: &[DbWalletOutput], base_node_client: &mut BaseNodeWalletRpcClient, - ) -> Result< - ( - Vec<(DbWalletOutput, u64, BlockHash, u64, u64)>, - Vec, - u64, - ), - OutputManagerError, - > { + ) -> Result<(Vec<(DbWalletOutput, u64, BlockHash, u64)>, Vec, u64), OutputManagerError> { let batch_hashes = batch.iter().map(|o| o.hash.to_vec()).collect(); trace!( target: LOG_TARGET, @@ -559,7 +501,6 @@ where output.clone(), returned_output.mined_height, block_hash, - returned_output.mmr_position, returned_output.mined_timestamp, )), Err(_) => { @@ -583,7 +524,6 @@ where tx: &DbWalletOutput, mined_in_block: &BlockHash, mined_height: u64, - mmr_position: u64, tip_height: u64, mined_timestamp: u64, ) -> Result<(), OutputManagerProtocolError> { @@ -594,7 +534,6 @@ where tx.hash, mined_height, *mined_in_block, - mmr_position, confirmed, mined_timestamp, ) diff --git a/base_layer/wallet/src/schema.rs b/base_layer/wallet/src/schema.rs index a163bbcde2..2347fe7b96 100644 --- a/base_layer/wallet/src/schema.rs +++ b/base_layer/wallet/src/schema.rs @@ -105,7 +105,6 @@ diesel::table! { metadata_signature_u_y -> Binary, mined_height -> Nullable, mined_in_block -> Nullable, - mined_mmr_position -> Nullable, marked_deleted_at_height -> Nullable, marked_deleted_in_block -> Nullable, received_in_tx_id -> Nullable, diff --git a/base_layer/wallet/tests/output_manager_service_tests/service.rs b/base_layer/wallet/tests/output_manager_service_tests/service.rs index e8ac79f023..ed2b161896 100644 --- a/base_layer/wallet/tests/output_manager_service_tests/service.rs +++ b/base_layer/wallet/tests/output_manager_service_tests/service.rs @@ -58,7 +58,7 @@ use tari_core::{ blocks::BlockHeader, borsh::SerializedSize, covenants::Covenant, - proto::base_node::{QueryDeletedResponse, UtxoQueryResponse, UtxoQueryResponses}, + proto::base_node::{QueryDeletedData, QueryDeletedResponse, UtxoQueryResponse, UtxoQueryResponses}, transactions::{ fee::Fee, key_manager::{TransactionKeyManagerBranch, TransactionKeyManagerInterface}, @@ -1381,7 +1381,6 @@ async fn test_txo_validation() { let responses = vec![ UtxoQueryResponse { output: Some(output1_tx_output.clone().try_into().unwrap()), - mmr_position: 1, mined_height: 1, mined_in_block: block1_header.hash().to_vec(), output_hash: output1_tx_output.hash().to_vec(), @@ -1389,7 +1388,6 @@ async fn test_txo_validation() { }, UtxoQueryResponse { output: Some(output2_tx_output.clone().try_into().unwrap()), - mmr_position: 2, mined_height: 1, mined_in_block: block1_header.hash().to_vec(), output_hash: output2_tx_output.hash().to_vec(), @@ -1410,10 +1408,20 @@ async fn test_txo_validation() { let query_deleted_response = QueryDeletedResponse { best_block: block4_header.hash().to_vec(), height_of_longest_chain: 4, - deleted_positions: vec![], - not_deleted_positions: vec![1, 2], - heights_deleted_at: vec![], - blocks_deleted_in: vec![], + data: vec![ + QueryDeletedData { + mined_height: 1, + block_mined_in: block1_header.hash().to_vec(), + height_deleted_at: 0, + block_deleted_in: Vec::new(), + }, + QueryDeletedData { + mined_height: 1, + block_mined_in: block1_header.hash().to_vec(), + 
height_deleted_at: 0, + block_deleted_in: Vec::new(), + }, + ], }; oms.base_node_wallet_rpc_mock_state @@ -1528,7 +1536,6 @@ async fn test_txo_validation() { let responses = vec![ UtxoQueryResponse { output: Some(output1_tx_output.clone().try_into().unwrap()), - mmr_position: 1, mined_height: 1, mined_in_block: block1_header.hash().to_vec(), output_hash: output1_tx_output.hash().to_vec(), @@ -1536,7 +1543,6 @@ async fn test_txo_validation() { }, UtxoQueryResponse { output: Some(output2_tx_output.clone().try_into().unwrap()), - mmr_position: 2, mined_height: 1, mined_in_block: block1_header.hash().to_vec(), output_hash: output2_tx_output.hash().to_vec(), @@ -1544,7 +1550,6 @@ async fn test_txo_validation() { }, UtxoQueryResponse { output: Some(output4_tx_output.clone().try_into().unwrap()), - mmr_position: 4, mined_height: 5, mined_in_block: block5_header.hash().to_vec(), output_hash: output4_tx_output.hash().to_vec(), @@ -1552,7 +1557,6 @@ async fn test_txo_validation() { }, UtxoQueryResponse { output: Some(output5_tx_output.clone().try_into().unwrap()), - mmr_position: 5, mined_height: 5, mined_in_block: block5_header.hash().to_vec(), output_hash: output5_tx_output.hash().to_vec(), @@ -1560,7 +1564,6 @@ async fn test_txo_validation() { }, UtxoQueryResponse { output: Some(output6_tx_output.clone().try_into().unwrap()), - mmr_position: 6, mined_height: 5, mined_in_block: block5_header.hash().to_vec(), output_hash: output6_tx_output.hash().to_vec(), @@ -1581,10 +1584,48 @@ async fn test_txo_validation() { let mut query_deleted_response = QueryDeletedResponse { best_block: block5_header.hash().to_vec(), height_of_longest_chain: 5, - deleted_positions: vec![1], - not_deleted_positions: vec![2, 4, 5, 6], - heights_deleted_at: vec![5], - blocks_deleted_in: vec![block5_header.hash().to_vec()], + data: vec![ + QueryDeletedData { + mined_height: 1, + block_mined_in: block1_header.hash().to_vec(), + height_deleted_at: 5, + block_deleted_in: block5_header.hash().to_vec(), + }, + QueryDeletedData { + mined_height: 1, + block_mined_in: block1_header.hash().to_vec(), + height_deleted_at: 0, + block_deleted_in: Vec::new(), + }, + QueryDeletedData { + mined_height: 0, + block_mined_in: Vec::new(), + height_deleted_at: 0, + block_deleted_in: Vec::new(), + }, + QueryDeletedData { + mined_height: 5, + block_mined_in: block5_header.hash().to_vec(), + height_deleted_at: 0, + block_deleted_in: Vec::new(), + }, + QueryDeletedData { + mined_height: 5, + block_mined_in: block5_header.hash().to_vec(), + height_deleted_at: 0, + block_deleted_in: Vec::new(), + }, + QueryDeletedData { + mined_height: 5, + block_mined_in: block5_header.hash().to_vec(), + height_deleted_at: 0, + block_deleted_in: Vec::new(), + }, + ], + // deleted_positions: vec![1], + // not_deleted_positions: vec![2, 4, 5, 6], + // heights_deleted_at: vec![5], + // blocks_deleted_in: vec![block5_header.hash().to_vec()], }; oms.base_node_wallet_rpc_mock_state @@ -1605,7 +1646,6 @@ async fn test_txo_validation() { .wait_pop_query_deleted(1, Duration::from_secs(60)) .await .unwrap(); - assert_eq!(query_deleted_calls[0].mmr_positions.len(), 5); let balance = oms.output_manager_handle.get_balance().await.unwrap(); assert_eq!( @@ -1652,7 +1692,6 @@ async fn test_txo_validation() { .wait_pop_query_deleted(1, Duration::from_secs(60)) .await .unwrap(); - assert_eq!(query_deleted_calls[0].mmr_positions.len(), 5); let balance = oms.output_manager_handle.get_balance().await.unwrap(); assert_eq!( @@ -1706,7 +1745,6 @@ async fn test_txo_validation() { let responses 
= vec![ UtxoQueryResponse { output: Some(output1_tx_output.clone().try_into().unwrap()), - mmr_position: 1, mined_height: 1, mined_in_block: block1_header.hash().to_vec(), output_hash: output1_tx_output.hash().to_vec(), @@ -1714,7 +1752,6 @@ async fn test_txo_validation() { }, UtxoQueryResponse { output: Some(output2_tx_output.clone().try_into().unwrap()), - mmr_position: 2, mined_height: 1, mined_in_block: block1_header.hash().to_vec(), output_hash: output2_tx_output.hash().to_vec(), @@ -1722,7 +1759,6 @@ async fn test_txo_validation() { }, UtxoQueryResponse { output: Some(output4_tx_output.clone().try_into().unwrap()), - mmr_position: 4, mined_height: 5, mined_in_block: block5_header_reorg.hash().to_vec(), output_hash: output4_tx_output.hash().to_vec(), @@ -1743,10 +1779,42 @@ async fn test_txo_validation() { let mut query_deleted_response = QueryDeletedResponse { best_block: block5_header_reorg.hash().to_vec(), height_of_longest_chain: 5, - deleted_positions: vec![1], - not_deleted_positions: vec![2, 4, 5, 6], - heights_deleted_at: vec![5], - blocks_deleted_in: vec![block5_header_reorg.hash().to_vec()], + data: vec![ + QueryDeletedData { + mined_height: 1, + block_mined_in: block1_header.hash().to_vec(), + height_deleted_at: 5, + block_deleted_in: block5_header.hash().to_vec(), + }, + QueryDeletedData { + mined_height: 1, + block_mined_in: block1_header.hash().to_vec(), + height_deleted_at: 0, + block_deleted_in: Vec::new(), + }, + QueryDeletedData { + mined_height: 5, + block_mined_in: block5_header.hash().to_vec(), + height_deleted_at: 0, + block_deleted_in: Vec::new(), + }, + QueryDeletedData { + mined_height: 5, + block_mined_in: block5_header.hash().to_vec(), + height_deleted_at: 0, + block_deleted_in: Vec::new(), + }, + QueryDeletedData { + mined_height: 5, + block_mined_in: block5_header.hash().to_vec(), + height_deleted_at: 0, + block_deleted_in: Vec::new(), + }, + ], + // deleted_positions: vec![1], + // not_deleted_positions: vec![2, 4, 5, 6], + // heights_deleted_at: vec![5], + // blocks_deleted_in: vec![block5_header_reorg.hash().to_vec()], }; oms.base_node_wallet_rpc_mock_state @@ -1937,7 +2005,6 @@ async fn test_txo_revalidation() { let responses = vec![ UtxoQueryResponse { output: Some(output1_tx_output.clone().try_into().unwrap()), - mmr_position: 1, mined_height: 1, mined_in_block: block1_header.hash().to_vec(), output_hash: output1_tx_output.hash().to_vec(), @@ -1945,7 +2012,6 @@ async fn test_txo_revalidation() { }, UtxoQueryResponse { output: Some(output2_tx_output.clone().try_into().unwrap()), - mmr_position: 2, mined_height: 1, mined_in_block: block1_header.hash().to_vec(), output_hash: output2_tx_output.hash().to_vec(), @@ -1966,10 +2032,24 @@ async fn test_txo_revalidation() { let query_deleted_response = QueryDeletedResponse { best_block: block4_header.hash().to_vec(), height_of_longest_chain: 4, - deleted_positions: vec![], - not_deleted_positions: vec![1, 2], - heights_deleted_at: vec![], - blocks_deleted_in: vec![], + data: vec![ + QueryDeletedData { + mined_height: 1, + block_mined_in: block1_header.hash().to_vec(), + height_deleted_at: 0, + block_deleted_in: Vec::new(), + }, + QueryDeletedData { + mined_height: 1, + block_mined_in: block1_header.hash().to_vec(), + height_deleted_at: 0, + block_deleted_in: Vec::new(), + }, + ], + // deleted_positions: vec![], + // not_deleted_positions: vec![1, 2], + // heights_deleted_at: vec![], + // blocks_deleted_in: vec![], }; oms.base_node_wallet_rpc_mock_state @@ -1993,10 +2073,24 @@ async fn test_txo_revalidation() { 
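These fixtures repeat the same four-field record many times; a small constructor would keep the intent readable. A hypothetical helper over the generated `QueryDeletedData` message (not part of this diff):

// Build one per-output record: mined at `mined_height` in `block_mined_in`
// and, when `spent` is given, deleted at that height in that block.
fn deleted_data(
    mined_height: u64,
    block_mined_in: Vec<u8>,
    spent: Option<(u64, Vec<u8>)>,
) -> QueryDeletedData {
    let (height_deleted_at, block_deleted_in) = spent.unwrap_or((0, Vec::new()));
    QueryDeletedData {
        mined_height,
        block_mined_in,
        height_deleted_at,
        block_deleted_in,
    }
}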
let query_deleted_response = QueryDeletedResponse { best_block: block4_header.hash().to_vec(), height_of_longest_chain: 4, - deleted_positions: vec![1], - not_deleted_positions: vec![2], - heights_deleted_at: vec![4], - blocks_deleted_in: vec![block4_header.hash().to_vec()], + data: vec![ + QueryDeletedData { + mined_height: 1, + block_mined_in: block1_header.hash().to_vec(), + height_deleted_at: 4, + block_deleted_in: block4_header.hash().to_vec(), + }, + QueryDeletedData { + mined_height: 1, + block_mined_in: block1_header.hash().to_vec(), + height_deleted_at: 0, + block_deleted_in: Vec::new(), + }, + ], + // deleted_positions: vec![1], + // not_deleted_positions: vec![2], + // heights_deleted_at: vec![4], + // blocks_deleted_in: vec![block4_header.hash().to_vec()], }; oms.base_node_wallet_rpc_mock_state @@ -2020,10 +2114,24 @@ async fn test_txo_revalidation() { let query_deleted_response = QueryDeletedResponse { best_block: block4_header.hash().to_vec(), height_of_longest_chain: 4, - deleted_positions: vec![1, 2], - not_deleted_positions: vec![], - heights_deleted_at: vec![4, 4], - blocks_deleted_in: vec![block4_header.hash().to_vec(), block4_header.hash().to_vec()], + data: vec![ + QueryDeletedData { + mined_height: 1, + block_mined_in: block1_header.hash().to_vec(), + height_deleted_at: 4, + block_deleted_in: block4_header.hash().to_vec(), + }, + QueryDeletedData { + mined_height: 1, + block_mined_in: block1_header.hash().to_vec(), + height_deleted_at: 4, + block_deleted_in: block4_header.hash().to_vec(), + }, + ], + // deleted_positions: vec![1, 2], + // not_deleted_positions: vec![], + // heights_deleted_at: vec![4, 4], + // blocks_deleted_in: vec![block4_header.hash().to_vec(), block4_header.hash().to_vec()], }; oms.base_node_wallet_rpc_mock_state diff --git a/base_layer/wallet/tests/output_manager_service_tests/storage.rs b/base_layer/wallet/tests/output_manager_service_tests/storage.rs index 3be2c1146f..411e974e0d 100644 --- a/base_layer/wallet/tests/output_manager_service_tests/storage.rs +++ b/base_layer/wallet/tests/output_manager_service_tests/storage.rs @@ -180,11 +180,9 @@ pub async fn test_db_backend(backend: T) { }); // Set first pending tx to mined but unconfirmed - let mut mmr_pos = 0; for o in &pending_txs[0].outputs_to_be_received { - db.set_received_output_mined_height_and_status(o.hash, 2, FixedHash::zero(), mmr_pos, false, 0) + db.set_received_output_mined_height_and_status(o.hash, 2, FixedHash::zero(), false, 0) .unwrap(); - mmr_pos += 1; } for o in &pending_txs[0].outputs_to_be_spent { db.mark_output_as_spent(o.hash, 3, FixedHash::zero(), false).unwrap(); @@ -202,9 +200,8 @@ pub async fn test_db_backend(backend: T) { // Set second pending tx to mined and confirmed for o in &pending_txs[1].outputs_to_be_received { - db.set_received_output_mined_height_and_status(o.hash, 4, FixedHash::zero(), mmr_pos, true, 0) + db.set_received_output_mined_height_and_status(o.hash, 4, FixedHash::zero(), true, 0) .unwrap(); - mmr_pos += 1; } for o in &pending_txs[1].outputs_to_be_spent { db.mark_output_as_spent(o.hash, 5, FixedHash::zero(), true).unwrap(); @@ -424,7 +421,7 @@ pub async fn test_no_duplicate_outputs() { // add it to the database let result = db.add_unspent_output(kmo.clone()); assert!(result.is_ok()); - let result = db.set_received_output_mined_height_and_status(kmo.hash, 1, FixedHash::zero(), 1, true, 0); + let result = db.set_received_output_mined_height_and_status(kmo.hash, 1, FixedHash::zero(), true, 0); assert!(result.is_ok()); let outputs = 
db.fetch_mined_unspent_outputs().unwrap(); assert_eq!(outputs.len(), 1); @@ -465,7 +462,7 @@ pub async fn test_mark_as_unmined() { // add it to the database db.add_unspent_output(kmo.clone()).unwrap(); - db.set_received_output_mined_height_and_status(kmo.hash, 1, FixedHash::zero(), 1, true, 0) + db.set_received_output_mined_height_and_status(kmo.hash, 1, FixedHash::zero(), true, 0) .unwrap(); let o = db.get_last_mined_output().unwrap().unwrap(); assert_eq!(o.hash, kmo.hash); diff --git a/base_layer/wallet/tests/support/comms_rpc.rs b/base_layer/wallet/tests/support/comms_rpc.rs index 56eae2e17e..765388bb82 100644 --- a/base_layer/wallet/tests/support/comms_rpc.rs +++ b/base_layer/wallet/tests/support/comms_rpc.rs @@ -163,12 +163,9 @@ impl BaseNodeWalletRpcMockState { height_of_longest_chain: 1, })), query_deleted_response: Arc::new(Mutex::new(QueryDeletedResponse { - deleted_positions: vec![], - not_deleted_positions: vec![], best_block: vec![], height_of_longest_chain: 1, - heights_deleted_at: vec![], - blocks_deleted_in: vec![], + data: Vec::new(), })), fetch_utxos_calls: Arc::new(Mutex::new(Vec::new())), response_delay: Arc::new(Mutex::new(None)), diff --git a/integration_tests/tests/steps/node_steps.rs b/integration_tests/tests/steps/node_steps.rs index ad93a36eba..a99b8c0b20 100644 --- a/integration_tests/tests/steps/node_steps.rs +++ b/integration_tests/tests/steps/node_steps.rs @@ -674,7 +674,7 @@ async fn no_meddling_with_data(world: &mut TariWorld, node: String) { // Meddle with output_mmr_size let mut block: Block = Block::try_from(mine_block_before_submit(&mut client, &world.key_manager).await).unwrap(); - block.header.output_mmr_size += 1; + block.header.output_smt_size += 1; match client.submit_block(grpc::Block::try_from(block).unwrap()).await { Ok(_) => panic!("The block should not have been valid"), Err(e) => assert_eq!(