diff --git a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs index c99492bb3a..5abf31c824 100644 --- a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs +++ b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs @@ -414,7 +414,7 @@ where T: BlockchainBackend + 'static for id in unique_ids { let output = self .blockchain_db - .fetch_utxo_by_unique_id(Some(asset_public_key.clone()), id) + .fetch_utxo_by_unique_id(Some(asset_public_key.clone()), id, None) .await?; if let Some(out) = output { match out.output { diff --git a/base_layer/core/src/blocks/block_header.rs b/base_layer/core/src/blocks/block_header.rs index a5585008a7..f2425590b0 100644 --- a/base_layer/core/src/blocks/block_header.rs +++ b/base_layer/core/src/blocks/block_header.rs @@ -334,6 +334,7 @@ pub(crate) mod hash_serializer { mod test { use crate::blocks::BlockHeader; use tari_crypto::tari_utilities::Hashable; + #[test] fn from_previous() { let mut h1 = crate::proof_of_work::sha3_test::get_header(); diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index 0954633c2b..702064c51e 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -160,7 +160,7 @@ impl<B: BlockchainBackend + 'static> AsyncBlockchainDb<B> { make_async_fn!(fetch_utxos_by_mmr_position(start: u64, end: u64, deleted: Arc<Bitmap>) -> (Vec<PrunedOutput>, Bitmap), "fetch_utxos_by_mmr_position"); - make_async_fn!(fetch_utxo_by_unique_id(parent_public_key: Option<PublicKey>,unique_id: HashOutput) -> Option<UtxoMinedInfo>, "fetch_utxo_by_unique_id"); + make_async_fn!(fetch_utxo_by_unique_id(parent_public_key: Option<PublicKey>,unique_id: HashOutput, deleted_at: Option<u64>) -> Option<UtxoMinedInfo>, "fetch_utxo_by_unique_id"); make_async_fn!(fetch_all_unspent_by_parent_public_key( parent_public_key: PublicKey, diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index 883801fcc7..da7dcc7f9d 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -119,12 +119,13 @@ pub trait BlockchainBackend: Send + Sync { commitment: &Commitment, ) -> Result<Option<HashOutput>, ChainStorageError>; - /// Returns the unspent TransactionOutput output that matches the given unique_id if it exists in the current UTXO - /// set, otherwise None is returned. + /// Returns the unspent TransactionOutput output that matches the given unique_id if it exists, otherwise None is + /// returned.
fn fetch_utxo_by_unique_id( &self, parent_public_key: Option<&PublicKey>, unique_id: &[u8], + deleted_at: Option<u64>, ) -> Result<Option<UtxoMinedInfo>, ChainStorageError>; /// Returns all unspent outputs with a parent public key diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index f16f914ec4..2585a39521 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -320,9 +320,10 @@ where B: BlockchainBackend &self, parent_public_key: Option<PublicKey>, unique_id: HashOutput, + deleted_at: Option<u64>, ) -> Result<Option<UtxoMinedInfo>, ChainStorageError> { let db = self.db_read_access()?; - db.fetch_utxo_by_unique_id(parent_public_key.as_ref(), &unique_id) + db.fetch_utxo_by_unique_id(parent_public_key.as_ref(), &unique_id, deleted_at) } pub fn fetch_all_unspent_by_parent_public_key( diff --git a/base_layer/core/src/chain_storage/lmdb_db/helpers.rs b/base_layer/core/src/chain_storage/lmdb_db/helpers.rs new file mode 100644 index 0000000000..70f6ec8216 --- /dev/null +++ b/base_layer/core/src/chain_storage/lmdb_db/helpers.rs @@ -0,0 +1,49 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +use crate::chain_storage::ChainStorageError; +use lmdb_zero::error; +use log::*; +use serde::{de::DeserializeOwned, Serialize}; + +pub const LOG_TARGET: &str = "c::cs::lmdb_db::lmdb"; + +pub fn serialize<T>(data: &T) -> Result<Vec<u8>, ChainStorageError> +where T: Serialize { + let size = bincode::serialized_size(&data).map_err(|e| ChainStorageError::AccessError(e.to_string()))?; + let mut buf = Vec::with_capacity(size as usize); + bincode::serialize_into(&mut buf, data).map_err(|e| { + error!(target: LOG_TARGET, "Could not serialize lmdb: {:?}", e); + ChainStorageError::AccessError(e.to_string()) + })?; + Ok(buf) +} + +pub fn deserialize<T>(buf_bytes: &[u8]) -> Result<T, error::Error> +where T: DeserializeOwned { + bincode::deserialize(buf_bytes) + .map_err(|e| { + error!(target: LOG_TARGET, "Could not deserialize lmdb: {:?}", e); + e + }) + .map_err(|e| error::Error::ValRejected(e.to_string())) +} diff --git a/base_layer/core/src/chain_storage/lmdb_db/key_prefix_cursor.rs b/base_layer/core/src/chain_storage/lmdb_db/key_prefix_cursor.rs new file mode 100644 index 0000000000..8da331cef0 --- /dev/null +++ b/base_layer/core/src/chain_storage/lmdb_db/key_prefix_cursor.rs @@ -0,0 +1,104 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +use crate::chain_storage::{lmdb_db::helpers::deserialize, ChainStorageError}; +use lmdb_zero::{ConstAccessor, Cursor, LmdbResultExt}; +use serde::de::DeserializeOwned; +use std::marker::PhantomData; + +pub struct KeyPrefixCursor<'a, V> { + cursor: Cursor<'a, 'a>, + value_type: PhantomData<V>, + prefix_key: &'a [u8], + access: ConstAccessor<'a>, + has_seeked: bool, +} + +impl<'a, V> KeyPrefixCursor<'a, V> +where V: DeserializeOwned +{ + pub(super) fn new(cursor: Cursor<'a, 'a>, access: ConstAccessor<'a>, prefix_key: &'a [u8]) -> Self { + Self { + cursor, + access, + prefix_key, + value_type: PhantomData, + has_seeked: false, + } + } + + /// Returns the item on or after the key prefix, progressing forwards until the key prefix no longer matches + pub fn next(&mut self) -> Result<Option<(Vec<u8>, V)>, ChainStorageError> { + if !self.has_seeked { + if let Some((k, val)) = self.seek_gte(self.prefix_key)? { + return Ok(Some((k, val))); + } + } + + match self.cursor.next(&self.access).to_opt()? { + Some((k, v)) => Self::deserialize_if_matches(self.prefix_key, k, v), + None => Ok(None), + } + } + + /// Returns the item on or before the given seek key, progressing backwards until the key prefix no longer matches + pub fn prev(&mut self) -> Result<Option<(Vec<u8>, V)>, ChainStorageError> { + if !self.has_seeked { + let prefix_key = self.prefix_key; + if let Some((k, val)) = self.seek_gte(prefix_key)? { + // seek_range_k returns the greater key, so we only want to return the current value that was seeked to + // if it exactly matches the prefix_key + if k == prefix_key { + return Ok(Some((k, val))); + } + } + } + + match self.cursor.prev(&self.access).to_opt()? { + Some((k, v)) => Self::deserialize_if_matches(self.prefix_key, k, v), + None => Ok(None), + } + } + + pub fn seek_gte(&mut self, key: &[u8]) -> Result<Option<(Vec<u8>, V)>, ChainStorageError> { + self.has_seeked = true; + let seek_result = self.cursor.seek_range_k(&self.access, key).to_opt()?; + let (k, v) = match seek_result { + Some(r) => r, + None => return Ok(None), + }; + Self::deserialize_if_matches(key, k, v) + } + + fn deserialize_if_matches( + key_prefix: &[u8], + k: &[u8], + v: &[u8], + ) -> Result<Option<(Vec<u8>, V)>, ChainStorageError> { + let prefix_len = key_prefix.len(); + if k.len() < prefix_len || k[..prefix_len] != *key_prefix { + return Ok(None); + } + let val = deserialize::<V>(v)?; + Ok(Some((k.to_vec(), val))) + } +} diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs index 115a6be9a3..b7841b5fb3 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs @@ -20,7 +20,14 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use crate::chain_storage::{error::ChainStorageError, OrNotFound}; +use crate::chain_storage::{ + error::ChainStorageError, + lmdb_db::{ + helpers::{deserialize, serialize}, + key_prefix_cursor::KeyPrefixCursor, + }, + OrNotFound, +}; use lmdb_zero::{ del, error::{self, LmdbResultExt}, @@ -37,34 +44,10 @@ use lmdb_zero::{ use log::*; use serde::{de::DeserializeOwned, Serialize}; use std::fmt::Debug; -use tari_crypto::tari_utilities::hex::{to_hex, Hex}; +use tari_crypto::tari_utilities::hex::to_hex; pub const LOG_TARGET: &str = "c::cs::lmdb_db::lmdb"; -// TODO: Calling `access` for every lmdb operation has some overhead (an atomic read and set).
Check if is possible to -// pass an Accessor instead of the WriteTransaction? - -pub fn serialize<T>(data: &T) -> Result<Vec<u8>, ChainStorageError> -where T: Serialize { - let size = bincode::serialized_size(&data).map_err(|e| ChainStorageError::AccessError(e.to_string()))?; - let mut buf = Vec::with_capacity(size as usize); - bincode::serialize_into(&mut buf, data).map_err(|e| { - error!(target: LOG_TARGET, "Could not serialize lmdb: {:?}", e); - ChainStorageError::AccessError(e.to_string()) - })?; - Ok(buf) -} - -pub fn deserialize<T>(buf_bytes: &[u8]) -> Result<T, error::Error> -where T: DeserializeOwned { - bincode::deserialize(buf_bytes) - .map_err(|e| { - error!(target: LOG_TARGET, "Could not deserialize lmdb: {:?}", e); - e - }) - .map_err(|e| error::Error::ValRejected(e.to_string())) -} - pub fn lmdb_insert<K, V>( txn: &WriteTransaction<'_>, db: &Database, key: &K, @@ -128,7 +111,7 @@ where }) } -/// Inserts or replaces the item at the given key +/// Inserts or replaces the item at the given key. If the key does not exist, a new entry is created pub fn lmdb_replace<K, V>(txn: &WriteTransaction<'_>, db: &Database, key: &K, val: &V) -> Result<(), ChainStorageError> where K: AsLmdbBytes + ?Sized, @@ -182,7 +165,7 @@ where pub fn lmdb_delete_keys_starting_with<V>( txn: &WriteTransaction<'_>, db: &Database, - key: &str, + key: &[u8], ) -> Result<Vec<V>, ChainStorageError> where V: DeserializeOwned, { ChainStorageError::AccessError(e.to_string()) })?; - debug!(target: LOG_TARGET, "Deleting rows matching pattern: {}", key); - let mut row = match cursor.seek_range_k(&access, key) { Ok(r) => r, Err(_) => return Ok(vec![]), }; - trace!(target: LOG_TARGET, "Key: {}", row.0); + trace!(target: LOG_TARGET, "Key: {}", to_hex(row.0)); let mut result = vec![]; - while row.0.starts_with(key) { + while row.0[..key.len()] == *key { let val = deserialize::<V>(row.1)?; result.push(val); cursor.del(&mut access, del::NODUPDATA)?; - row = match cursor.next(&access) { - Ok(r) => r, - Err(_) => break, - } + row = match cursor.next(&access).to_opt()?
{ + Some(r) => r, + None => break, + }; } Ok(result) } @@ -308,67 +289,37 @@ pub fn lmdb_len(txn: &ConstTransaction<'_>, db: &Database) -> Result<usize, ChainStorageError> -pub fn lmdb_fetch_keys_starting_with<V>( - key: &str, - txn: &ConstTransaction<'_>, - db: &Database, -) -> Result<Vec<V>, ChainStorageError> +/// Return a cursor that iterates, either backwards or forwards through keys matching the given prefix +pub fn lmdb_get_prefix_cursor<'a, V>( + txn: &'a ConstTransaction<'a>, + db: &'a Database, + prefix_key: &'a [u8], +) -> Result<KeyPrefixCursor<'a, V>, ChainStorageError> where V: DeserializeOwned, { let access = txn.access(); - let mut cursor = txn.cursor(db).map_err(|e| { + + let cursor = txn.cursor(&*db).map_err(|e| { error!(target: LOG_TARGET, "Could not get read cursor from lmdb: {:?}", e); ChainStorageError::AccessError(e.to_string()) })?; - trace!(target: LOG_TARGET, "Getting rows matching pattern: {}", key); - - let mut row = match cursor.seek_range_k(&access, key) { - Ok(r) => r, - Err(_) => return Ok(vec![]), - }; - trace!(target: LOG_TARGET, "Key: {}", row.0); - let mut result = vec![]; - while row.0.starts_with(key) { - let val = deserialize::<V>(row.1)?; - result.push(val); - row = match cursor.next(&access) { - Ok(r) => r, - Err(_) => break, - } - } - Ok(result) + Ok(KeyPrefixCursor::new(cursor, access, prefix_key)) } -pub fn lmdb_fetch_keys_starting_with_bytes<V>( - key: &[u8], +pub fn lmdb_fetch_matching_after<V>( txn: &ConstTransaction<'_>, db: &Database, + key_prefix: &[u8], ) -> Result<Vec<V>, ChainStorageError> where V: DeserializeOwned, { - let access = txn.access(); - let mut cursor = txn.cursor(db).map_err(|e| { - error!(target: LOG_TARGET, "Could not get read cursor from lmdb: {:?}", e); - ChainStorageError::AccessError(e.to_string()) - })?; - - let mut row = match cursor.seek_range_k(&access, key) { - Ok(r) => r, - Err(_) => return Ok(vec![]), - }; - debug!(target: LOG_TARGET, "Row found: {}", row.0.to_vec().to_hex()); + let mut cursor = lmdb_get_prefix_cursor(txn, db, key_prefix)?; let mut result = vec![]; - while &row.0[0..key.len()] == key { - let val = deserialize::<V>(row.1)?; + while let Some((_, val)) = cursor.next()?
{ result.push(val); - row = match cursor.next(&access) { - Ok(r) => r, - Err(_) => break, - }; - debug!(target: LOG_TARGET, "Row found: {}", row.0.to_vec().to_hex()); } Ok(result) } diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index bb948f542b..51d269dce7 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -44,12 +44,12 @@ use crate::{ lmdb_delete_key_value, lmdb_delete_keys_starting_with, lmdb_exists, - lmdb_fetch_keys_starting_with, - lmdb_fetch_keys_starting_with_bytes, + lmdb_fetch_matching_after, lmdb_filter_map_values, lmdb_first_after, lmdb_get, lmdb_get_multiple, + lmdb_get_prefix_cursor, lmdb_insert, lmdb_insert_dup, lmdb_last, @@ -74,6 +74,7 @@ use crate::{ transaction::{TransactionInput, TransactionKernel, TransactionOutput}, }, }; +use blake2::Digest; use croaring::Bitmap; use fs2::FileExt; use lmdb_zero::{ConstTransaction, Database, Environment, ReadTransaction, WriteTransaction}; @@ -82,8 +83,10 @@ use serde::{Deserialize, Serialize}; use std::{ convert::TryFrom, fmt, + fmt::Formatter, fs, fs::File, + mem, ops::{Deref, Range}, path::Path, sync::Arc, @@ -339,7 +342,7 @@ impl LMDBDatabase { UpdateDeletedBitmap { deleted } => { let mut bitmap = self.load_deleted_bitmap_model(&write_txn)?; bitmap.merge(deleted)?; - bitmap.finish()?; + bitmap.save()?; }, PruneOutputsAndUpdateHorizon { output_positions, @@ -463,18 +466,20 @@ impl LMDBDatabase { txn: &WriteTransaction<'_>, key: &OutputKey, ) -> Result<TransactionOutput, ChainStorageError> { - let key = key.get_key(); let mut output: TransactionOutputRowData = - lmdb_get(txn, &self.utxos_db, key.as_str()).or_not_found("TransactionOutput", "key", key.clone())? + lmdb_get(txn, &self.utxos_db, key.as_bytes()).or_not_found("TransactionOutput", "key", key.to_hex())?; let pruned_output = output .output .take() .ok_or_else(|| ChainStorageError::DataInconsistencyDetected { function: "prune_output", - details: format!("Attempt to prune output that has already been pruned for key {}", key), + details: format!( + "Attempt to prune output that has already been pruned for key {}", + key.to_hex() + ), })?; // output.output is None - lmdb_replace(txn, &self.utxos_db, key.as_str(), &output)?; + lmdb_replace(txn, &self.utxos_db, key.as_bytes(), &output)?; Ok(pruned_output) } @@ -489,8 +494,7 @@ impl LMDBDatabase { let output_hash = output.hash(); let witness_hash = output.witness_hash(); - let key = OutputKey::new(header_hash, mmr_position); - let key_string = key.get_key(); + let key = OutputKey::new(header_hash, mmr_position, &[]); lmdb_insert( txn, )?; if let Some(ref unique_id) = output.features.unique_id { - debug!(target: LOG_TARGET, "unique_id: {}", unique_id.to_hex()); - let key = UniqueIdIndexKey::new(output.features.parent_public_key.as_ref(), unique_id.as_slice()); - + let parent_public_key = output.features.parent_public_key.as_ref(); + let key = UniqueIdIndexKey::new(parent_public_key, unique_id.as_slice()); + debug!( + target: LOG_TARGET, + "inserting index for unique_id <{}, {}> in output {}.
Key is {}", + parent_public_key + .map(|p| p.to_hex()) + .unwrap_or_else(|| "".to_string()), + unique_id.to_hex(), + output_hash.to_hex(), + key, + ); lmdb_insert( txn, &*self.unique_id_index, @@ -517,13 +530,13 @@ impl LMDBDatabase { txn, &*self.txos_hash_to_index_db, output_hash.as_slice(), - &(mmr_position, &key_string), + &(mmr_position, key.as_bytes()), "txos_hash_to_index_db", )?; lmdb_insert( txn, &*self.utxos_db, - key_string.as_str(), + key.as_bytes(), &TransactionOutputRowData { output: Some(output.clone()), header_hash: header_hash.clone(), @@ -553,19 +566,18 @@ impl LMDBDatabase { header_hash.to_hex(), ))); } - let key = OutputKey::new(header_hash, mmr_position); - let key_string = key.get_key(); + let key = OutputKey::new(header_hash, mmr_position, &[]); lmdb_insert( txn, &*self.txos_hash_to_index_db, output_hash.as_slice(), - &(mmr_position, key_string.clone()), + &(mmr_position, key.as_bytes()), "txos_hash_to_index_db", )?; lmdb_insert( txn, &*self.utxos_db, - key_string.as_str(), + key.as_bytes(), &TransactionOutputRowData { output: None, header_hash: header_hash.clone(), @@ -586,7 +598,7 @@ impl LMDBDatabase { mmr_position: u32, ) -> Result<(), ChainStorageError> { let hash = kernel.hash(); - let key = format!("{}-{:010}-{}", header_hash.to_hex(), mmr_position, hash.to_hex()); + let key = KernelKey::new(header_hash, mmr_position, &hash); lmdb_insert( txn, @@ -610,7 +622,7 @@ impl LMDBDatabase { lmdb_insert( txn, &*self.kernels_db, - key.as_str(), + key.as_bytes(), &TransactionKernelRowData { kernel: kernel.clone(), header_hash: header_hash.clone(), @@ -644,17 +656,57 @@ impl LMDBDatabase { )?; if let Some(ref unique_id) = input.features.unique_id { - let key = UniqueIdIndexKey::new(input.features.parent_public_key.as_ref(), unique_id.as_slice()); + let parent_public_key = input.features.parent_public_key.as_ref(); + // Move the "current" UTXO entry to a key containing the spend height + let mut key = UniqueIdIndexKey::new(parent_public_key, unique_id.as_slice()); + let expected_output_hash = lmdb_get::<_, HashOutput>(txn, &self.unique_id_index, key.as_bytes())? + .ok_or_else(|| ChainStorageError::DataInconsistencyDetected { + function: "insert_input", + details: format!("unique token ID with key {} does not exist in index", key.to_hex()), + })?; + + let output_hash = input.output_hash(); + if expected_output_hash != output_hash { + // This should have been checked by an upstream validator + return Err(ChainStorageError::DataInconsistencyDetected { + function: "insert_input", + details: format!( + "output hash for unique id key {} did not match the output hash this input is spending. 
\ + output hash in index {}, hash of spent output: {}", + key.to_hex(), + expected_output_hash.to_hex(), + output_hash.to_hex() + ), + }); + } lmdb_delete(txn, &self.unique_id_index, key.as_bytes(), "unique_id_index")?; + key.set_deleted_height(height); + debug!( + target: LOG_TARGET, + "moving index for unique_id <{}, {}> in output {} to key {}", + parent_public_key + .map(|p| p.to_hex()) + .unwrap_or_else(|| "<None>".to_string()), + unique_id.to_hex(), + output_hash.to_hex(), + key, + ); + lmdb_insert( + txn, + &self.unique_id_index, + key.as_bytes(), + &output_hash, + "unique_id_index", + )?; } let hash = input.hash(); - let key = format!("{}-{:010}-{}", header_hash.to_hex(), mmr_position, hash.to_hex()); + let key = InputKey::new(&header_hash, mmr_position, &hash); lmdb_insert( txn, &*self.inputs_db, - key.as_str(), + key.as_bytes(), &TransactionInputRowData { input, header_hash, @@ -813,20 +865,16 @@ impl LMDBDatabase { // sufficient let hash = header.hash(); - // Check that there are no utxos or kernels linked to this. - if !lmdb_fetch_keys_starting_with::<TransactionKernelRowData>(hash.to_hex().as_str(), txn, &self.kernels_db)? - .is_empty() - { + // Check that there are no utxos or kernels linked to this. + if !lmdb_fetch_matching_after::<TransactionKernelRowData>(txn, &self.kernels_db, &hash)?.is_empty() { return Err(ChainStorageError::InvalidOperation(format!( "Cannot delete header {} ({}) because there are kernels linked to it", header.height, hash.to_hex() ))); } - if !lmdb_fetch_keys_starting_with::<TransactionOutputRowData>(hash.to_hex().as_str(), txn, &self.utxos_db)? - .is_empty() - { + if !lmdb_fetch_matching_after::<TransactionOutputRowData>(txn, &self.utxos_db, &hash)?.is_empty() { return Err(ChainStorageError::InvalidOperation(format!( "Cannot delete header at height {} ({}) because there are UTXOs linked to it", height, @@ -866,9 +914,9 @@ impl LMDBDatabase { let hash_hex = block_hash.to_hex(); debug!(target: LOG_TARGET, "Deleting block `{}`", hash_hex); debug!(target: LOG_TARGET, "Deleting UTXOs..."); - let height = - self.fetch_height_from_hash(write_txn, block_hash) - .or_not_found("Block", "hash", hash_hex.clone())?; + let height = self + .fetch_height_from_hash(write_txn, block_hash) + .or_not_found("Block", "hash", hash_hex)?; let block_accum_data = self.fetch_block_accumulated_data(write_txn, height)?
.ok_or_else(|| ChainStorageError::ValueNotFound { })?; let mut bitmap = self.load_deleted_bitmap_model(write_txn)?; bitmap.remove(block_accum_data.deleted())?; - bitmap.finish()?; + bitmap.save()?; lmdb_delete( write_txn, "block_accumulated_data_db", )?; - self.delete_block_inputs_outputs(write_txn, &hash_hex)?; - self.delete_block_kernels(write_txn, &hash_hex)?; + self.delete_block_inputs_outputs(write_txn, height, block_hash)?; + self.delete_block_kernels(write_txn, block_hash)?; Ok(()) } - fn delete_block_inputs_outputs(&self, txn: &WriteTransaction<'_>, hash: &str) -> Result<(), ChainStorageError> { + fn delete_block_inputs_outputs( + &self, + txn: &WriteTransaction<'_>, + height: u64, + hash: &[u8], + ) -> Result<(), ChainStorageError> { let output_rows = lmdb_delete_keys_starting_with::<TransactionOutputRowData>(txn, &self.utxos_db, hash)?; debug!(target: LOG_TARGET, "Deleted {} outputs...", output_rows.len()); let inputs = lmdb_delete_keys_starting_with::<TransactionInputRowData>(txn, &self.inputs_db, hash)?; @@ -920,9 +973,9 @@ impl LMDBDatabase { output.commitment.as_bytes(), "utxo_commitment_index", )?; - if let Some(ref unique_id) = output.features.unique_id { - let key = UniqueIdIndexKey::new(output.features.parent_public_key.as_ref(), unique_id.as_slice()); - lmdb_delete(txn, &*self.unique_id_index, key.as_bytes(), "unique_id_index")?; + if let Some(unique_id) = output.features.unique_asset_id() { + let key = UniqueIdIndexKey::new(output.features.parent_public_key.as_ref(), unique_id); + lmdb_delete(txn, &self.unique_id_index, key.as_bytes(), "unique_id_index")?; } } } @@ -948,12 +1001,21 @@ impl LMDBDatabase { &row.mmr_position, "deleted_txo_mmr_position_to_height_index", )?; + if let Some(unique_id) = row.input.features.unique_asset_id() { + let mut key = UniqueIdIndexKey::new(row.input.features.parent_public_key.as_ref(), unique_id); + // The output that made this input that is being unspent is now at the head + lmdb_replace(txn, &self.unique_id_index, key.as_bytes(), &output_hash)?; + + // Remove the checkpoint key at current height + key.set_deleted_height(height); + lmdb_delete(txn, &self.unique_id_index, key.as_bytes(), "unique_id_index")?; + } } Ok(()) } - fn delete_block_kernels(&self, txn: &WriteTransaction<'_>, hash: &str) -> Result<(), ChainStorageError> { - let kernels = lmdb_delete_keys_starting_with::<TransactionKernelRowData>(txn, &self.kernels_db, hash)?; + fn delete_block_kernels(&self, txn: &WriteTransaction<'_>, block_hash: &[u8]) -> Result<(), ChainStorageError> { + let kernels = lmdb_delete_keys_starting_with::<TransactionKernelRowData>(txn, &self.kernels_db, block_hash)?; debug!(target: LOG_TARGET, "Deleted {} kernels...", kernels.len()); for kernel in kernels { trace!( @@ -1093,7 +1155,9 @@ impl LMDBDatabase { ..
} = data; + let mut output_mmr = MutableMmr::<HashDigest, _>::new(pruned_output_set, Bitmap::create())?; let mut kernel_mmr = MerkleMountainRange::<HashDigest, _>::new(pruned_kernel_set); + let mut witness_mmr = MerkleMountainRange::<HashDigest, _>::new(pruned_proof_set); for kernel in kernels { total_kernel_sum = &total_kernel_sum + &kernel.excess; @@ -1106,9 +1170,7 @@ impl LMDBDatabase { self.insert_kernel(txn, &block_hash, &kernel, pos as u32)?; } - let mut output_mmr = MutableMmr::<HashDigest, _>::new(pruned_output_set, Bitmap::create())?; - let mut witness_mmr = MerkleMountainRange::<HashDigest, _>::new(pruned_proof_set); - + // unique_id_index expects inputs to be inserted before outputs for input in inputs { total_utxo_sum = &total_utxo_sum - &input.commitment; let index = self @@ -1149,7 +1211,7 @@ impl LMDBDatabase { output_mmr.compress(); // Save the bitmap - deleted_bitmap.finish()?; + deleted_bitmap.save()?; self.insert_block_accumulated_data( txn, @@ -1284,8 +1346,8 @@ impl LMDBDatabase { &((pos + 1) as u64).to_be_bytes(), ) .or_not_found("BlockHeader", "mmr_position", pos.to_string())?; - let key = OutputKey::new(&hash, *pos); - debug!(target: LOG_TARGET, "Pruning output: {}", key.get_key()); + let key = OutputKey::new(&hash, *pos, &[]); + debug!(target: LOG_TARGET, "Pruning output: {}", key.to_hex()); self.prune_output(write_txn, &key)?; } @@ -1307,7 +1369,7 @@ impl LMDBDatabase { ) -> Result<Option<u32>, ChainStorageError> { match tree { MmrTree::Utxo => { - Ok(lmdb_get::<_, (u32, String)>(txn, &self.txos_hash_to_index_db, hash)?.map(|(index, _)| index)) + Ok(lmdb_get::<_, (u32, Vec<u8>)>(txn, &self.txos_hash_to_index_db, hash)?.map(|(index, _)| index)) }, _ => unimplemented!(), } @@ -1355,16 +1417,16 @@ impl LMDBDatabase { output_hash: &HashOutput, ) -> Result<Option<UtxoMinedInfo>, ChainStorageError> { if let Some((index, key)) = - lmdb_get::<_, (u32, String)>(txn, &self.txos_hash_to_index_db, output_hash.as_slice())? + lmdb_get::<_, (u32, Vec<u8>)>(txn, &self.txos_hash_to_index_db, output_hash.as_slice())? { debug!( target: LOG_TARGET, "Fetch output: {} Found ({}, {})", output_hash.to_hex(), index, - key + key.to_hex() ); - match lmdb_get::<_, TransactionOutputRowData>(txn, &self.utxos_db, key.as_str())? { + match lmdb_get::<_, TransactionOutputRowData>(txn, &self.utxos_db, &key)? { Some(TransactionOutputRowData { output: Some(o), mmr_position, @@ -1716,12 +1778,10 @@ impl BlockchainBackend for LMDBDatabase { fn fetch_kernels_in_block(&self, header_hash: &HashOutput) -> Result<Vec<TransactionKernel>, ChainStorageError> { let txn = self.read_transaction()?; - Ok( - lmdb_fetch_keys_starting_with(header_hash.to_hex().as_str(), &txn, &self.kernels_db)? - .into_iter() - .map(|f: TransactionKernelRowData| f.kernel) - .collect(), - ) + Ok(lmdb_fetch_matching_after(&txn, &self.kernels_db, header_hash)? + .into_iter() + .map(|f: TransactionKernelRowData| f.kernel) + .collect()) } fn fetch_kernel_by_excess( @@ -1751,8 +1811,8 @@ impl BlockchainBackend for LMDBDatabase { if let Some((header_hash, mmr_position, hash)) = lmdb_get::<_, (HashOutput, u32, HashOutput)>(&txn, &self.kernel_excess_sig_index, key.as_slice())? { - let key = format!("{}-{:010}-{}", header_hash.to_hex(), mmr_position, hash.to_hex()); - Ok(lmdb_get(&txn, &self.kernels_db, key.as_str())? + let key = KernelKey::new(&header_hash, mmr_position, &hash); + Ok(lmdb_get(&txn, &self.kernels_db, key.as_bytes())?
.map(|kernel: TransactionKernelRowData| (kernel.kernel, header_hash))) } else { Ok(None) } @@ -1800,15 +1860,11 @@ impl BlockchainBackend for LMDBDatabase { .hash; result.extend( - lmdb_fetch_keys_starting_with::<TransactionKernelRowData>( - hash.to_hex().as_str(), - &txn, - &self.kernels_db, - )? - .into_iter() - .skip(skip_amount) - .take(total_size - result.len()) - .map(|f| f.kernel), + lmdb_fetch_matching_after::<TransactionKernelRowData>(&txn, &self.kernels_db, &hash)? + .into_iter() + .skip(skip_amount) + .take(total_size - result.len()) + .map(|f| f.kernel), ); skip_amount = 0; @@ -1877,30 +1933,26 @@ impl BlockchainBackend for LMDBDatabase { })?; result.extend( - lmdb_fetch_keys_starting_with::<TransactionOutputRowData>( - accum_data.hash.to_hex().as_str(), - &txn, - &self.utxos_db, - )? - .into_iter() - .skip(skip_amount) - .take(total_size - result.len()) - .map(|row| { - if deleted.contains(row.mmr_position) { - return PrunedOutput::Pruned { - output_hash: row.hash, - witness_hash: row.witness_hash, - }; - } - if let Some(output) = row.output { - PrunedOutput::NotPruned { output } - } else { - PrunedOutput::Pruned { - output_hash: row.hash, - witness_hash: row.witness_hash, + lmdb_fetch_matching_after::<TransactionOutputRowData>(&txn, &self.utxos_db, &accum_data.hash)? + .into_iter() + .skip(skip_amount) + .take(total_size - result.len()) + .map(|row| { + if deleted.contains(row.mmr_position) { + return PrunedOutput::Pruned { + output_hash: row.hash, + witness_hash: row.witness_hash, + }; } - } - }), + if let Some(output) = row.output { + PrunedOutput::NotPruned { output } + } else { + PrunedOutput::Pruned { + output_hash: row.hash, + witness_hash: row.witness_hash, + } + } + }), ); // Builds a BitMap of the deleted UTXO MMR indexes that occurred at the current height @@ -1936,19 +1988,34 @@ impl BlockchainBackend for LMDBDatabase { &self, parent_public_key: Option<&PublicKey>, unique_id: &[u8], + deleted_height: Option<u64>, ) -> Result<Option<UtxoMinedInfo>, ChainStorageError> { let txn = self.read_transaction()?; - let key = UniqueIdIndexKey::new(parent_public_key, unique_id); - debug!( - target: LOG_TARGET, - "Trying to find UTXO with unique index of {}", - key.as_bytes().to_vec().to_hex() - ); - if let Some(hash) = lmdb_get::<_, HashOutput>(&txn, &self.unique_id_index, key.as_bytes())? { - debug!(target: LOG_TARGET, "Found 1 token with hash:{}", hash.to_hex()); - self.fetch_output_in_txn(&txn, &hash) - } else { - Ok(None) + + let mut key = UniqueIdIndexKey::new(parent_public_key, unique_id); + if let Some(height) = deleted_height { + key.set_deleted_height(height); + } + + let output_hash = { + let mut cursor = lmdb_get_prefix_cursor(&txn, &self.unique_id_index, key.as_prefix_bytes())?; + // Seek to the exact matching key or greater + let r = cursor.seek_gte(key.as_bytes())?; + match r { + Some((k, output_hash)) if k == key.as_bytes() => Some(output_hash), + _ => { + // Either return the output hash that matches the key exactly, or the key before the given key.
+ // Since a key without a deleted height is `0xFF x 8 (u64::MAX)` that key will always return the + // output in the UTxO set or the last TxO matching the <parent_public_key, unique_id> + // tuple + cursor.prev()?.map(|(_, v)| v) + }, + } + }; + + match output_hash { + Some(output_hash) => self.fetch_output_in_txn(&txn, &output_hash), + None => Ok(None), + } } @@ -1959,7 +2026,7 @@ impl BlockchainBackend for LMDBDatabase { ) -> Result<Vec<UtxoMinedInfo>, ChainStorageError> { let txn = self.read_transaction()?; let key = parent_public_key.as_bytes(); - let values: Vec<HashOutput> = lmdb_fetch_keys_starting_with_bytes(key, &txn, &self.unique_id_index)?; + let values: Vec<HashOutput> = lmdb_fetch_matching_after(&txn, &self.unique_id_index, key)?; let mut result = vec![]; for hash in values.into_iter().skip(range.start).take(range.len()) { if let Some(s) = self.fetch_output_in_txn(&txn, &hash)? { @@ -1971,28 +2038,24 @@ impl BlockchainBackend for LMDBDatabase { fn fetch_outputs_in_block(&self, header_hash: &HashOutput) -> Result<Vec<PrunedOutput>, ChainStorageError> { let txn = self.read_transaction()?; - Ok( - lmdb_fetch_keys_starting_with(header_hash.to_hex().as_str(), &txn, &self.utxos_db)? - .into_iter() - .map(|f: TransactionOutputRowData| match f.output { - Some(o) => PrunedOutput::NotPruned { output: o }, - None => PrunedOutput::Pruned { - output_hash: f.hash, - witness_hash: f.witness_hash, - }, - }) - .collect(), - ) + Ok(lmdb_fetch_matching_after(&txn, &self.utxos_db, header_hash)? + .into_iter() + .map(|f: TransactionOutputRowData| match f.output { + Some(o) => PrunedOutput::NotPruned { output: o }, + None => PrunedOutput::Pruned { + output_hash: f.hash, + witness_hash: f.witness_hash, + }, + }) + .collect()) } fn fetch_inputs_in_block(&self, header_hash: &HashOutput) -> Result<Vec<TransactionInput>, ChainStorageError> { let txn = self.read_transaction()?; - Ok( - lmdb_fetch_keys_starting_with(header_hash.to_hex().as_str(), &txn, &self.inputs_db)? - .into_iter() - .map(|f: TransactionInputRowData| f.input) - .collect(), - ) + Ok(lmdb_fetch_matching_after(&txn, &self.inputs_db, header_hash)? + .into_iter() + .map(|f: TransactionInputRowData| f.input) + .collect()) } fn fetch_mmr_size(&self, tree: MmrTree) -> Result<u64, ChainStorageError> { @@ -2422,22 +2485,64 @@ impl fmt::Display for MetadataValue { } } +#[derive(Debug, Clone)] struct UniqueIdIndexKey { inner: Vec<u8>, + prefix_len: usize, } impl UniqueIdIndexKey { + /// Construct a key for the unique_id_index db.
+ /// + /// # Arguments + /// `parent_public_key` - the parent asset public key to which the token is assigned + /// `unique_id` - a series of bytes representing the token uniquely for the asset pub fn new(parent_public_key: Option<&PublicKey>, unique_id: &[u8]) -> Self { - let mut key = parent_public_key - .map(|k| k.as_bytes().to_vec()) - .unwrap_or_else(|| vec![0u8; 32]); - key.extend_from_slice(unique_id); - Self { inner: key } + let unique_id_hash = HashDigest::default().chain(unique_id).finalize(); + Self::from_raw_parts( + parent_public_key.map(|p| p.as_bytes()).unwrap_or(&[0; 32][..]), + &unique_id_hash, + // u64::MAX + &[0xff; 8][..], + ) + } + + /// Convert the key to a deleted at height key + /// `deleted_height` - The height that the UTXO was deleted + pub fn set_deleted_height(&mut self, deleted_height: u64) -> &mut Self { + let n = self.inner.len() - mem::size_of::<u64>(); + self.inner[n..].copy_from_slice(&deleted_height.to_be_bytes()); + self + } + + fn from_raw_parts(parent_public_key_bytes: &[u8], unique_id_bytes: &[u8], deleted_height_bytes: &[u8]) -> Self { + let prefix_len = parent_public_key_bytes.len() + unique_id_bytes.len(); + let mut key = Vec::with_capacity(prefix_len + deleted_height_bytes.len()); + + key.extend_from_slice(parent_public_key_bytes); + key.extend_from_slice(unique_id_bytes); + key.extend_from_slice(deleted_height_bytes); + + Self { inner: key, prefix_len } } pub fn as_bytes(&self) -> &[u8] { self.inner.as_slice() } + + pub fn as_prefix_bytes(&self) -> &[u8] { + &self.inner[..self.prefix_len][..] + } + + pub fn to_hex(&self) -> String { + self.inner.to_hex() + } +} + +impl fmt::Display for UniqueIdIndexKey { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.to_hex()) + } } /// A struct that wraps a LMDB transaction and provides an interface to valid operations that can be performed @@ -2487,7 +2592,7 @@ impl<'a, 'b> DeletedBitmapModel<'a, WriteTransaction<'b>> { } /// Persist the bitmap if required. This is a no-op if the bitmap has not been modified.
- pub fn finish(mut self) -> Result<(), ChainStorageError> { + pub fn save(mut self) -> Result<(), ChainStorageError> { if !self.is_dirty { return Ok(()); } @@ -2503,20 +2608,29 @@ impl<'a, 'b> DeletedBitmapModel<'a, WriteTransaction<'b>> { } } -struct OutputKey<'a> { - header_hash: &'a [u8], - mmr_position: u32, +struct CompositeKey { + key: Vec<u8>, } -impl<'a> OutputKey<'a> { - pub fn new(header_hash: &'a [u8], mmr_position: u32) -> OutputKey { - OutputKey { - header_hash, - mmr_position, - } +impl CompositeKey { + pub fn new(header_hash: &[u8], mmr_position: u32, hash: &[u8]) -> OutputKey { + let mut key = Vec::with_capacity(header_hash.len() + mem::size_of::<u32>() + hash.len()); + key.extend_from_slice(header_hash); + key.extend_from_slice(&mmr_position.to_be_bytes()); + key.extend_from_slice(hash); + + OutputKey { key } } - pub fn get_key(&self) -> String { - format!("{}-{:010}", to_hex(self.header_hash), self.mmr_position) + pub fn as_bytes(&self) -> &[u8] { + &self.key + } + + pub fn to_hex(&self) -> String { + self.key.to_hex() + } } + +type InputKey = CompositeKey; +type KernelKey = CompositeKey; +type OutputKey = CompositeKey; diff --git a/base_layer/core/src/chain_storage/lmdb_db/mod.rs b/base_layer/core/src/chain_storage/lmdb_db/mod.rs index 7947da342d..f8370f41d9 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/mod.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/mod.rs @@ -20,6 +20,8 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +mod helpers; +mod key_prefix_cursor; mod lmdb; #[allow(clippy::module_inception)] mod lmdb_db; diff --git a/base_layer/core/src/chain_storage/pruned_output.rs b/base_layer/core/src/chain_storage/pruned_output.rs index 20f54c37ef..c40649321f 100644 --- a/base_layer/core/src/chain_storage/pruned_output.rs +++ b/base_layer/core/src/chain_storage/pruned_output.rs @@ -49,4 +49,11 @@ impl PrunedOutput { PrunedOutput::NotPruned { output } => output.hash(), } } + + pub fn as_transaction_output(&self) -> Option<&TransactionOutput> { + match self { + PrunedOutput::Pruned { ..
} => None, + PrunedOutput::NotPruned { output } => Some(output), + } + } } diff --git a/base_layer/core/src/chain_storage/tests/blockchain_database.rs b/base_layer/core/src/chain_storage/tests/blockchain_database.rs index afad1d1370..0cebd41ad3 100644 --- a/base_layer/core/src/chain_storage/tests/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/tests/blockchain_database.rs @@ -45,8 +45,12 @@ use crate::{ use rand::rngs::OsRng; use std::sync::Arc; use tari_common::configuration::Network; -use tari_common_types::types::PublicKey; -use tari_crypto::keys::PublicKey as PublicKeyTrait; +use tari_common_types::types::{CommitmentFactory, PublicKey}; +use tari_crypto::{ + commitment::HomomorphicCommitmentFactory, + keys::PublicKey as PublicKeyTrait, + ristretto::RistrettoPublicKey, +}; use tari_test_utils::unpack_enum; use tari_utilities::Hashable; @@ -644,3 +648,139 @@ mod prepare_new_block { assert_eq!(block.header.height, 1); } } + +mod fetch_utxo_by_unique_id { + use super::*; + + #[test] + fn it_returns_none_if_empty() { + let db = setup(); + let asset_pk = RistrettoPublicKey::default(); + let result = db.fetch_utxo_by_unique_id(Some(asset_pk), vec![1, 2, 3], None).unwrap(); + assert!(result.is_none()); + } + + #[test] + fn it_finds_the_utxo_by_unique_id_at_deleted_height() { + let db = setup(); + let unique_id = vec![1u8; 3]; + let (_, asset_pk) = RistrettoPublicKey::random_keypair(&mut OsRng); + + // Height 1 + let (blocks, outputs) = add_many_chained_blocks(1, &db); + + let features = OutputFeatures { + flags: OutputFlags::MINT_NON_FUNGIBLE, + parent_public_key: Some(asset_pk.clone()), + unique_id: Some(unique_id.clone()), + ..Default::default() + }; + let (txns, tx_outputs) = schema_to_transaction(&[txn_schema!( + from: vec![outputs[0].clone()], + to: vec![500 * T], + fee: 5.into(), + lock: 0, + features: features + )]); + + let asset_utxo1 = tx_outputs.iter().find(|o| o.features == features).unwrap(); + + // Height 2 - mint + let (block, _) = create_next_block(blocks.last().unwrap(), txns); + assert!(db.add_block(block).unwrap().is_added()); + + // Height 4 + let (blocks, _) = add_many_chained_blocks(2, &db); + + let info = db + .fetch_utxo_by_unique_id(Some(asset_pk.clone()), unique_id.clone(), None) + .unwrap() + .unwrap(); + assert_eq!(info.output.as_transaction_output().unwrap().features, features); + let expected_commitment = + CommitmentFactory::default().commit_value(&asset_utxo1.spending_key, asset_utxo1.value.as_u64()); + assert_eq!( + info.output.as_transaction_output().unwrap().commitment, + expected_commitment + ); + + let features = OutputFeatures { + flags: OutputFlags::empty(), + parent_public_key: Some(asset_pk.clone()), + unique_id: Some(unique_id.clone()), + ..Default::default() + }; + let (txns, tx_outputs) = schema_to_transaction(&[txn_schema!( + from: vec![asset_utxo1.clone()], + to: vec![50 * T], + fee: 5.into(), + lock: 0, + features: features + )]); + + let asset_utxo2 = tx_outputs.iter().find(|o| o.features == features).unwrap(); + + // Height 5 - spend + let (block, _) = create_next_block(blocks.last().unwrap(), txns); + assert!(db.add_block(block).unwrap().is_added()); + + // Height 10 + let (blocks, _) = add_many_chained_blocks(5, &db); + + // Current UTXO + let info = db + .fetch_utxo_by_unique_id(Some(asset_pk.clone()), unique_id.clone(), None) + .unwrap() + .unwrap(); + let expected_commitment = + CommitmentFactory::default().commit_value(&asset_utxo2.spending_key, asset_utxo2.value.as_u64()); + assert_eq!(
info.output.as_transaction_output().unwrap().commitment, + expected_commitment + ); + + let assert_utxo_not_found = |deleted_height: Option<u64>| { + let info = db + .fetch_utxo_by_unique_id(Some(asset_pk.clone()), unique_id.clone(), deleted_height) + .unwrap(); + assert!(info.is_none()); + }; + + let assert_utxo_found = |utxo: &UnblindedOutput, deleted_height: Option<u64>| { + let info = db + .fetch_utxo_by_unique_id(Some(asset_pk.clone()), unique_id.clone(), deleted_height) + .unwrap() + .ok_or_else(|| format!("was none at deleted height {:?}", deleted_height)) + .unwrap(); + + let expected_commitment = + CommitmentFactory::default().commit_value(&utxo.spending_key, utxo.value.as_u64()); + assert_eq!( + info.output.as_transaction_output().unwrap().commitment, + expected_commitment + ); + }; + + (0..=4).for_each(|i| { + assert_utxo_not_found(Some(i)); + }); + (5..=10).for_each(|i| { + assert_utxo_found(asset_utxo1, Some(i)); + }); + + let (txns, _) = schema_to_transaction(&[txn_schema!(from: vec![asset_utxo2.clone()], to: vec![T])]); + + // Height 11 - burn + let (block, _) = create_next_block(blocks.last().unwrap(), txns); + assert!(db.add_block(block).unwrap().is_added()); + + assert_utxo_found(asset_utxo2, None); + (0..=4).for_each(|i| { + assert_utxo_not_found(Some(i)); + }); + (5..=10).for_each(|i| { + assert_utxo_found(asset_utxo1, Some(i)); + }); + assert_utxo_found(asset_utxo2, Some(11)); + } +} diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index af4e3086d4..8d2c8dfce2 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -294,11 +294,12 @@ impl BlockchainBackend for TempDatabase { &self, parent_public_key: Option<&PublicKey>, unique_id: &[u8], + deleted_at: Option<u64>, ) -> Result<Option<UtxoMinedInfo>, ChainStorageError> { self.db .as_ref() .unwrap() - .fetch_utxo_by_unique_id(parent_public_key, unique_id) + .fetch_utxo_by_unique_id(parent_public_key, unique_id, deleted_at) } fn fetch_all_unspent_by_parent_public_key( diff --git a/base_layer/core/src/transactions/test_helpers.rs b/base_layer/core/src/transactions/test_helpers.rs index 8d1198f0be..0d748551c5 100644 --- a/base_layer/core/src/transactions/test_helpers.rs +++ b/base_layer/core/src/transactions/test_helpers.rs @@ -282,7 +282,7 @@ macro_rules! txn_schema { to_outputs: vec![], fee: $fee, lock_height: $lock, - features: $features, + features: $features.clone(), script: tari_crypto::script![Nop], input_data: None, } @@ -294,7 +294,7 @@ macro_rules!
txn_schema { to:$outputs, fee:$fee, lock:$lock, - features: $features + features: $features.clone() ) }}; (from: $input:expr, to: $outputs:expr, features: $features:expr) => {{ diff --git a/base_layer/core/src/transactions/transaction.rs b/base_layer/core/src/transactions/transaction.rs index adf643d786..d309c1d78f 100644 --- a/base_layer/core/src/transactions/transaction.rs +++ b/base_layer/core/src/transactions/transaction.rs @@ -117,7 +117,7 @@ pub struct SideChainCheckpointFeatures { } /// Options for UTXO's -#[derive(Debug, Clone, Hash, PartialEq, Deserialize, Serialize, Eq)] +#[derive(Debug, Clone, Hash, PartialEq, Eq, Deserialize, Serialize)] pub struct OutputFeatures { /// Flags are the feature flags that differentiate between outputs, eg Coinbase all of which has different rules pub flags: OutputFlags, diff --git a/base_layer/core/src/validation/helpers.rs b/base_layer/core/src/validation/helpers.rs index 6d2f1091ad..49761df970 100644 --- a/base_layer/core/src/validation/helpers.rs +++ b/base_layer/core/src/validation/helpers.rs @@ -420,7 +420,9 @@ pub fn check_input_is_utxo<B: BlockchainBackend>(db: &B, input: &TransactionInpu } if let Some(unique_id) = &input.features.unique_id { - if let Some(utxo_hash) = db.fetch_utxo_by_unique_id(input.features.parent_public_key.as_ref(), unique_id)? { + if let Some(utxo_hash) = + db.fetch_utxo_by_unique_id(input.features.parent_public_key.as_ref(), unique_id, None)? + { // Check that it is the same utxo in which the unique_id was created if utxo_hash.output.hash() == output_hash { return Ok(()); @@ -475,7 +477,7 @@ pub fn check_not_duplicate_txos<B: BlockchainBackend>(db: &B, body: &AggregateBo Ok(()) } -/// This function checks that the outputs do not already exist in the UTxO set. +/// This function checks that the outputs do not already exist in the TxO set. pub fn check_not_duplicate_txo<B: BlockchainBackend>( db: &B, output: &TransactionOutput, @@ -501,7 +503,7 @@ pub fn check_not_duplicate_txo<B: BlockchainBackend>( if let Some(unique_id) = &output.features.unique_id { // Needs to have a mint flag if output.features.is_non_fungible_mint() && - db.fetch_utxo_by_unique_id(output.features.parent_public_key.as_ref(), unique_id)? + db.fetch_utxo_by_unique_id(output.features.parent_public_key.as_ref(), unique_id, None)?
.is_some() { warn!( diff --git a/base_layer/core/tests/chain_storage_tests/chain_storage.rs b/base_layer/core/tests/chain_storage_tests/chain_storage.rs index f18e9ab58d..0c717f4218 100644 --- a/base_layer/core/tests/chain_storage_tests/chain_storage.rs +++ b/base_layer/core/tests/chain_storage_tests/chain_storage.rs @@ -1052,39 +1052,42 @@ fn asset_unique_id() { ); generate_new_block(&mut db, &mut blocks, &mut outputs, vec![tx], &consensus_manager).unwrap(); + let unique_id1 = vec![1u8; 3]; // create a new NFT let (_, asset) = PublicKey::random_keypair(&mut rng); let features = OutputFeatures { flags: OutputFlags::MINT_NON_FUNGIBLE, parent_public_key: Some(asset.clone()), - unique_id: Some(vec![1, 2, 3]), + unique_id: Some(unique_id1.clone()), ..Default::default() }; // check the output is not stored in the db - let unique_id = features.unique_asset_id().unwrap(); - let output_hash = db + let output_info = db .db_read_access() .unwrap() - .fetch_utxo_by_unique_id(Some(&asset), unique_id) + .fetch_utxo_by_unique_id(Some(&asset), &unique_id1, None) .unwrap(); - assert!(output_hash.is_none()); + assert!(output_info.is_none()); // mint it to the chain let tx = txn_schema!( from: vec![outputs[1][0].clone()], - to: vec![0 * T], fee: 100.into(), lock: 0, features: features.clone() + to: vec![0 * T], fee: 20.into(), lock: 0, features: features ); - generate_new_block(&mut db, &mut blocks, &mut outputs, vec![tx], &consensus_manager).unwrap(); + + let result = generate_new_block(&mut db, &mut blocks, &mut outputs, vec![tx], &consensus_manager).unwrap(); + assert!(result.is_added()); // check it is in the db - let output_hash = db + let output_info = db .db_read_access() .unwrap() - .fetch_utxo_by_unique_id(Some(&asset), unique_id) + .fetch_utxo_by_unique_id(Some(&asset), &unique_id1, None) + .unwrap() .unwrap(); - assert!(output_hash.is_some()); + assert_eq!(output_info.output.as_transaction_output().unwrap().features, features); // attempt to mint the same unique id for the same asset let tx = txn_schema!( @@ -1093,68 +1096,73 @@ fn asset_unique_id() { ); let err = generate_new_block(&mut db, &mut blocks, &mut outputs, vec![tx], &consensus_manager).unwrap_err(); - assert!(matches!(err, ChainStorageError::KeyExists { .. 
})); + assert!(matches!(err, ChainStorageError::ValidationError { + source: ValidationError::ContainsDuplicateUtxoUniqueID + })); - // new unique id + // new unique id, does not exist yet + let unique_id2 = vec![2u8; 3]; let features = OutputFeatures { flags: OutputFlags::MINT_NON_FUNGIBLE, parent_public_key: Some(asset.clone()), - unique_id: Some(vec![4, 5, 6]), + unique_id: Some(unique_id2.clone()), ..Default::default() }; - let unique_id = features.unique_asset_id().unwrap(); - let output_hash = db + let output_info = db .db_read_access() .unwrap() - .fetch_utxo_by_unique_id(Some(&asset), unique_id) + .fetch_utxo_by_unique_id(Some(&asset), &unique_id2, None) .unwrap(); - assert!(output_hash.is_none()); + assert!(output_info.is_none()); - // mint + // mint unique_id2 let tx = txn_schema!( from: vec![outputs[1][2].clone()], - to: vec![0 * T], fee: 100.into(), lock: 0, features: features.clone() + to: vec![0 * T], fee: 20.into(), lock: 0, features: features ); - generate_new_block(&mut db, &mut blocks, &mut outputs, vec![tx], &consensus_manager).unwrap(); + let result = generate_new_block(&mut db, &mut blocks, &mut outputs, vec![tx], &consensus_manager).unwrap(); + assert!(result.is_added()); // check it is in the db - let output_hash = db + let output_info = db .db_read_access() .unwrap() - .fetch_utxo_by_unique_id(Some(&asset), unique_id) + .fetch_utxo_by_unique_id(Some(&asset), &unique_id2, None) + .unwrap() .unwrap(); - assert!(output_hash.is_some()); + assert_eq!(output_info.output.as_transaction_output().unwrap().features, features); // same id for a different asset is fine let (_, asset2) = PublicKey::random_keypair(&mut rng); let features = OutputFeatures { flags: OutputFlags::MINT_NON_FUNGIBLE, - parent_public_key: Some(asset2), - unique_id: Some(vec![4, 5, 6]), + parent_public_key: Some(asset2.clone()), + unique_id: Some(unique_id1.clone()), ..Default::default() }; - let unique_id = features.unique_asset_id().unwrap(); - let output_hash = db + let output_info = db .db_read_access() .unwrap() - .fetch_utxo_by_unique_id(Some(&asset), unique_id) + .fetch_utxo_by_unique_id(Some(&asset2), &unique_id1, None) .unwrap(); - assert!(output_hash.is_none()); + assert!(output_info.is_none()); // mint let tx = txn_schema!( from: vec![outputs[1][3].clone()], - to: vec![0 * T], fee: 100.into(), lock: 0, features: features.clone() + to: vec![0 * T], fee: 20.into(), lock: 0, features: features ); - generate_new_block(&mut db, &mut blocks, &mut outputs, vec![tx], &consensus_manager).unwrap(); + let result = generate_new_block(&mut db, &mut blocks, &mut outputs, vec![tx], &consensus_manager).unwrap(); + assert!(result.is_added()); // check it is in the db - let output_hash = db + let output_info = db .db_read_access() .unwrap() - .fetch_utxo_by_unique_id(Some(&asset), unique_id) + .fetch_utxo_by_unique_id(Some(&asset2), &unique_id1, None) + .unwrap() .unwrap(); - assert!(output_hash.is_some()); + assert_eq!(output_info.output.as_transaction_output().unwrap().features, features); } #[test] diff --git a/base_layer/core/tests/mempool.rs b/base_layer/core/tests/mempool.rs index 644a6df685..bfc0f5f7a6 100644 --- a/base_layer/core/tests/mempool.rs +++ b/base_layer/core/tests/mempool.rs @@ -1109,7 +1109,7 @@ async fn consensus_validation_unique_id() { }; let txs = vec![txn_schema!( from: vec![outputs[1][0].clone()], - to: vec![0 * T], fee: 100.into(), lock: 0, features: features.clone() + to: vec![0 * T], fee: 100.into(), lock: 0, features: features )]; generate_new_block(&mut store, &mut blocks, &mut 
outputs, txs, &consensus_manager).unwrap();
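Reviewer note on the new unique_id_index key layout (a sketch, not part of the patch): each entry is parent_public_key (32 bytes) || Blake2 hash of the unique_id || deleted-at height as a big-endian u64, with 0xFF x 8 (u64::MAX) used while the token is unspent. Because big-endian integers preserve numeric order under LMDB's lexicographic key comparison, fetch_utxo_by_unique_id can seek to the exact <prefix, height> key and, failing an exact hit, step back one entry to the most recent spend at or before that height. The minimal sketch below models that lookup with a BTreeMap standing in for the LMDB table; make_key and lookup are illustrative helpers, not APIs from this codebase:

```rust
use std::collections::BTreeMap;

// Illustrative stand-in for the unique_id_index: prefix = parent_pk || unique_id_hash,
// suffix = deleted-at height (big-endian), u64::MAX while the output is unspent.
fn make_key(prefix: &[u8], deleted_height: Option<u64>) -> Vec<u8> {
    let mut key = prefix.to_vec();
    key.extend_from_slice(&deleted_height.unwrap_or(u64::MAX).to_be_bytes());
    key
}

// Mirrors the seek_gte-then-prev logic in fetch_utxo_by_unique_id: exact hit first,
// otherwise the greatest key below the requested one that still matches the prefix.
fn lookup<'a>(
    index: &'a BTreeMap<Vec<u8>, &'a str>,
    prefix: &[u8],
    deleted_height: Option<u64>,
) -> Option<&'a str> {
    let key = make_key(prefix, deleted_height);
    if let Some(v) = index.get(&key) {
        return Some(*v);
    }
    index
        .range(..key)
        .next_back()
        .filter(|(k, _)| k.starts_with(prefix))
        .map(|(_, v)| *v)
}

fn main() {
    let prefix = b"asset-pk|unique-id-hash".to_vec();
    let mut index = BTreeMap::new();
    // Token spent at height 5 (asset_utxo1 -> asset_utxo2), then burnt at height 11.
    index.insert(make_key(&prefix, Some(5)), "asset_utxo1");
    index.insert(make_key(&prefix, Some(11)), "asset_utxo2");

    // No current (u64::MAX) entry remains after the burn, so the prev() fallback
    // still resolves the last TxO for the <parent_public_key, unique_id> tuple.
    assert_eq!(lookup(&index, &prefix, None), Some("asset_utxo2"));
    assert_eq!(lookup(&index, &prefix, Some(11)), Some("asset_utxo2")); // exact key
    assert_eq!(lookup(&index, &prefix, Some(7)), Some("asset_utxo1")); // prev() fallback
    assert_eq!(lookup(&index, &prefix, Some(5)), Some("asset_utxo1")); // exact key
    assert_eq!(lookup(&index, &prefix, Some(4)), None); // before the mint
    println!("point-in-time unique_id lookups behave as in the LMDB index");
}
```

Fed the entries from the test scenario (token spent at height 5, burnt at height 11), the sketch reproduces the assertions in it_finds_the_utxo_by_unique_id_at_deleted_height, including the None results for heights before the mint.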
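A second note on CompositeKey, again a sketch under stated assumptions: the utxos/kernels/inputs tables drop the old hex-string keys built with format!("{}-{:010}-{}", ...) in favour of raw header_hash || mmr_position (big-endian) || hash bytes. The hypothetical helper below mirrors the byte layout of CompositeKey::new (it is not the real method) to show why the big-endian position keeps lexicographic and numeric order in agreement, which is what lets lmdb_fetch_matching_after seek with the raw header hash instead of hash.to_hex():

```rust
// Sketch of the CompositeKey byte layout; `composite_key` is an illustrative helper.
fn composite_key(header_hash: &[u8], mmr_position: u32, hash: &[u8]) -> Vec<u8> {
    let mut key = Vec::with_capacity(header_hash.len() + std::mem::size_of::<u32>() + hash.len());
    key.extend_from_slice(header_hash);
    // Big-endian bytes compare in the same order as the numbers they encode; a
    // decimal string would sort "9" after "10" without the zero padding the old
    // `{:010}` format string had to add.
    key.extend_from_slice(&mmr_position.to_be_bytes());
    key.extend_from_slice(hash);
    key
}

fn main() {
    let header = [0xab_u8; 32];
    let k9 = composite_key(&header, 9, b"kernel-a");
    let k10 = composite_key(&header, 10, b"kernel-b");
    // Lexicographic byte order agrees with numeric order of the MMR position...
    assert!(k9 < k10);
    // ...and every key in a block shares the raw header-hash prefix, so a prefix
    // cursor seeded with `&header` walks exactly that block's entries.
    assert!(k9.starts_with(&header) && k10.starts_with(&header));
    println!("composite keys group by header hash and sort by mmr position");
}
```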