Skip to content

Commit

Permalink
Merge branch 'origin/tomas/fix-block-query' (#1534)
Browse files Browse the repository at this point in the history
* origin/tomas/fix-block-query:
  changelog: add #1534
  [ci] wasm checksums update
  test/storage: fill-in block header for commit if missing
  test/e2e/ledger/genesis_validators: validator wait for tx block height
  rpc: use the new shell last_block query to find last committed block
  core/storage: impl Display for BlockHash
  core/time: impl Display for DateTimeUtc
  shared/queries/shell: expose the last committed block from storage
  core/storage: Store last committed block's hash and time with its height
  • Loading branch information
Fraccaman committed Jun 12, 2023
2 parents 02d4c1c + 0b944d7 commit 63f0b74
Show file tree
Hide file tree
Showing 17 changed files with 224 additions and 89 deletions.
2 changes: 2 additions & 0 deletions .changelog/unreleased/bug-fixes/1534-fix-block-query.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
- Fix a client block query to avoid seeing pre-committed blocks.
  ([\#1534](https://github.com/anoma/namada/pull/1534))
15 changes: 13 additions & 2 deletions apps/src/lib/client/rpc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -77,8 +77,19 @@ pub async fn query_and_print_epoch<
/// Query the last committed block
pub async fn query_block<C: namada::ledger::queries::Client + Sync>(
client: &C,
) -> crate::facade::tendermint_rpc::endpoint::block::Response {
namada::ledger::rpc::query_block(client).await
) {
let block = namada::ledger::rpc::query_block(client).await;
match block {
Some(block) => {
println!(
"Last committed block ID: {}, height: {}, time: {}",
block.hash, block.height, block.time
);
}
None => {
println!("No block has been committed yet.");
}
}
}

/// Query the results of the last committed block
Expand Down
2 changes: 1 addition & 1 deletion apps/src/lib/node/ledger/shell/finalize_block.rs
Original file line number Diff line number Diff line change
Expand Up @@ -536,7 +536,7 @@ where
hash: BlockHash,
byzantine_validators: Vec<Evidence>,
) -> (BlockHeight, bool) {
let height = self.wl_storage.storage.last_height + 1;
let height = self.wl_storage.storage.get_last_block_height() + 1;

self.gas_meter.reset();

Expand Down
2 changes: 1 addition & 1 deletion apps/src/lib/node/ledger/shell/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -663,7 +663,7 @@ where
tracing::info!(
"Committed block hash: {}, height: {}",
root,
self.wl_storage.storage.last_height,
self.wl_storage.storage.get_last_block_height(),
);
response.data = root.0;
response
Expand Down
6 changes: 4 additions & 2 deletions apps/src/lib/node/ledger/storage/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -354,14 +354,16 @@ mod tests {
let is_last_write = blocks_write_value.last().unwrap().1;

// The upper bound is arbitrary.
for height in storage.last_height.0..storage.last_height.0 + 10 {
for height in storage.get_last_block_height().0
..storage.get_last_block_height().0 + 10
{
let height = BlockHeight::from(height);
let (value_bytes, _gas) = storage.read_with_height(&key, height)?;
if is_last_write {
let value_bytes =
value_bytes.expect("Should have been written");
let value: BlockHeight = types::decode(value_bytes).unwrap();
assert_eq!(value, storage.last_height);
assert_eq!(value, storage.get_last_block_height());
} else if value_bytes.is_some() {
let value: BlockHeight =
types::decode(value_bytes.unwrap()).unwrap();
Expand Down
56 changes: 40 additions & 16 deletions apps/src/lib/node/ledger/storage/rocksdb.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
//! - `root`: root hash
//! - `store`: the tree's store
//! - `hash`: block hash
//! - `time`: block time
//! - `epoch`: block epoch
//! - `address_gen`: established address generator
//! - `header`: block's header
Expand Down Expand Up @@ -570,6 +571,7 @@ impl DB for RocksDB {
read_opts.set_iterate_upper_bound(next_height_prefix);
let mut merkle_tree_stores = MerkleTreeStoresRead::default();
let mut hash = None;
let mut time = None;
let mut epoch = None;
let mut pred_epochs = None;
let mut address_gen = None;
Expand Down Expand Up @@ -618,6 +620,11 @@ impl DB for RocksDB {
types::decode(bytes).map_err(Error::CodingError)?,
)
}
"time" => {
time = Some(
types::decode(bytes).map_err(Error::CodingError)?,
)
}
"epoch" => {
epoch = Some(
types::decode(bytes).map_err(Error::CodingError)?,
Expand All @@ -638,22 +645,27 @@ impl DB for RocksDB {
None => unknown_key_error(path)?,
}
}
match (hash, epoch, pred_epochs, address_gen) {
(Some(hash), Some(epoch), Some(pred_epochs), Some(address_gen)) => {
Ok(Some(BlockStateRead {
merkle_tree_stores,
hash,
height,
epoch,
pred_epochs,
results,
next_epoch_min_start_height,
next_epoch_min_start_time,
update_epoch_blocks_delay,
address_gen,
tx_queue,
}))
}
match (hash, time, epoch, pred_epochs, address_gen) {
(
Some(hash),
Some(time),
Some(epoch),
Some(pred_epochs),
Some(address_gen),
) => Ok(Some(BlockStateRead {
merkle_tree_stores,
hash,
height,
time,
epoch,
pred_epochs,
results,
next_epoch_min_start_height,
next_epoch_min_start_time,
update_epoch_blocks_delay,
address_gen,
tx_queue,
})),
_ => Err(Error::Temporary {
error: "Essential data couldn't be read from the DB"
.to_string(),
Expand All @@ -672,6 +684,7 @@ impl DB for RocksDB {
header,
hash,
height,
time,
epoch,
pred_epochs,
next_epoch_min_start_height,
Expand Down Expand Up @@ -803,6 +816,15 @@ impl DB for RocksDB {
.0
.put_cf(block_cf, key.to_string(), types::encode(&hash));
}
// Block time
{
let key = prefix_key
.push(&"time".to_owned())
.map_err(Error::KeyError)?;
batch
.0
.put_cf(block_cf, key.to_string(), types::encode(&time));
}
// Block epoch
{
let key = prefix_key
Expand Down Expand Up @@ -1440,6 +1462,7 @@ mod test {
let merkle_tree = MerkleTree::<Sha256Hasher>::default();
let merkle_tree_stores = merkle_tree.stores();
let hash = BlockHash::default();
let time = DateTimeUtc::now();
let epoch = Epoch::default();
let pred_epochs = Epochs::default();
let height = BlockHeight::default();
Expand All @@ -1454,6 +1477,7 @@ mod test {
header: None,
hash: &hash,
height,
time,
epoch,
results: &results,
pred_epochs: &pred_epochs,
Expand Down
55 changes: 38 additions & 17 deletions core/src/ledger/storage/mockdb.rs
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,7 @@ impl DB for MockDB {
let upper_prefix = format!("{}/", height.next_height().raw());
let mut merkle_tree_stores = MerkleTreeStoresRead::default();
let mut hash = None;
let mut time = None;
let mut epoch = None;
let mut pred_epochs = None;
let mut address_gen = None;
Expand Down Expand Up @@ -137,6 +138,11 @@ impl DB for MockDB {
types::decode(bytes).map_err(Error::CodingError)?,
)
}
"time" => {
time = Some(
types::decode(bytes).map_err(Error::CodingError)?,
)
}
"epoch" => {
epoch = Some(
types::decode(bytes).map_err(Error::CodingError)?,
Expand All @@ -157,23 +163,28 @@ impl DB for MockDB {
None => unknown_key_error(path)?,
}
}
match (hash, epoch, pred_epochs, address_gen) {
(Some(hash), Some(epoch), Some(pred_epochs), Some(address_gen)) => {
Ok(Some(BlockStateRead {
merkle_tree_stores,
hash,
height,
epoch,
pred_epochs,
next_epoch_min_start_height,
next_epoch_min_start_time,
update_epoch_blocks_delay,
address_gen,
results,
#[cfg(feature = "ferveo-tpke")]
tx_queue,
}))
}
match (hash, time, epoch, pred_epochs, address_gen) {
(
Some(hash),
Some(time),
Some(epoch),
Some(pred_epochs),
Some(address_gen),
) => Ok(Some(BlockStateRead {
merkle_tree_stores,
hash,
height,
time,
epoch,
pred_epochs,
next_epoch_min_start_height,
next_epoch_min_start_time,
update_epoch_blocks_delay,
address_gen,
results,
#[cfg(feature = "ferveo-tpke")]
tx_queue,
})),
_ => Err(Error::Temporary {
error: "Essential data couldn't be read from the DB"
.to_string(),
Expand All @@ -191,6 +202,7 @@ impl DB for MockDB {
merkle_tree_stores,
header,
hash,
time,
height,
epoch,
pred_epochs,
Expand Down Expand Up @@ -270,6 +282,15 @@ impl DB for MockDB {
.borrow_mut()
.insert(key.to_string(), types::encode(&hash));
}
// Block time
{
let key = prefix_key
.push(&"time".to_owned())
.map_err(Error::KeyError)?;
self.0
.borrow_mut()
.insert(key.to_string(), types::encode(&time));
}
// Block epoch
{
let key = prefix_key
Expand Down
Loading

0 comments on commit 63f0b74

Please sign in to comment.