Fix benchmarks (paritytech#1919)
* fixed benchmarks broken by the new validation that rejects storage proofs with excessive trie nodes and justifications with extra prevotes

* update weights

* fmt

* accidental paste

* revert changes to millau runtime (separate PR)

* revert comment change
svyatonik authored and serban300 committed Apr 9, 2024
1 parent 74986c0 commit dadb54f
Showing 11 changed files with 159 additions and 173 deletions.
7 changes: 4 additions & 3 deletions bridges/bin/runtime-common/src/messages_benchmarking.rs
@@ -25,7 +25,7 @@ use crate::{
AccountIdOf, BridgedChain, HashOf, HasherOf, MessageBridge, ThisChain,
},
messages_generation::{
-    encode_all_messages, encode_lane_data, grow_trie, prepare_messages_storage_proof,
+    encode_all_messages, encode_lane_data, grow_trie_leaf_value, prepare_messages_storage_proof,
},
};

@@ -204,11 +204,12 @@ where
{
let mut trie =
TrieDBMutBuilderV1::<HasherOf<BridgedChain<B>>>::new(&mut mdb, &mut root).build();
-        trie.insert(&storage_key, &params.inbound_lane_data.encode())
+        let inbound_lane_data =
+            grow_trie_leaf_value(params.inbound_lane_data.encode(), params.size);
+        trie.insert(&storage_key, &inbound_lane_data)
.map_err(|_| "TrieMut::insert has failed")
.expect("TrieMut::insert should not fail in benchmarks");
}
-    root = grow_trie(root, &mut mdb, params.size);

// generate storage proof to be delivered to This chain
let storage_proof = record_all_trie_keys::<LayoutV1<HasherOf<BridgedChain<B>>>, _>(&mdb, &root)
52 changes: 16 additions & 36 deletions bridges/bin/runtime-common/src/messages_generation.rs
@@ -25,7 +25,6 @@ use bp_messages::{
};
use bp_runtime::{record_all_trie_keys, RawStorageProof, StorageProofSize};
use codec::Encode;
-use sp_core::Hasher;
use sp_std::{ops::RangeInclusive, prelude::*};
use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, TrieMut};

@@ -65,10 +64,15 @@ where
TrieDBMutBuilderV1::<HasherOf<BridgedChain<B>>>::new(&mut mdb, &mut root).build();

// insert messages
-    for nonce in message_nonces {
+    for (i, nonce) in message_nonces.into_iter().enumerate() {
let message_key = MessageKey { lane_id: lane, nonce };
let message_payload = match encode_message(nonce, &message_payload) {
-            Some(message_payload) => message_payload,
+            Some(message_payload) =>
+                if i == 0 {
+                    grow_trie_leaf_value(message_payload, size)
+                } else {
+                    message_payload
+                },
None => continue,
};
let storage_key = storage_keys::message_key(
@@ -94,46 +98,22 @@
storage_keys.push(storage_key);
}
}
-    root = grow_trie(root, &mut mdb, size);

// generate storage proof to be delivered to This chain
let storage_proof = record_all_trie_keys::<LayoutV1<HasherOf<BridgedChain<B>>>, _>(&mdb, &root)
.map_err(|_| "record_all_trie_keys has failed")
.expect("record_all_trie_keys should not fail in benchmarks");

(root, storage_proof)
}

-/// Populate trie with dummy keys+values until trie has at least given size.
-pub fn grow_trie<H: Hasher>(
-    mut root: H::Out,
-    mdb: &mut MemoryDB<H>,
-    trie_size: StorageProofSize,
-) -> H::Out {
-    let (iterations, leaf_size, minimal_trie_size) = match trie_size {
-        StorageProofSize::Minimal(_) => return root,
-        StorageProofSize::HasLargeLeaf(size) => (1, size, size),
-        StorageProofSize::HasExtraNodes(size) => (8, 1, size),
-    };
-
-    let mut key_index = 0;
-    loop {
-        // generate storage proof to be delivered to This chain
-        let storage_proof = record_all_trie_keys::<LayoutV1<H>, _>(mdb, &root)
-            .map_err(|_| "record_all_trie_keys has failed")
-            .expect("record_all_trie_keys should not fail in benchmarks");
-        let size: usize = storage_proof.iter().map(|n| n.len()).sum();
-        if size > minimal_trie_size as _ {
-            return root
-        }
-
-        let mut trie = TrieDBMutBuilderV1::<H>::from_existing(mdb, &mut root).build();
-        for _ in 0..iterations {
-            trie.insert(&key_index.encode(), &vec![42u8; leaf_size as _])
-                .map_err(|_| "TrieMut::insert has failed")
-                .expect("TrieMut::insert should not fail in benchmarks");
-            key_index += 1;
-        }
-        trie.commit();
+/// Add extra data to the trie leaf value so that it'll be of given size.
+pub fn grow_trie_leaf_value(mut value: Vec<u8>, size: StorageProofSize) -> Vec<u8> {
+    match size {
+        StorageProofSize::Minimal(_) => (),
+        StorageProofSize::HasLargeLeaf(size) if size as usize > value.len() => {
+            value.extend(sp_std::iter::repeat(42u8).take(size as usize - value.len()));
+        },
+        StorageProofSize::HasLargeLeaf(_) => (),
    }
+    value
}
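For a concrete sense of the new helper, here is a minimal, runnable sketch of its padding behavior. The `StorageProofSize` enum below is a simplified stand-in for the real `bp_runtime` type (assumed `Copy`, and reduced to the two variants used after this commit); the function body mirrors `grow_trie_leaf_value` above:

```rust
// Simplified stand-in for bp_runtime::StorageProofSize (illustration only).
#[derive(Clone, Copy)]
enum StorageProofSize {
    Minimal(u32),
    HasLargeLeaf(u32),
}

/// Mirrors the new helper: pad the value with 42u8 filler until it reaches
/// the requested size; never shrink a value that is already large enough.
fn grow_trie_leaf_value(mut value: Vec<u8>, size: StorageProofSize) -> Vec<u8> {
    match size {
        StorageProofSize::Minimal(_) => (),
        StorageProofSize::HasLargeLeaf(size) if size as usize > value.len() => {
            value.extend(core::iter::repeat(42u8).take(size as usize - value.len()));
        },
        StorageProofSize::HasLargeLeaf(_) => (),
    }
    value
}

fn main() {
    // A 3-byte value is padded up to the 1024-byte target...
    let grown = grow_trie_leaf_value(vec![1, 2, 3], StorageProofSize::HasLargeLeaf(1024));
    assert_eq!(grown.len(), 1024);
    // ...a value already above the target is left untouched...
    let big = grow_trie_leaf_value(vec![0; 2048], StorageProofSize::HasLargeLeaf(1024));
    assert_eq!(big.len(), 2048);
    // ...and Minimal requests no padding at all.
    assert_eq!(grow_trie_leaf_value(vec![1, 2, 3], StorageProofSize::Minimal(0)).len(), 3);
}
```

This is the crux of the fix: the deleted `grow_trie` inflated proofs by appending dummy trie nodes, which bridged chains now reject as excessive, whereas padding a single real leaf reaches the same proof-size targets without changing the node count.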
13 changes: 9 additions & 4 deletions bridges/bin/runtime-common/src/parachains_benchmarking.rs
@@ -19,7 +19,8 @@
#![cfg(feature = "runtime-benchmarks")]

use crate::{
-    messages_benchmarking::insert_header_to_grandpa_pallet, messages_generation::grow_trie,
+    messages_benchmarking::insert_header_to_grandpa_pallet,
+    messages_generation::grow_trie_leaf_value,
};

use bp_parachains::parachain_head_storage_key_at_source;
@@ -59,17 +60,21 @@ where
TrieDBMutBuilderV1::<RelayBlockHasher>::new(&mut mdb, &mut state_root).build();

// insert parachain heads
-    for parachain in parachains {
+    for (i, parachain) in parachains.into_iter().enumerate() {
let storage_key =
parachain_head_storage_key_at_source(R::ParasPalletName::get(), *parachain);
-        trie.insert(&storage_key.0, &parachain_head.encode())
+        let leaf_data = if i == 0 {
+            grow_trie_leaf_value(parachain_head.encode(), size)
+        } else {
+            parachain_head.encode()
+        };
+        trie.insert(&storage_key.0, &leaf_data)
.map_err(|_| "TrieMut::insert has failed")
.expect("TrieMut::insert should not fail in benchmarks");
storage_keys.push(storage_key);
parachain_heads.push((*parachain, parachain_head.hash()))
}
}
-    state_root = grow_trie(state_root, &mut mdb, size);

// generate heads storage proof
let proof = record_all_trie_keys::<LayoutV1<RelayBlockHasher>, _>(&mdb, &state_root)
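The parachains generator applies the same "grow only the first leaf" pattern as the messages generator above, via `enumerate` and an `i == 0` check. A hypothetical refactor, not part of this commit, could factor that into a helper (reusing the sketch types from the earlier example; `maybe_grow_first` is an invented name):

```rust
// Hypothetical helper (not in the commit): apply the proof-size target to
// the first encoded leaf only, leaving the rest at their natural size.
fn maybe_grow_first(i: usize, encoded: Vec<u8>, size: StorageProofSize) -> Vec<u8> {
    if i == 0 {
        grow_trie_leaf_value(encoded, size)
    } else {
        encoded
    }
}
```

Growing a single leaf appears to be enough because the benchmark only needs the total proof size to hit the target; inflating every leaf would multiply the effect by the number of entries.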
13 changes: 9 additions & 4 deletions bridges/modules/grandpa/src/benchmarking.rs
@@ -41,6 +41,7 @@

use crate::*;

+use bp_header_chain::justification::required_justification_precommits;
use bp_runtime::BasicOperatingMode;
use bp_test_utils::{
accounts, make_justification_for_header, JustificationGeneratorParams, TEST_GRANDPA_ROUND,
@@ -66,21 +67,25 @@ const MAX_VOTE_ANCESTRIES_RANGE_END: u32 =
MAX_VOTE_ANCESTRIES_RANGE_BEGIN + MAX_VOTE_ANCESTRIES_RANGE_BEGIN;

// the same with validators - if there are too many validators, let's run benchmarks on subrange
-fn validator_set_range_end<T: Config<I>, I: 'static>() -> u32 {
+fn precommits_range_end<T: Config<I>, I: 'static>() -> u32 {
let max_bridged_authorities = T::BridgedChain::MAX_AUTHORITIES_COUNT;
if max_bridged_authorities > 128 {
sp_std::cmp::max(128, max_bridged_authorities / 5)
} else {
max_bridged_authorities
}
-    }
+    };
+    required_justification_precommits(max_bridged_authorities)
}

/// Prepare header and its justification to submit using `submit_finality_proof`.
fn prepare_benchmark_data<T: Config<I>, I: 'static>(
precommits: u32,
ancestors: u32,
) -> (BridgedHeader<T, I>, GrandpaJustification<BridgedHeader<T, I>>) {
-    let authority_list = accounts(precommits as u16)
+    // going from precommits to total authorities count
+    let total_authorities_count = (3 * precommits - 1) / 2;
+
+    let authority_list = accounts(total_authorities_count as u16)
.iter()
.map(|id| (AuthorityId::from(*id), 1))
.collect::<Vec<_>>();
@@ -114,7 +119,7 @@ benchmarks_instance_pallet! {
// This is the "gold standard" benchmark for this extrinsic, and it's what should be used to
// annotate the weight in the pallet.
submit_finality_proof {
-        let p in 1 .. validator_set_range_end::<T, I>();
+        let p in 1 .. precommits_range_end::<T, I>();
let v in MAX_VOTE_ANCESTRIES_RANGE_BEGIN..MAX_VOTE_ANCESTRIES_RANGE_END;
let caller: T::AccountId = whitelisted_caller();
let (header, justification) = prepare_benchmark_data::<T, I>(p, v);
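The `(3 * precommits - 1) / 2` conversion above inverts the supermajority requirement. Assuming `required_justification_precommits` follows GRANDPA's usual threshold — for `n` authorities, tolerate `f = (n - 1) / 3` faulty voters and require `n - f` precommits — the round trip is exact in integer arithmetic, as this sketch checks:

```rust
/// Assumed shape of `required_justification_precommits`: with `n` authorities
/// and up to `f = (n - 1) / 3` faulty voters, a justification needs `n - f`
/// precommits (strictly more than two thirds of the set).
fn required_precommits(n: u32) -> u32 {
    n - (n - 1) / 3
}

/// The inverse used by `prepare_benchmark_data` in this commit.
fn total_authorities(precommits: u32) -> u32 {
    (3 * precommits - 1) / 2
}

fn main() {
    for n in 1u32..=1024 {
        let p = required_precommits(n);
        // Converting precommits back to an authority count yields a set
        // whose requirement is exactly `p` again.
        assert_eq!(required_precommits(total_authorities(p)), p);
    }
    // Example: 100 authorities tolerate 33 faults, so 67 precommits are
    // required, and (3 * 67 - 1) / 2 == 100 recovers the set size.
    assert_eq!(required_precommits(100), 67);
    assert_eq!(total_authorities(67), 100);
}
```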
34 changes: 17 additions & 17 deletions bridges/modules/grandpa/src/weights.rs
@@ -17,7 +17,7 @@
//! Autogenerated weights for pallet_bridge_grandpa
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-02-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2023-03-01, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
//! HOSTNAME: `covid`, CPU: `11th Gen Intel(R) Core(TM) i7-11800H @ 2.30GHz`
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
@@ -93,19 +93,19 @@ impl<T: frame_system::Config> WeightInfo for BridgeWeight<T> {
/// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
-    /// The range of component `p` is `[1, 5]`.
+    /// The range of component `p` is `[1, 4]`.
///
/// The range of component `v` is `[50, 100]`.
fn submit_finality_proof(p: u32, v: u32) -> Weight {
// Proof Size summary in bytes:
-        // Measured: `416 + p * (40 ±0)`
+        // Measured: `394 + p * (60 ±0)`
// Estimated: `4745`
-        // Minimum execution time: 221_703 nanoseconds.
-        Weight::from_parts(39_358_497, 4745)
-            // Standard Error: 85_573
-            .saturating_add(Weight::from_ref_time(40_593_280).saturating_mul(p.into()))
-            // Standard Error: 7_808
-            .saturating_add(Weight::from_ref_time(1_529_400).saturating_mul(v.into()))
+        // Minimum execution time: 221_810 nanoseconds.
+        Weight::from_parts(33_157_392, 4745)
+            // Standard Error: 109_045
+            .saturating_add(Weight::from_ref_time(41_100_656).saturating_mul(p.into()))
+            // Standard Error: 7_754
+            .saturating_add(Weight::from_ref_time(1_534_466).saturating_mul(v.into()))
.saturating_add(T::DbWeight::get().reads(6_u64))
.saturating_add(T::DbWeight::get().writes(6_u64))
}
@@ -148,19 +148,19 @@ impl WeightInfo for () {
/// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68),
/// added: 2048, mode: MaxEncodedLen)
///
-    /// The range of component `p` is `[1, 5]`.
+    /// The range of component `p` is `[1, 4]`.
///
/// The range of component `v` is `[50, 100]`.
fn submit_finality_proof(p: u32, v: u32) -> Weight {
// Proof Size summary in bytes:
-        // Measured: `416 + p * (40 ±0)`
+        // Measured: `394 + p * (60 ±0)`
// Estimated: `4745`
-        // Minimum execution time: 221_703 nanoseconds.
-        Weight::from_parts(39_358_497, 4745)
-            // Standard Error: 85_573
-            .saturating_add(Weight::from_ref_time(40_593_280).saturating_mul(p.into()))
-            // Standard Error: 7_808
-            .saturating_add(Weight::from_ref_time(1_529_400).saturating_mul(v.into()))
+        // Minimum execution time: 221_810 nanoseconds.
+        Weight::from_parts(33_157_392, 4745)
+            // Standard Error: 109_045
+            .saturating_add(Weight::from_ref_time(41_100_656).saturating_mul(p.into()))
+            // Standard Error: 7_754
+            .saturating_add(Weight::from_ref_time(1_534_466).saturating_mul(v.into()))
.saturating_add(RocksDbWeight::get().reads(6_u64))
.saturating_add(RocksDbWeight::get().writes(6_u64))
}
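To read the regenerated numbers: the extrinsic's `ref_time` is a base term plus linear terms in `p` (precommits) and `v` (vote ancestries), on top of the 6 database reads and 6 writes. A plain-arithmetic sketch of the worst case over the benchmarked ranges (units are `ref_time` weight units, nominally picoseconds; the constants are the ones above):

```rust
fn main() {
    // Constants from the regenerated submit_finality_proof weight.
    let base: u64 = 33_157_392;
    let per_precommit: u64 = 41_100_656;
    let per_vote_ancestry: u64 = 1_534_466;

    // Worst case over the benchmarked ranges: p in [1, 4], v in [50, 100].
    let (p, v) = (4u64, 100u64);
    let ref_time = base + p * per_precommit + v * per_vote_ancestry;

    // 33_157_392 + 4 * 41_100_656 + 100 * 1_534_466 = 351_006_616,
    // i.e. roughly 0.35 ms of ref_time before the DB read/write costs.
    assert_eq!(ref_time, 351_006_616);
}
```

The `p` range shrinking from `[1, 5]` to `[1, 4]` is consistent with the component now counting precommits rather than authorities: for a 5-authority set, `5 - (5 - 1) / 3 = 4`.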
4 changes: 2 additions & 2 deletions bridges/modules/messages/src/benchmarking.rs
@@ -238,7 +238,7 @@ benchmarks_instance_pallet! {
lane: T::bench_lane_id(),
message_nonces: 21..=21,
outbound_lane_data: None,
-            size: StorageProofSize::HasExtraNodes(1024),
+            size: StorageProofSize::HasLargeLeaf(1024),
});
}: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight)
verify {
@@ -272,7 +272,7 @@ benchmarks_instance_pallet! {
lane: T::bench_lane_id(),
message_nonces: 21..=21,
outbound_lane_data: None,
-            size: StorageProofSize::HasExtraNodes(16 * 1024),
+            size: StorageProofSize::HasLargeLeaf(16 * 1024),
});
}: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight)
verify {