From ffcd5232a89b556dcf558db429c6c3d9a020cc27 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 23 Sep 2022 19:51:57 +0200 Subject: [PATCH 01/42] Independence of Slot-based algorithms from system Timestamp (#12224) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Remove timestamp from SlotInfo * Expose as millis instead of secs * Nits * Fix test after field removal * Yet another test fix * On the fly timestamp computation * Removed slot timestamp from logs * Removed reference to timestamp from slots subsystem * Slot based algorithm tests do not require timstamp inherent anymore * Remove junk files * Further tests cleanup * Trigger pipeline * Apply code suggestions * Trigger pipeline Co-authored-by: André Silva --- Cargo.lock | 1 - bin/node-template/node/src/service.rs | 4 +- bin/node/cli/src/service.rs | 4 +- client/consensus/aura/src/lib.rs | 26 ++++++------- client/consensus/babe/src/tests.rs | 22 +++++------ client/consensus/slots/Cargo.toml | 1 - client/consensus/slots/src/lib.rs | 56 +++++++++------------------ client/consensus/slots/src/slots.rs | 14 +------ primitives/timestamp/src/lib.rs | 25 ++++++------ 9 files changed, 56 insertions(+), 97 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d1349ce488fa7..74784eb9a6c24 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8246,7 +8246,6 @@ dependencies = [ "sp-inherents", "sp-runtime", "sp-state-machine", - "sp-timestamp", "substrate-test-runtime-client", "thiserror", ] diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index caa01761636df..6ec9a33749a69 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -126,7 +126,7 @@ pub fn new_partial( slot_duration, ); - Ok((timestamp, slot)) + Ok((slot, timestamp)) }, spawner: &task_manager.spawn_essential_handle(), registry: config.prometheus_registry(), @@ -269,7 +269,7 @@ pub fn new_full(mut config: Configuration) -> Result 
slot_duration, ); - Ok((timestamp, slot)) + Ok((slot, timestamp)) }, force_authoring, backoff_authoring_blocks, diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index f2b4feb58075f..a3098eac6402f 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -223,7 +223,7 @@ pub fn new_partial( let uncles = sp_authorship::InherentDataProvider::<::Header>::check_inherents(); - Ok((timestamp, slot, uncles)) + Ok((slot, timestamp, uncles)) }, &task_manager.spawn_essential_handle(), config.prometheus_registry(), @@ -453,7 +453,7 @@ pub fn new_full_base( &parent, )?; - Ok((timestamp, slot, uncles, storage_proof)) + Ok((slot, timestamp, uncles, storage_proof)) } }, force_authoring, diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 0bdc663815051..c538200bb315c 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -569,7 +569,7 @@ mod tests { traits::{Block as BlockT, Header as _}, Digest, }; - use sp_timestamp::InherentDataProvider as TimestampInherentDataProvider; + use sp_timestamp::Timestamp; use std::{ task::Poll, time::{Duration, Instant}, @@ -579,6 +579,8 @@ mod tests { TestClient, }; + const SLOT_DURATION_MS: u64 = 1000; + type Error = sp_blockchain::Error; struct DummyFactory(Arc); @@ -619,8 +621,6 @@ mod tests { } } - const SLOT_DURATION: u64 = 1000; - type AuraVerifier = import_queue::AuraVerifier< PeersFullClient, AuthorityPair, @@ -628,7 +628,7 @@ mod tests { dyn CreateInherentDataProviders< TestBlock, (), - InherentDataProviders = (TimestampInherentDataProvider, InherentDataProvider), + InherentDataProviders = (InherentDataProvider,), >, >, >; @@ -648,17 +648,15 @@ mod tests { let client = client.as_client(); let slot_duration = slot_duration(&*client).expect("slot duration available"); - assert_eq!(slot_duration.as_millis() as u64, SLOT_DURATION); + assert_eq!(slot_duration.as_millis() as u64, SLOT_DURATION_MS); import_queue::AuraVerifier::new( client, 
Box::new(|_, _| async { - let timestamp = TimestampInherentDataProvider::from_system_time(); let slot = InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - SlotDuration::from_millis(6000), + Timestamp::current(), + SlotDuration::from_millis(SLOT_DURATION_MS), ); - - Ok((timestamp, slot)) + Ok((slot,)) }), CheckForEquivocation::Yes, None, @@ -736,13 +734,12 @@ mod tests { sync_oracle: DummyOracle, justification_sync_link: (), create_inherent_data_providers: |_, _| async { - let timestamp = TimestampInherentDataProvider::from_system_time(); let slot = InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - SlotDuration::from_millis(6000), + Timestamp::current(), + SlotDuration::from_millis(SLOT_DURATION_MS), ); - Ok((timestamp, slot)) + Ok((slot,)) }, force_authoring: false, backoff_authoring_blocks: Some( @@ -875,7 +872,6 @@ mod tests { let res = executor::block_on(worker.on_slot(SlotInfo { slot: 0.into(), - timestamp: 0.into(), ends_at: Instant::now() + Duration::from_secs(100), inherent_data: InherentData::new(), duration: Duration::from_millis(1000), diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index a3467e0020200..58f5e7b8eb6d4 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -43,7 +43,7 @@ use sp_runtime::{ generic::{Digest, DigestItem}, traits::Block as BlockT, }; -use sp_timestamp::InherentDataProvider as TimestampInherentDataProvider; +use sp_timestamp::Timestamp; use std::{cell::RefCell, task::Poll, time::Duration}; type Item = DigestItem; @@ -68,6 +68,8 @@ type Mutator = Arc; type BabeBlockImport = PanickingBlockImport>>; +const SLOT_DURATION_MS: u64 = 1000; + #[derive(Clone)] struct DummyFactory { client: Arc, @@ -239,7 +241,7 @@ pub struct TestVerifier { dyn CreateInherentDataProviders< TestBlock, (), - InherentDataProviders = (TimestampInherentDataProvider, InherentDataProvider), + InherentDataProviders = (InherentDataProvider,), >, >, 
>, @@ -321,13 +323,11 @@ impl TestNetFactory for BabeTestNet { client: client.clone(), select_chain: longest_chain, create_inherent_data_providers: Box::new(|_, _| async { - let timestamp = TimestampInherentDataProvider::from_system_time(); let slot = InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - SlotDuration::from_millis(6000), + Timestamp::current(), + SlotDuration::from_millis(SLOT_DURATION_MS), ); - - Ok((timestamp, slot)) + Ok((slot,)) }), config: data.link.config.clone(), epoch_changes: data.link.epoch_changes.clone(), @@ -433,13 +433,11 @@ fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static env: environ, sync_oracle: DummyOracle, create_inherent_data_providers: Box::new(|_, _| async { - let timestamp = TimestampInherentDataProvider::from_system_time(); let slot = InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - SlotDuration::from_millis(6000), + Timestamp::current(), + SlotDuration::from_millis(SLOT_DURATION_MS), ); - - Ok((timestamp, slot)) + Ok((slot,)) }), force_authoring: false, backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index 208e31971257d..fae499ad7c7c6 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -31,7 +31,6 @@ sp-core = { version = "6.0.0", path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } sp-state-machine = { version = "0.12.0", path = "../../../primitives/state-machine" } -sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 
b9f8c03f2ac88..7c5d5d4a73bc1 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -42,7 +42,6 @@ use sp_consensus::{Proposal, Proposer, SelectChain, SyncOracle}; use sp_consensus_slots::{Slot, SlotDuration}; use sp_inherents::CreateInherentDataProviders; use sp_runtime::traits::{Block as BlockT, HashFor, Header as HeaderT}; -use sp_timestamp::Timestamp; use std::{fmt::Debug, ops::Deref, time::Duration}; /// The changes that need to applied to the storage to create the state for a block. @@ -252,7 +251,7 @@ pub trait SimpleSlotWorker { where Self: Sync, { - let (timestamp, slot) = (slot_info.timestamp, slot_info.slot); + let slot = slot_info.slot; let telemetry = self.telemetry(); let logging_target = self.logging_target(); @@ -316,23 +315,14 @@ pub trait SimpleSlotWorker { return None } - debug!( - target: logging_target, - "Starting authorship at slot {}; timestamp = {}", slot, *timestamp, - ); + debug!(target: logging_target, "Starting authorship at slot: {slot}"); - telemetry!( - telemetry; - CONSENSUS_DEBUG; - "slots.starting_authorship"; - "slot_num" => *slot, - "timestamp" => *timestamp, - ); + telemetry!(telemetry; CONSENSUS_DEBUG; "slots.starting_authorship"; "slot_num" => slot); let proposer = match self.proposer(&slot_info.chain_head).await { Ok(p) => p, Err(err) => { - warn!(target: logging_target, "Unable to author block in slot {:?}: {}", slot, err,); + warn!(target: logging_target, "Unable to author block in slot {slot:?}: {err}"); telemetry!( telemetry; @@ -440,44 +430,35 @@ impl + Send + Sync, B: BlockT> /// Slot specific extension that the inherent data provider needs to implement. pub trait InherentDataProviderExt { - /// The current timestamp that will be found in the - /// [`InherentData`](`sp_inherents::InherentData`). - fn timestamp(&self) -> Timestamp; - /// The current slot that will be found in the [`InherentData`](`sp_inherents::InherentData`). 
fn slot(&self) -> Slot; } /// Small macro for implementing `InherentDataProviderExt` for inherent data provider tuple. macro_rules! impl_inherent_data_provider_ext_tuple { - ( T, S $(, $TN:ident)* $( , )?) => { - impl InherentDataProviderExt for (T, S, $($TN),*) + ( S $(, $TN:ident)* $( , )?) => { + impl InherentDataProviderExt for (S, $($TN),*) where - T: Deref, S: Deref, { - fn timestamp(&self) -> Timestamp { - *self.0.deref() - } - fn slot(&self) -> Slot { - *self.1.deref() + *self.0.deref() } } } } -impl_inherent_data_provider_ext_tuple!(T, S); -impl_inherent_data_provider_ext_tuple!(T, S, A); -impl_inherent_data_provider_ext_tuple!(T, S, A, B); -impl_inherent_data_provider_ext_tuple!(T, S, A, B, C); -impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D); -impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E); -impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F); -impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F, G); -impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F, G, H); -impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F, G, H, I); -impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F, G, H, I, J); +impl_inherent_data_provider_ext_tuple!(S); +impl_inherent_data_provider_ext_tuple!(S, A); +impl_inherent_data_provider_ext_tuple!(S, A, B); +impl_inherent_data_provider_ext_tuple!(S, A, B, C); +impl_inherent_data_provider_ext_tuple!(S, A, B, C, D); +impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E); +impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F); +impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G); +impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G, H); +impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G, H, I); +impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G, H, I, J); /// Start a new slot worker. 
/// @@ -806,7 +787,6 @@ mod test { super::slots::SlotInfo { slot: slot.into(), duration: SLOT_DURATION, - timestamp: Default::default(), inherent_data: Default::default(), ends_at: Instant::now() + SLOT_DURATION, chain_head: Header::new( diff --git a/client/consensus/slots/src/slots.rs b/client/consensus/slots/src/slots.rs index accf24b6b4e78..f3dc485a8e819 100644 --- a/client/consensus/slots/src/slots.rs +++ b/client/consensus/slots/src/slots.rs @@ -50,8 +50,6 @@ pub fn time_until_next_slot(slot_duration: Duration) -> Duration { pub struct SlotInfo { /// The slot number as found in the inherent data. pub slot: Slot, - /// Current timestamp as found in the inherent data. - pub timestamp: sp_timestamp::Timestamp, /// The instant at which the slot ends. pub ends_at: Instant, /// The inherent data. @@ -72,7 +70,6 @@ impl SlotInfo { /// `ends_at` is calculated using `timestamp` and `duration`. pub fn new( slot: Slot, - timestamp: sp_timestamp::Timestamp, inherent_data: InherentData, duration: Duration, chain_head: B::Header, @@ -80,7 +77,6 @@ impl SlotInfo { ) -> Self { Self { slot, - timestamp, inherent_data, duration, chain_head, @@ -175,7 +171,6 @@ where ); } - let timestamp = inherent_data_providers.timestamp(); let slot = inherent_data_providers.slot(); let inherent_data = inherent_data_providers.create_inherent_data()?; @@ -183,14 +178,7 @@ where if slot > self.last_slot { self.last_slot = slot; - break Ok(SlotInfo::new( - slot, - timestamp, - inherent_data, - self.slot_duration, - chain_head, - None, - )) + break Ok(SlotInfo::new(slot, inherent_data, self.slot_duration, chain_head, None)) } } } diff --git a/primitives/timestamp/src/lib.rs b/primitives/timestamp/src/lib.rs index b98a87c37f69d..d88b1839babe6 100644 --- a/primitives/timestamp/src/lib.rs +++ b/primitives/timestamp/src/lib.rs @@ -56,6 +56,17 @@ impl Timestamp { pub fn checked_sub(self, other: Self) -> Option { self.0.checked_sub(other.0).map(Self) } + + /// The current timestamp using the system 
time. + #[cfg(feature = "std")] + pub fn current() -> Self { + use std::time::SystemTime; + + let now = SystemTime::now(); + now.duration_since(SystemTime::UNIX_EPOCH) + .expect("Current time is always after unix epoch; qed") + .into() + } } impl sp_std::ops::Deref for Timestamp { @@ -165,18 +176,6 @@ impl TimestampInherentData for InherentData { } } -/// The current timestamp using the system time. -/// -/// This timestamp is the time since the UNIX epoch. -#[cfg(feature = "std")] -fn current_timestamp() -> std::time::Duration { - use std::time::SystemTime; - - let now = SystemTime::now(); - now.duration_since(SystemTime::UNIX_EPOCH) - .expect("Current time is always after unix epoch; qed") -} - /// Provide duration since unix epoch in millisecond for timestamp inherent. #[cfg(feature = "std")] pub struct InherentDataProvider { @@ -190,7 +189,7 @@ impl InherentDataProvider { pub fn from_system_time() -> Self { Self { max_drift: std::time::Duration::from_secs(60).into(), - timestamp: current_timestamp().into(), + timestamp: Timestamp::current(), } } From fb779212ca6b59bd158d72deeab2502cb9670cca Mon Sep 17 00:00:00 2001 From: Frederik Gartenmeister Date: Sat, 24 Sep 2022 08:59:44 +0200 Subject: [PATCH 02/42] Const impls of base arithmetics for `Weights` with `u64` (#12322) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Const impls * Adding explainatory comments * Update primitives/weights/src/weight_v2.rs Doc comment suggestions Co-authored-by: Bastian Köcher * Update primitives/weights/src/weight_v2.rs Doc comment suggestions Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher --- primitives/weights/src/weight_v2.rs | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/primitives/weights/src/weight_v2.rs b/primitives/weights/src/weight_v2.rs index a4e4da4f8a7b3..af0f469ebaaeb 100644 --- a/primitives/weights/src/weight_v2.rs +++ b/primitives/weights/src/weight_v2.rs @@ -160,6 +160,34 @@ 
impl Weight { Self { ref_time: 0 } } + /// Constant version of Add with u64. + /// + /// Is only overflow safe when evaluated at compile-time. + pub const fn add(self, scalar: u64) -> Self { + Self { ref_time: self.ref_time + scalar } + } + + /// Constant version of Sub with u64. + /// + /// Is only overflow safe when evaluated at compile-time. + pub const fn sub(self, scalar: u64) -> Self { + Self { ref_time: self.ref_time - scalar } + } + + /// Constant version of Div with u64. + /// + /// Is only overflow safe when evaluated at compile-time. + pub const fn div(self, scalar: u64) -> Self { + Self { ref_time: self.ref_time / scalar } + } + + /// Constant version of Mul with u64. + /// + /// Is only overflow safe when evaluated at compile-time. + pub const fn mul(self, scalar: u64) -> Self { + Self { ref_time: self.ref_time * scalar } + } + /// Returns true if any of `self`'s constituent weights is strictly greater than that of the /// `other`'s, otherwise returns false. pub const fn any_gt(self, other: Self) -> bool { From 9476d2157a0d266677dee144a8a7bc44a6b821e1 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Sat, 24 Sep 2022 17:17:44 +0200 Subject: [PATCH 03/42] Add base-weight to `System::Extrinsic*` events (#12329) * Add base-weight to events Signed-off-by: Oliver Tale-Yazdi * Fix test Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- bin/node/executor/tests/basic.rs | 13 +- frame/system/src/extensions/check_weight.rs | 12 +- frame/system/src/lib.rs | 8 +- frame/system/src/mock.rs | 1 + frame/system/src/tests.rs | 312 +++++++++++--------- 5 files changed, 198 insertions(+), 148 deletions(-) diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 99a9b83596acf..fc4e138faafc2 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -311,10 +311,19 @@ fn full_native_block_import_works() { let mut alice_last_known_balance: Balance = Default::default(); let mut fees = 
t.execute_with(|| transfer_fee(&xt())); - let transfer_weight = default_transfer_call().get_dispatch_info().weight; + let transfer_weight = default_transfer_call().get_dispatch_info().weight.saturating_add( + ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic, + ); let timestamp_weight = pallet_timestamp::Call::set:: { now: Default::default() } .get_dispatch_info() - .weight; + .weight + .saturating_add( + ::BlockWeights::get() + .get(DispatchClass::Mandatory) + .base_extrinsic, + ); executor_call(&mut t, "Core_execute_block", &block1.0, true).0.unwrap(); diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index 466231b8455ec..15a88913cd337 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -342,7 +342,7 @@ mod tests { .get(DispatchClass::Operational) .max_total .unwrap_or_else(|| weights.max_block); - let base_weight = weights.get(DispatchClass::Normal).base_extrinsic; + let base_weight = weights.get(DispatchClass::Operational).base_extrinsic; let weight = operational_limit - base_weight; let okay = @@ -378,11 +378,11 @@ mod tests { // Max normal is 768 (75%) // 10 is taken for block execution weight // So normal extrinsic can be 758 weight (-5 for base extrinsic weight) - // And Operational can be 256 to produce a full block (-5 for base) + // And Operational can be 246 to produce a full block (-10 for base) let max_normal = DispatchInfo { weight: Weight::from_ref_time(753), ..Default::default() }; let rest_operational = DispatchInfo { - weight: Weight::from_ref_time(251), + weight: Weight::from_ref_time(246), class: DispatchClass::Operational, ..Default::default() }; @@ -406,7 +406,7 @@ mod tests { let max_normal = DispatchInfo { weight: Weight::from_ref_time(753), ..Default::default() }; let rest_operational = DispatchInfo { - weight: Weight::from_ref_time(251), + weight: Weight::from_ref_time(246), class: DispatchClass::Operational, 
..Default::default() }; @@ -414,7 +414,7 @@ mod tests { let len = 0_usize; assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); - // Extra 15 here from block execution + base extrinsic weight + // Extra 20 here from block execution + base extrinsic weight assert_eq!(System::block_weight().total(), Weight::from_ref_time(266)); assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); assert_eq!(block_weight_limit(), Weight::from_ref_time(1024)); @@ -433,7 +433,7 @@ mod tests { ..Default::default() }; let dispatch_operational = DispatchInfo { - weight: Weight::from_ref_time(251), + weight: Weight::from_ref_time(246), class: DispatchClass::Operational, ..Default::default() }; diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 36360f8fae2c2..dc74157da79de 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1509,9 +1509,15 @@ impl Pallet { } /// To be called immediately after an extrinsic has been applied. + /// + /// Emits an `ExtrinsicSuccess` or `ExtrinsicFailed` event depending on the outcome. + /// The emitted event contains the post-dispatch corrected weight including + /// the base-weight for its dispatch class. pub fn note_applied_extrinsic(r: &DispatchResultWithPostInfo, mut info: DispatchInfo) { - info.weight = extract_actual_weight(r, &info); + info.weight = extract_actual_weight(r, &info) + .saturating_add(T::BlockWeights::get().get(info.class).base_extrinsic); info.pays_fee = extract_actual_pays_fee(r, &info); + Self::deposit_event(match r { Ok(_) => Event::ExtrinsicSuccess { dispatch_info: info }, Err(err) => { diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index 1c0511787eb76..b6fc121612050 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -67,6 +67,7 @@ parameter_types! 
{ weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAX_BLOCK_WEIGHT); }) .for_class(DispatchClass::Operational, |weights| { + weights.base_extrinsic = Weight::from_ref_time(10); weights.max_total = Some(MAX_BLOCK_WEIGHT); weights.reserved = Some( MAX_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAX_BLOCK_WEIGHT diff --git a/frame/system/src/tests.rs b/frame/system/src/tests.rs index 92563f4ad1747..c42131c450228 100644 --- a/frame/system/src/tests.rs +++ b/frame/system/src/tests.rs @@ -169,6 +169,10 @@ fn deposit_event_should_work() { }] ); + let normal_base = ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; + System::reset_events(); System::initialize(&2, &[0u8; 32].into(), &Default::default()); System::deposit_event(SysEvent::NewAccount { account: 32 }); @@ -194,14 +198,17 @@ fn deposit_event_should_work() { }, EventRecord { phase: Phase::ApplyExtrinsic(0), - event: SysEvent::ExtrinsicSuccess { dispatch_info: Default::default() }.into(), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { weight: normal_base, ..Default::default() } + } + .into(), topics: vec![] }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: SysEvent::ExtrinsicFailed { dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: Default::default() + dispatch_info: DispatchInfo { weight: normal_base, ..Default::default() } } .into(), topics: vec![] @@ -223,6 +230,9 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { System::initialize(&1, &[0u8; 32].into(), &Default::default()); System::note_finished_initialize(); + let normal_base = ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; let pre_info = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; System::note_applied_extrinsic(&Ok(Some(300).into()), pre_info); System::note_applied_extrinsic(&Ok(Some(1000).into()), pre_info); @@ -267,144 +277,168 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { }), pre_info, ); + // Also works for operational. 
+ let operational_base = ::BlockWeights::get() + .get(DispatchClass::Operational) + .base_extrinsic; + assert!(normal_base != operational_base, "Test pre-condition violated"); + let pre_info = DispatchInfo { + weight: Weight::from_ref_time(1000), + class: DispatchClass::Operational, + ..Default::default() + }; + System::note_applied_extrinsic(&Ok(Some(300).into()), pre_info); - assert_eq!( - System::events(), - vec![ - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(300), - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(1), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000), - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(2), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000), - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(3), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000), - pays_fee: Pays::Yes, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(4), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000), - pays_fee: Pays::No, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(5), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000), - pays_fee: Pays::No, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(6), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(500), - 
pays_fee: Pays::No, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(7), - event: SysEvent::ExtrinsicFailed { - dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(999), - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(8), - event: SysEvent::ExtrinsicFailed { - dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000), - pays_fee: Pays::Yes, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(9), - event: SysEvent::ExtrinsicFailed { - dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(800), - pays_fee: Pays::Yes, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(10), - event: SysEvent::ExtrinsicFailed { - dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(800), - pays_fee: Pays::No, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - ] - ); + let got = System::events(); + let want = vec![ + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(300).saturating_add(normal_base), + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000).saturating_add(normal_base), + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(2), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: 
Weight::from_ref_time(1000).saturating_add(normal_base), + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(3), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000).saturating_add(normal_base), + pays_fee: Pays::Yes, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(4), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000).saturating_add(normal_base), + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(5), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000).saturating_add(normal_base), + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(6), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(500).saturating_add(normal_base), + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(7), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(999).saturating_add(normal_base), + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(8), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000).saturating_add(normal_base), + pays_fee: Pays::Yes, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(9), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + 
dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(800).saturating_add(normal_base), + pays_fee: Pays::Yes, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(10), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(800).saturating_add(normal_base), + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(11), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(300).saturating_add(operational_base), + class: DispatchClass::Operational, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + ]; + for (i, event) in want.into_iter().enumerate() { + assert_eq!(got[i], event, "Event mismatch at index {}", i); + } }); } From d0214e7c77de639d60301d682ced7e155def15da Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Sat, 24 Sep 2022 21:32:35 +0200 Subject: [PATCH 04/42] re add the migration checks for staking (#12330) Co-authored-by: parity-processbot <> --- frame/staking/src/migrations.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs index f47545af694cf..8f37ae30dd056 100644 --- a/frame/staking/src/migrations.rs +++ b/frame/staking/src/migrations.rs @@ -35,6 +35,11 @@ pub mod v12 { impl OnRuntimeUpgrade for MigrateToV12 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, &'static str> { + frame_support::ensure!( + StorageVersion::::get() == Releases::V11_0_0, + "Expected v11 before upgrading to v12" + ); + frame_support::ensure!( T::HistoryDepth::get() == HistoryDepth::::get(), "Provided value of HistoryDepth should be same as the existing storage value" @@ -129,6 +134,11 @@ pub mod v11 { #[cfg(feature = "try-runtime")] fn 
post_upgrade(_state: Vec) -> Result<(), &'static str> { + frame_support::ensure!( + StorageVersion::::get() == crate::Releases::V11_0_0, + "wrong version after the upgrade" + ); + let old_pallet_name = N::get(); let new_pallet_name =

::name(); From 4219b3ab45ff2201eb8aecbbb444f9ae055f528e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20M=C3=BCller?= Date: Sun, 25 Sep 2022 11:24:35 +0200 Subject: [PATCH 05/42] Allow specifying immediate finalize for `manual-seal` (#12106) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Alexander Theißen --- client/consensus/manual-seal/src/lib.rs | 54 +++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index c5dd169e281f2..4672e7275a56b 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -247,6 +247,60 @@ pub async fn run_instant_seal( .await } +/// Runs the background authorship task for the instant seal engine. +/// instant-seal creates a new block for every transaction imported into +/// the transaction pool. +/// +/// This function will finalize the block immediately as well. If you don't +/// want this behavior use `run_instant_seal` instead. +pub async fn run_instant_seal_and_finalize( + InstantSealParams { + block_import, + env, + client, + pool, + select_chain, + consensus_data_provider, + create_inherent_data_providers, + }: InstantSealParams, +) where + B: BlockT + 'static, + BI: BlockImport> + + Send + + Sync + + 'static, + C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, + CB: ClientBackend + 'static, + E: Environment + 'static, + E::Proposer: Proposer>, + SC: SelectChain + 'static, + TransactionFor: 'static, + TP: TransactionPool, + CIDP: CreateInherentDataProviders, + P: Send + Sync + 'static, +{ + // Creates and finalizes blocks as soon as transactions are imported + // into the transaction pool. 
+ let commands_stream = pool.import_notification_stream().map(|_| EngineCommand::SealNewBlock { + create_empty: false, + finalize: true, + parent_hash: None, + sender: None, + }); + + run_manual_seal(ManualSealParams { + block_import, + env, + client, + pool, + commands_stream, + select_chain, + consensus_data_provider, + create_inherent_data_providers, + }) + .await +} + #[cfg(test)] mod tests { use super::*; From badc92ac20dbf006595a6af9418da9942527cbd7 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Sun, 25 Sep 2022 23:22:54 +0200 Subject: [PATCH 06/42] Removed OuterCall alias & doc fixes (#12349) --- frame/alliance/src/lib.rs | 2 +- frame/collective/src/lib.rs | 6 +++--- .../election-provider-multi-phase/src/unsigned.rs | 14 +++++++------- frame/multisig/src/benchmarking.rs | 2 +- frame/ranked-collective/src/lib.rs | 2 +- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index 82c7e21dba3af..2ef6718538122 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -243,7 +243,7 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; - /// The outer call dispatch type. + /// The runtime call dispatch type. type Proposal: Parameter + Dispatchable + From> diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index e934924033552..ae68ae2fe3e16 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -180,10 +180,10 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { - /// The outer origin type. + /// The runtime origin type. type RuntimeOrigin: From>; - /// The outer call dispatch type. + /// The runtime call dispatch type. type Proposal: Parameter + Dispatchable< RuntimeOrigin = >::RuntimeOrigin, @@ -191,7 +191,7 @@ pub mod pallet { > + From> + GetDispatchInfo; - /// The outer event type. + /// The runtime event type. 
type RuntimeEvent: From> + IsType<::RuntimeEvent>; diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index cf8df237bafb0..833f80c90d13e 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -856,7 +856,7 @@ mod tests { use crate::{ mock::{ roll_to, roll_to_with_ocw, trim_helpers, witness, BlockNumber, ExtBuilder, Extrinsic, - MinerMaxWeight, MultiPhase, Runtime, RuntimeCall as OuterCall, RuntimeOrigin, System, + MinerMaxWeight, MultiPhase, Runtime, RuntimeCall, RuntimeOrigin, System, TestNposSolution, TrimHelpers, UnsignedPhase, }, CurrentPhase, InvalidTransaction, Phase, QueuedSolution, TransactionSource, @@ -1070,8 +1070,8 @@ mod tests { raw_solution: Box::new(solution.clone()), witness: witness(), }; - let outer_call: OuterCall = call.into(); - let _ = outer_call.dispatch(RuntimeOrigin::none()); + let runtime_call: RuntimeCall = call.into(); + let _ = runtime_call.dispatch(RuntimeOrigin::none()); }) } @@ -1096,8 +1096,8 @@ mod tests { raw_solution: Box::new(solution.clone()), witness: correct_witness, }; - let outer_call: OuterCall = call.into(); - let _ = outer_call.dispatch(RuntimeOrigin::none()); + let runtime_call: RuntimeCall = call.into(); + let _ = runtime_call.dispatch(RuntimeOrigin::none()); }) } @@ -1560,7 +1560,7 @@ mod tests { let encoded = pool.read().transactions[0].clone(); let extrinsic: Extrinsic = codec::Decode::decode(&mut &*encoded).unwrap(); let call = extrinsic.call; - assert!(matches!(call, OuterCall::MultiPhase(Call::submit_unsigned { .. }))); + assert!(matches!(call, RuntimeCall::MultiPhase(Call::submit_unsigned { .. }))); }) } @@ -1577,7 +1577,7 @@ mod tests { let encoded = pool.read().transactions[0].clone(); let extrinsic = Extrinsic::decode(&mut &*encoded).unwrap(); let call = match extrinsic.call { - OuterCall::MultiPhase(call @ Call::submit_unsigned { .. 
}) => call, + RuntimeCall::MultiPhase(call @ Call::submit_unsigned { .. }) => call, _ => panic!("bad call: unexpected submission"), }; diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs index 8d0651002305b..c0b0097b07236 100644 --- a/frame/multisig/src/benchmarking.rs +++ b/frame/multisig/src/benchmarking.rs @@ -41,7 +41,7 @@ fn setup_multi( signatories.push(signatory); } signatories.sort(); - // Must first convert to outer call type. + // Must first convert to runtime call type. let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![0; z as usize] }.into(); let call_data = OpaqueCall::::from_encoded(call.encode()); diff --git a/frame/ranked-collective/src/lib.rs b/frame/ranked-collective/src/lib.rs index bdd5d26373980..fa3a473fe7d73 100644 --- a/frame/ranked-collective/src/lib.rs +++ b/frame/ranked-collective/src/lib.rs @@ -361,7 +361,7 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; - /// The outer event type. + /// The runtime event type. 
type RuntimeEvent: From> + IsType<::RuntimeEvent>; From fbd7e5aa99e20b7019576ecf598bca2d7019cddd Mon Sep 17 00:00:00 2001 From: Roman Useinov Date: Mon, 26 Sep 2022 08:05:05 +0200 Subject: [PATCH 07/42] [Enhancement] Remove optional Pool subscription from fast-unstake (#12344) * [Enhancement] Remove optional Pool subscription from fast-unstake * remove nomination-pools pallet dependency * fixes * more fixes * more fixes * more fixes --- Cargo.lock | 1 - frame/fast-unstake/Cargo.toml | 2 - frame/fast-unstake/src/benchmarking.rs | 30 +-- frame/fast-unstake/src/lib.rs | 95 ++------ frame/fast-unstake/src/mock.rs | 50 +--- frame/fast-unstake/src/tests.rs | 302 +++++++------------------ frame/fast-unstake/src/types.rs | 3 - 7 files changed, 105 insertions(+), 378 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 74784eb9a6c24..6ea79a120361e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5729,7 +5729,6 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "pallet-nomination-pools", "pallet-staking", "pallet-staking-reward-curve", "pallet-timestamp", diff --git a/frame/fast-unstake/Cargo.toml b/frame/fast-unstake/Cargo.toml index 1fa118dba4a8d..69aeaff35993c 100644 --- a/frame/fast-unstake/Cargo.toml +++ b/frame/fast-unstake/Cargo.toml @@ -28,7 +28,6 @@ sp-staking = { default-features = false, path = "../../primitives/staking" } pallet-balances = { default-features = false, path = "../balances" } pallet-timestamp = { default-features = false, path = "../timestamp" } pallet-staking = { default-features = false, path = "../staking" } -pallet-nomination-pools = { default-features = false, path = "../nomination-pools" } frame-election-provider-support = { default-features = false, path = "../election-provider-support" } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } @@ -55,7 +54,6 @@ std = [ "sp-std/std", "pallet-staking/std", - "pallet-nomination-pools/std", "pallet-balances/std", 
"pallet-timestamp/std", "frame-election-provider-support/std", diff --git a/frame/fast-unstake/src/benchmarking.rs b/frame/fast-unstake/src/benchmarking.rs index 68a3da0d40af3..5690d5ce6f29f 100644 --- a/frame/fast-unstake/src/benchmarking.rs +++ b/frame/fast-unstake/src/benchmarking.rs @@ -26,7 +26,6 @@ use frame_support::{ traits::{Currency, EnsureOrigin, Get, Hooks}, }; use frame_system::RawOrigin; -use pallet_nomination_pools::{Pallet as Pools, PoolId}; use pallet_staking::Pallet as Staking; use sp_runtime::traits::{StaticLookup, Zero}; use sp_staking::EraIndex; @@ -76,25 +75,6 @@ pub(crate) fn fast_unstake_events() -> Vec> { .collect::>() } -fn setup_pool() -> PoolId { - let depositor = frame_benchmarking::account::("depositor_42", 0, USER_SEED); - let depositor_lookup = l::(depositor.clone()); - - let stake = Pools::::depositor_min_bond(); - CurrencyOf::::make_free_balance_be(&depositor, stake * 10u32.into()); - - Pools::::create( - RawOrigin::Signed(depositor.clone()).into(), - stake, - depositor_lookup.clone(), - depositor_lookup.clone(), - depositor_lookup, - ) - .unwrap(); - - pallet_nomination_pools::LastPoolId::::get() -} - fn setup_staking(v: u32, until: EraIndex) { let ed = CurrencyOf::::minimum_balance(); @@ -131,10 +111,8 @@ benchmarks! { // on_idle, we we don't check anyone, but fully unbond and move them to another pool. on_idle_unstake { let who = create_unexposed_nominator::(); - let pool_id = setup_pool::(); assert_ok!(FastUnstake::::register_fast_unstake( RawOrigin::Signed(who.clone()).into(), - Some(pool_id) )); ErasToCheckPerBlock::::put(1); @@ -143,7 +121,7 @@ benchmarks! { on_idle_full_block::(); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: who.clone(), checked: vec![0].try_into().unwrap(), maybe_pool_id: Some(pool_id) }) + Some(UnstakeRequest { stash: who.clone(), checked: vec![0].try_into().unwrap() }) ); } : { @@ -172,7 +150,6 @@ benchmarks! 
{ let who = create_unexposed_nominator::(); assert_ok!(FastUnstake::::register_fast_unstake( RawOrigin::Signed(who.clone()).into(), - None, )); // no one is queued thus far. @@ -185,7 +162,7 @@ benchmarks! { let checked: frame_support::BoundedVec<_, _> = (1..=u).rev().collect::>().try_into().unwrap(); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: who.clone(), checked, maybe_pool_id: None }) + Some(UnstakeRequest { stash: who.clone(), checked }) ); assert!(matches!( fast_unstake_events::().last(), @@ -199,7 +176,7 @@ benchmarks! { assert_eq!(Queue::::count(), 0); } - :_(RawOrigin::Signed(who.clone()), None) + :_(RawOrigin::Signed(who.clone())) verify { assert_eq!(Queue::::count(), 1); } @@ -208,7 +185,6 @@ benchmarks! { let who = create_unexposed_nominator::(); assert_ok!(FastUnstake::::register_fast_unstake( RawOrigin::Signed(who.clone()).into(), - None )); assert_eq!(Queue::::count(), 1); whitelist_account!(who); diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index 51416808f48c8..5acc9940debf1 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -19,8 +19,7 @@ //! //! If a nominator is not exposed in any `ErasStakers` (i.e. "has not actively backed any //! validators in the last `BondingDuration` days"), then they can register themselves in this -//! pallet, unstake faster than having to wait an entire bonding duration, and potentially move -//! into a nomination pool. +//! pallet, unstake faster than having to wait an entire bonding duration. //! //! Appearing in the exposure of a validator means being exposed equal to that validator from the //! point of view of the staking system. This usually means earning rewards with the validator, and @@ -43,8 +42,7 @@ //! to prevent them from accidentally exposing themselves behind a validator etc. //! //! Once processed, if successful, no additional fee for the checking process is taken, and the -//! staker is instantly unbonded. 
Optionally, if they have asked to join a pool, their *entire* -//! stake is joined into their pool of choice. +//! staker is instantly unbonded. //! //! If unsuccessful, meaning that the staker was exposed sometime in the last `BondingDuration` eras //! they will end up being slashed for the amount of wasted work they have inflicted on the chian. @@ -85,7 +83,6 @@ pub mod pallet { use frame_election_provider_support::ElectionProvider; use frame_support::pallet_prelude::*; use frame_system::{pallet_prelude::*, RawOrigin}; - use pallet_nomination_pools::PoolId; use pallet_staking::Pallet as Staking; use sp_runtime::{ traits::{Saturating, Zero}, @@ -109,12 +106,7 @@ pub mod pallet { pub struct Pallet(_); #[pallet::config] - pub trait Config: - frame_system::Config - + pallet_staking::Config< - CurrencyBalance = ::CurrencyBalance, - > + pallet_nomination_pools::Config - { + pub trait Config: frame_system::Config + pallet_staking::Config { /// The overarching event type. type RuntimeEvent: From> + IsType<::RuntimeEvent> @@ -139,10 +131,9 @@ pub mod pallet { /// The map of all accounts wishing to be unstaked. /// - /// Points the `AccountId` wishing to unstake to the optional `PoolId` they wish to join - /// thereafter. + /// Keeps track of `AccountId` wishing to unstake. #[pallet::storage] - pub type Queue = CountedStorageMap<_, Twox64Concat, T::AccountId, Option>; + pub type Queue = CountedStorageMap<_, Twox64Concat, T::AccountId, ()>; /// Number of eras to check per block. /// @@ -158,7 +149,7 @@ pub mod pallet { #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// A staker was unstaked. - Unstaked { stash: T::AccountId, maybe_pool_id: Option, result: DispatchResult }, + Unstaked { stash: T::AccountId, result: DispatchResult }, /// A staker was slashed for requesting fast-unstake whilst being exposed. Slashed { stash: T::AccountId, amount: BalanceOf }, /// A staker was partially checked for the given eras, but the process did not finish. 
@@ -213,16 +204,13 @@ pub mod pallet { /// they are guaranteed to remain eligible, because the call will chill them as well. /// /// If the check works, the entire staking data is removed, i.e. the stash is fully - /// unstaked, and they potentially join a pool with their entire bonded stake. + /// unstaked. /// /// If the check fails, the stash remains chilled and waiting for being unbonded as in with /// the normal staking system, but they lose part of their unbonding chunks due to consuming /// the chain's resources. #[pallet::weight(::WeightInfo::register_fast_unstake())] - pub fn register_fast_unstake( - origin: OriginFor, - maybe_pool_id: Option, - ) -> DispatchResult { + pub fn register_fast_unstake(origin: OriginFor) -> DispatchResult { let ctrl = ensure_signed(origin)?; let ledger = @@ -243,12 +231,11 @@ pub mod pallet { Staking::::unbond(RawOrigin::Signed(ctrl).into(), ledger.total)?; // enqueue them. - Queue::::insert(ledger.stash, maybe_pool_id); + Queue::::insert(ledger.stash, ()); Ok(()) } - /// Deregister oneself from the fast-unstake (also cancels joining the pool if that was - /// supplied on `register_fast_unstake` . + /// Deregister oneself from the fast-unstake. /// /// This is useful if one is registered, they are still waiting, and they change their mind. /// @@ -327,17 +314,12 @@ pub mod pallet { return T::DbWeight::get().reads(2) } - let UnstakeRequest { stash, mut checked, maybe_pool_id } = match Head::::take() - .or_else(|| { - // NOTE: there is no order guarantees in `Queue`. - Queue::::drain() - .map(|(stash, maybe_pool_id)| UnstakeRequest { - stash, - maybe_pool_id, - checked: Default::default(), - }) - .next() - }) { + let UnstakeRequest { stash, mut checked } = match Head::::take().or_else(|| { + // NOTE: there is no order guarantees in `Queue`. + Queue::::drain() + .map(|(stash, _)| UnstakeRequest { stash, checked: Default::default() }) + .next() + }) { None => { // There's no `Head` and nothing in the `Queue`, nothing to do here. 
return T::DbWeight::get().reads(4) @@ -392,48 +374,15 @@ pub mod pallet { // `stash` is not exposed in any era now -- we can let go of them now. let num_slashing_spans = Staking::::slashing_spans(&stash).iter().count() as u32; - let ctrl = match pallet_staking::Bonded::::get(&stash) { - Some(ctrl) => ctrl, - None => { - Self::deposit_event(Event::::Errored { stash }); - return ::WeightInfo::on_idle_unstake() - }, - }; - - let ledger = match pallet_staking::Ledger::::get(ctrl) { - Some(ledger) => ledger, - None => { - Self::deposit_event(Event::::Errored { stash }); - return ::WeightInfo::on_idle_unstake() - }, - }; - - let unstake_result = pallet_staking::Pallet::::force_unstake( + let result = pallet_staking::Pallet::::force_unstake( RawOrigin::Root.into(), stash.clone(), num_slashing_spans, ); - let pool_stake_result = if let Some(pool_id) = maybe_pool_id { - pallet_nomination_pools::Pallet::::join( - RawOrigin::Signed(stash.clone()).into(), - ledger.total, - pool_id, - ) - } else { - Ok(()) - }; + log!(info, "unstaked {:?}, outcome: {:?}", stash, result); - let result = unstake_result.and(pool_stake_result); - log!( - info, - "unstaked {:?}, maybe_pool {:?}, outcome: {:?}", - stash, - maybe_pool_id, - result - ); - - Self::deposit_event(Event::::Unstaked { stash, maybe_pool_id, result }); + Self::deposit_event(Event::::Unstaked { stash, result }); ::WeightInfo::on_idle_unstake() } else { // eras remaining to be checked. @@ -471,11 +420,7 @@ pub mod pallet { // Not exposed in these eras. 
match checked.try_extend(unchecked_eras_to_check.clone().into_iter()) { Ok(_) => { - Head::::put(UnstakeRequest { - stash: stash.clone(), - checked, - maybe_pool_id, - }); + Head::::put(UnstakeRequest { stash: stash.clone(), checked }); Self::deposit_event(Event::::Checking { stash, eras: unchecked_eras_to_check, diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index b9cf16e18e8d1..62f343709e245 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -17,19 +17,10 @@ use crate::{self as fast_unstake}; use frame_support::{ - assert_ok, - pallet_prelude::*, - parameter_types, - traits::{ConstU64, ConstU8, Currency}, - weights::constants::WEIGHT_PER_SECOND, - PalletId, -}; -use sp_runtime::{ - traits::{Convert, IdentityLookup}, - FixedU128, + pallet_prelude::*, parameter_types, traits::ConstU64, weights::constants::WEIGHT_PER_SECOND, }; +use sp_runtime::traits::{Convert, IdentityLookup}; -use frame_system::RawOrigin; use pallet_staking::{Exposure, IndividualExposure, StakerStatus}; use sp_std::prelude::*; @@ -153,7 +144,7 @@ impl pallet_staking::Config for Runtime { type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; - type OnStakerSlash = Pools; + type OnStakerSlash = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); } @@ -172,29 +163,6 @@ impl Convert for U256ToBalance { } } -parameter_types! 
{ - pub const PostUnbondingPoolsWindow: u32 = 10; - pub const PoolsPalletId: PalletId = PalletId(*b"py/nopls"); - pub static MaxMetadataLen: u32 = 10; - pub static CheckLevel: u8 = 255; -} - -impl pallet_nomination_pools::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = (); - type Currency = Balances; - type CurrencyBalance = Balance; - type RewardCounter = FixedU128; - type BalanceToU256 = BalanceToU256; - type U256ToBalance = U256ToBalance; - type StakingInterface = Staking; - type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow; - type MaxMetadataLen = MaxMetadataLen; - type MaxUnbonding = ConstU32<8>; - type MaxPointsToBalance = ConstU8<10>; - type PalletId = PoolsPalletId; -} - parameter_types! { pub static SlashPerEra: u32 = 100; } @@ -218,7 +186,6 @@ frame_support::construct_runtime!( Timestamp: pallet_timestamp, Balances: pallet_balances, Staking: pallet_staking, - Pools: pallet_nomination_pools, FastUnstake: fast_unstake, } ); @@ -287,10 +254,6 @@ impl ExtBuilder { let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); - // create one default pool. - let _ = pallet_nomination_pools::GenesisConfig:: { ..Default::default() } - .assimilate_storage(&mut storage); - let validators_range = VALIDATOR_PREFIX..VALIDATOR_PREFIX + VALIDATORS_PER_ERA; let nominators_range = NOMINATOR_PREFIX..NOMINATOR_PREFIX + NOMINATORS_PER_VALIDATOR_PER_ERA; @@ -337,11 +300,6 @@ impl ExtBuilder { // because we read this value as a measure of how many validators we have. 
pallet_staking::ValidatorCount::::put(VALIDATORS_PER_ERA as u32); - - // make a pool - let amount_to_bond = Pools::depositor_min_bond(); - Balances::make_free_balance_be(&10, amount_to_bond * 5); - assert_ok!(Pools::create(RawOrigin::Signed(10).into(), amount_to_bond, 900, 901, 902)); }); ext } @@ -359,14 +317,12 @@ pub(crate) fn run_to_block(n: u64, on_idle: bool) { while System::block_number() < n { Balances::on_finalize(System::block_number()); Staking::on_finalize(System::block_number()); - Pools::on_finalize(System::block_number()); FastUnstake::on_finalize(System::block_number()); System::set_block_number(System::block_number() + 1); Balances::on_initialize(System::block_number()); Staking::on_initialize(System::block_number()); - Pools::on_initialize(System::block_number()); FastUnstake::on_initialize(System::block_number()); if on_idle { FastUnstake::on_idle(System::block_number(), BlockWeights::get().max_block); diff --git a/frame/fast-unstake/src/tests.rs b/frame/fast-unstake/src/tests.rs index a51c1acdf06eb..5586443ce797c 100644 --- a/frame/fast-unstake/src/tests.rs +++ b/frame/fast-unstake/src/tests.rs @@ -20,20 +20,15 @@ use super::*; use crate::{mock::*, types::*, weights::WeightInfo, Event}; use frame_support::{assert_noop, assert_ok, bounded_vec, pallet_prelude::*, traits::Currency}; -use pallet_nomination_pools::{BondedPools, LastPoolId, RewardPools}; use pallet_staking::{CurrentEra, IndividualExposure, RewardDestination}; -use sp_runtime::{traits::BadOrigin, DispatchError, ModuleError}; +use sp_runtime::traits::BadOrigin; use sp_staking::StakingInterface; #[test] fn test_setup_works() { ExtBuilder::default().build_and_execute(|| { - assert_eq!(BondedPools::::count(), 1); - assert_eq!(RewardPools::::count(), 1); assert_eq!(Staking::bonding_duration(), 3); - let last_pool = LastPoolId::::get(); - assert_eq!(last_pool, 1); }); } @@ -41,7 +36,7 @@ fn test_setup_works() { fn register_works() { ExtBuilder::default().build_and_execute(|| { // Controller 
account registers for fast unstake. - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Ensure stash is in the queue. assert_ne!(Queue::::get(1), None); }); @@ -56,7 +51,7 @@ fn cannot_register_if_not_bonded() { } // Attempt to fast unstake. assert_noop!( - FastUnstake::register_fast_unstake(RuntimeOrigin::signed(1), Some(1_u32)), + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(1)), Error::::NotController ); }); @@ -66,10 +61,10 @@ fn cannot_register_if_not_bonded() { fn cannot_register_if_in_queue() { ExtBuilder::default().build_and_execute(|| { // Insert some Queue item - Queue::::insert(1, Some(1_u32)); + Queue::::insert(1, ()); // Cannot re-register, already in queue assert_noop!( - FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32)), + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), Error::::AlreadyQueued ); }); @@ -79,10 +74,10 @@ fn cannot_register_if_in_queue() { fn cannot_register_if_head() { ExtBuilder::default().build_and_execute(|| { // Insert some Head item for stash - Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![], maybe_pool_id: None }); + Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![] }); // Controller attempts to regsiter assert_noop!( - FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32)), + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), Error::::AlreadyHead ); }); @@ -95,7 +90,7 @@ fn cannot_register_if_has_unlocking_chunks() { assert_ok!(Staking::unbond(RuntimeOrigin::signed(2), 50_u128)); // Cannot register for fast unstake with unlock chunks active assert_noop!( - FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32)), + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), Error::::NotFullyBonded ); }); @@ -105,7 +100,7 @@ fn cannot_register_if_has_unlocking_chunks() { fn 
deregister_works() { ExtBuilder::default().build_and_execute(|| { // Controller account registers for fast unstake. - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Controller then changes mind and deregisters. assert_ok!(FastUnstake::deregister(RuntimeOrigin::signed(2))); // Ensure stash no longer exists in the queue. @@ -117,7 +112,7 @@ fn deregister_works() { fn cannot_deregister_if_not_controller() { ExtBuilder::default().build_and_execute(|| { // Controller account registers for fast unstake. - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Stash tries to deregister. assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(1)), Error::::NotController); }); @@ -135,9 +130,9 @@ fn cannot_deregister_if_not_queued() { fn cannot_deregister_already_head() { ExtBuilder::default().build_and_execute(|| { // Controller attempts to register, should fail - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Insert some Head item for stash. 
- Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![], maybe_pool_id: None }); + Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![] }); // Controller attempts to deregister assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(2)), Error::::AlreadyHead); }); @@ -169,15 +164,15 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // set up Queue item - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1))); - assert_eq!(Queue::::get(1), Some(Some(1))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); // call on_idle with no remaining weight FastUnstake::on_idle(System::block_number(), Weight::from_ref_time(0)); // assert nothing changed in Queue and Head assert_eq!(Head::::get(), None); - assert_eq!(Queue::::get(1), Some(Some(1))); + assert_eq!(Queue::::get(1), Some(())); }); } @@ -189,8 +184,8 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // given - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1))); - assert_eq!(Queue::::get(1), Some(Some(1))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); assert_eq!(Queue::::count(), 1); assert_eq!(Head::::get(), None); @@ -209,7 +204,7 @@ mod on_idle { ); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: Some(1) }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) ); // when: another 1 era. @@ -225,11 +220,7 @@ mod on_idle { ); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) ); // when: then 5 eras, we only need 2 more. 
@@ -251,11 +242,7 @@ mod on_idle { ); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); // when: not enough weight to unstake: @@ -267,11 +254,7 @@ mod on_idle { assert_eq!(fast_unstake_events_since_last_call(), vec![]); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); // when: enough weight to get over at least one iteration: then we are unblocked and can @@ -287,7 +270,7 @@ mod on_idle { // then we finish the unbonding: assert_eq!( fast_unstake_events_since_last_call(), - vec![Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) }] + vec![Event::Unstaked { stash: 1, result: Ok(()) }] ); assert_eq!(Head::::get(), None,); @@ -302,11 +285,11 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // given - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4), None)); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(6), None)); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(8), None)); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(10), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(6))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(8))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(10))); assert_eq!(Queue::::count(), 5); assert_eq!(Head::::get(), None); @@ -317,11 +300,7 @@ mod on_idle { // then assert_eq!( Head::::get(), - Some(UnstakeRequest { - 
stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); assert_eq!(Queue::::count(), 4); @@ -338,11 +317,7 @@ mod on_idle { // then assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 5, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: None - }), + Some(UnstakeRequest { stash: 5, checked: bounded_vec![3, 2, 1, 0] }), ); assert_eq!(Queue::::count(), 3); @@ -350,7 +325,7 @@ mod on_idle { fast_unstake_events_since_last_call(), vec![ Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 1, maybe_pool_id: None, result: Ok(()) }, + Event::Unstaked { stash: 1, result: Ok(()) }, Event::Checking { stash: 5, eras: vec![3, 2, 1, 0] } ] ); @@ -364,10 +339,10 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // register multi accounts for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1))); - assert_eq!(Queue::::get(1), Some(Some(1))); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4), Some(1))); - assert_eq!(Queue::::get(3), Some(Some(1))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); + assert_eq!(Queue::::get(3), Some(())); // assert 2 queue items are in Queue & None in Head to start with assert_eq!(Queue::::count(), 2); @@ -397,9 +372,9 @@ mod on_idle { fast_unstake_events_since_last_call(), vec![ Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) }, + Event::Unstaked { stash: 1, result: Ok(()) }, Event::Checking { stash: 3, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 3, maybe_pool_id: Some(1), result: Ok(()) }, + Event::Unstaked { stash: 3, result: Ok(()) }, ] ); @@ -409,14 +384,14 @@ mod on_idle { } #[test] - fn 
successful_unstake_without_pool_join() { + fn successful_unstake() { ExtBuilder::default().build_and_execute(|| { ErasToCheckPerBlock::::put(BondingDuration::get() + 1); CurrentEra::::put(BondingDuration::get()); // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); - assert_eq!(Queue::::get(1), Some(None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); // process on idle next_block(true); @@ -427,11 +402,7 @@ mod on_idle { // assert head item present assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); next_block(true); @@ -441,55 +412,7 @@ mod on_idle { fast_unstake_events_since_last_call(), vec![ Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 1, maybe_pool_id: None, result: Ok(()) } - ] - ); - assert_unstaked(&1); - }); - } - - #[test] - fn successful_unstake_joining_bad_pool() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(BondingDuration::get() + 1); - CurrentEra::::put(BondingDuration::get()); - - // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(0))); - assert_eq!(Queue::::get(1), Some(Some(0))); - - // process on idle - next_block(true); - - // assert queue item has been moved to head - assert_eq!(Queue::::get(1), None); - - // assert head item present - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: Some(0) - }) - ); - - next_block(true); - assert_eq!(Head::::get(), None,); - - assert_eq!( - fast_unstake_events_since_last_call(), - vec![ - Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { - stash: 1, - maybe_pool_id: Some(0), - result: 
Err(DispatchError::Module(ModuleError { - index: 4, - error: [0, 0, 0, 0], - message: None - })) - } + Event::Unstaked { stash: 1, result: Ok(()) } ] ); assert_unstaked(&1); @@ -503,8 +426,8 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); - assert_eq!(Queue::::get(1), Some(Some(1))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); // process on idle next_block(true); @@ -515,11 +438,7 @@ mod on_idle { // assert head item present assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); next_block(true); @@ -529,11 +448,10 @@ mod on_idle { fast_unstake_events_since_last_call(), vec![ Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) } + Event::Unstaked { stash: 1, result: Ok(()) } ] ); assert_unstaked(&1); - assert!(pallet_nomination_pools::PoolMembers::::contains_key(&1)); }); } @@ -545,8 +463,8 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); - assert_eq!(Queue::::get(1), Some(Some(1))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); // process on idle next_block(true); @@ -557,40 +475,28 @@ mod on_idle { // assert head item present assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: Some(1) }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2], - maybe_pool_id: Some(1) - }) + 
Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); next_block(true); @@ -604,11 +510,10 @@ mod on_idle { Event::Checking { stash: 1, eras: vec![2] }, Event::Checking { stash: 1, eras: vec![1] }, Event::Checking { stash: 1, eras: vec![0] }, - Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) } + Event::Unstaked { stash: 1, result: Ok(()) } ] ); assert_unstaked(&1); - assert!(pallet_nomination_pools::PoolMembers::::contains_key(&1)); }); } @@ -623,39 +528,31 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); - assert_eq!(Queue::::get(1), Some(None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: None }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2], maybe_pool_id: None }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - 
maybe_pool_id: None - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); // when: a new era happens right before one is free. @@ -670,7 +567,6 @@ mod on_idle { stash: 1, // note era 0 is pruned to keep the vector length sane. checked: bounded_vec![3, 2, 1, 4], - maybe_pool_id: None }) ); @@ -685,7 +581,7 @@ mod on_idle { Event::Checking { stash: 1, eras: vec![1] }, Event::Checking { stash: 1, eras: vec![0] }, Event::Checking { stash: 1, eras: vec![4] }, - Event::Unstaked { stash: 1, maybe_pool_id: None, result: Ok(()) } + Event::Unstaked { stash: 1, result: Ok(()) } ] ); assert_unstaked(&1); @@ -700,23 +596,19 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // process 2 blocks next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: Some(1) }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) ); // when @@ -726,21 +618,13 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) ); // then we register a new era. 
@@ -752,22 +636,14 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 4], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 4] }) ); // progress to end next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 4, 1], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 4, 1] }) ); // but notice that we don't care about era 0 instead anymore! we're done. @@ -781,12 +657,11 @@ mod on_idle { Event::Checking { stash: 1, eras: vec![2] }, Event::Checking { stash: 1, eras: vec![4] }, Event::Checking { stash: 1, eras: vec![1] }, - Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) } + Event::Unstaked { stash: 1, result: Ok(()) } ] ); assert_unstaked(&1); - assert!(pallet_nomination_pools::PoolMembers::::contains_key(&1)); }); } @@ -812,26 +687,18 @@ mod on_idle { assert_ok!(Staking::nominate(RuntimeOrigin::signed(exposed), vec![exposed])); // register the exposed one. - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed))); // a few blocks later, we realize they are slashed next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: exposed, - checked: bounded_vec![3], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: exposed, checked: bounded_vec![3] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: exposed, - checked: bounded_vec![3, 2], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: exposed, checked: bounded_vec![3, 2] }) ); next_block(true); assert_eq!(Head::::get(), None); @@ -872,17 +739,13 @@ mod on_idle { assert_ok!(Staking::nominate(RuntimeOrigin::signed(exposed), vec![exposed])); // register the exposed one. 
- assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed))); // a few blocks later, we realize they are slashed next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: exposed, - checked: bounded_vec![3, 2], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: exposed, checked: bounded_vec![3, 2] }) ); next_block(true); assert_eq!(Head::::get(), None); @@ -909,10 +772,7 @@ mod on_idle { RuntimeOrigin::signed(VALIDATOR_PREFIX), vec![VALIDATOR_PREFIX] )); - assert_ok!(FastUnstake::register_fast_unstake( - RuntimeOrigin::signed(VALIDATOR_PREFIX), - None - )); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(VALIDATOR_PREFIX))); // but they indeed are exposed! assert!(pallet_staking::ErasStakers::::contains_key( @@ -943,17 +803,13 @@ mod on_idle { assert_ok!(Staking::validate(RuntimeOrigin::signed(42), Default::default())); // let them register: - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(42), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(42))); // 2 block's enough to unstake them. next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 42, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: 42, checked: bounded_vec![3, 2, 1, 0] }) ); next_block(true); assert_eq!(Head::::get(), None); @@ -962,7 +818,7 @@ mod on_idle { fast_unstake_events_since_last_call(), vec![ Event::Checking { stash: 42, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 42, maybe_pool_id: None, result: Ok(()) } + Event::Unstaked { stash: 42, result: Ok(()) } ] ); }); @@ -990,7 +846,7 @@ mod signed_extension { ExtBuilder::default().build_and_execute(|| { // given: stash for 2 is 1. 
// when - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // then // stash can't. @@ -1010,7 +866,7 @@ mod signed_extension { ExtBuilder::default().build_and_execute(|| { // given: stash for 2 is 1. // when - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); ErasToCheckPerBlock::::put(1); CurrentEra::::put(BondingDuration::get()); @@ -1018,7 +874,7 @@ mod signed_extension { assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: None }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) ); // then diff --git a/frame/fast-unstake/src/types.rs b/frame/fast-unstake/src/types.rs index ae8702e56a842..e8d538dce4802 100644 --- a/frame/fast-unstake/src/types.rs +++ b/frame/fast-unstake/src/types.rs @@ -23,7 +23,6 @@ use frame_support::{ traits::{Currency, Get, IsSubType}, BoundedVec, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; -use pallet_nomination_pools::PoolId; use scale_info::TypeInfo; use sp_runtime::transaction_validity::{InvalidTransaction, TransactionValidityError}; use sp_staking::EraIndex; @@ -42,8 +41,6 @@ pub struct UnstakeRequest, - /// The pool they wish to join, if any. - pub(crate) maybe_pool_id: Option, } #[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo, RuntimeDebugNoBound)] From a0ec652e341f694f182a3c5fcc80d4c3fb280003 Mon Sep 17 00:00:00 2001 From: ZhiYong Date: Mon, 26 Sep 2022 15:46:59 +0800 Subject: [PATCH 08/42] Remove discarded blocks and states from database by default (#11983) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 1.Add pruning param "canonical" in sc-cli. 2.Make PruningMode's default value to ArchiveCanonical. * Update tests in sc-state-db. * Update tests in sc-state-db. 
* 1.Add a new value `AllWithNonFinalized` in `enum BlocksPruning` which Corresponds to `blocks_pruning 0` in CLI . 2.Change value `All` to `AllFinalized` in `enum BlocksPruning` and make it to keep full finalized block history. * Make some corresponding adjustments based on the content in the conversation. * Update client/db/src/lib.rs Co-authored-by: Bastian Köcher * Apply suggestions from code review. * 1.Change `blocks_pruning` to be like `state_pruning` . * Fmt and add some doc. * Update client/cli/src/params/pruning_params.rs Co-authored-by: Bastian Köcher * Update client/cli/src/params/pruning_params.rs Co-authored-by: Bastian Köcher * Update doc. * Change `new_test_with_tx_storage` to take `BlocksPruning`. * Fmt Co-authored-by: Bastian Köcher --- bin/node/cli/benches/block_production.rs | 2 +- bin/node/cli/benches/transaction_pool.rs | 2 +- bin/node/testing/src/bench.rs | 2 +- client/cli/src/config.rs | 4 +- client/cli/src/params/pruning_params.rs | 31 +++- client/db/benches/state_access.rs | 2 +- client/db/src/lib.rs | 215 +++++++++++++++++++---- client/service/test/src/client/mod.rs | 4 +- client/service/test/src/lib.rs | 2 +- test-utils/client/src/lib.rs | 5 +- 10 files changed, 211 insertions(+), 58 deletions(-) diff --git a/bin/node/cli/benches/block_production.rs b/bin/node/cli/benches/block_production.rs index 0a734fa447448..4fcebb123d9e3 100644 --- a/bin/node/cli/benches/block_production.rs +++ b/bin/node/cli/benches/block_production.rs @@ -74,7 +74,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, trie_cache_maximum_size: Some(64 * 1024 * 1024), state_pruning: Some(PruningMode::ArchiveAll), - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepAll, chain_spec: spec, wasm_method: WasmExecutionMethod::Compiled { instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite, diff --git 
a/bin/node/cli/benches/transaction_pool.rs b/bin/node/cli/benches/transaction_pool.rs index e6084fba8242a..a8839642ddc26 100644 --- a/bin/node/cli/benches/transaction_pool.rs +++ b/bin/node/cli/benches/transaction_pool.rs @@ -68,7 +68,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, trie_cache_maximum_size: Some(64 * 1024 * 1024), state_pruning: Some(PruningMode::ArchiveAll), - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepAll, chain_spec: spec, wasm_method: WasmExecutionMethod::Interpreted, // NOTE: we enforce the use of the native runtime to make the errors more debuggable diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 7980cc102fb38..59f1fa94c9b20 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -392,7 +392,7 @@ impl BenchDb { trie_cache_maximum_size: Some(16 * 1024 * 1024), state_pruning: Some(PruningMode::ArchiveAll), source: database_type.into_settings(dir.into()), - blocks_pruning: sc_client_db::BlocksPruning::All, + blocks_pruning: sc_client_db::BlocksPruning::KeepAll, }; let task_executor = TaskExecutor::new(); diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index bc5941914de89..fad2ec7bc4a93 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -251,11 +251,11 @@ pub trait CliConfiguration: Sized { /// Get the block pruning mode. /// /// By default this is retrieved from `block_pruning` if it is available. Otherwise its - /// `BlocksPruning::All`. + /// `BlocksPruning::KeepFinalized`. fn blocks_pruning(&self) -> Result { self.pruning_params() .map(|x| x.blocks_pruning()) - .unwrap_or_else(|| Ok(BlocksPruning::All)) + .unwrap_or_else(|| Ok(BlocksPruning::KeepFinalized)) } /// Get the chain ID (string). 
diff --git a/client/cli/src/params/pruning_params.rs b/client/cli/src/params/pruning_params.rs index 34a0982e63d95..b764e4722e94d 100644 --- a/client/cli/src/params/pruning_params.rs +++ b/client/cli/src/params/pruning_params.rs @@ -30,13 +30,16 @@ pub struct PruningParams { /// or for all of the canonical blocks (i.e 'archive-canonical'). #[clap(alias = "pruning", long, value_name = "PRUNING_MODE")] pub state_pruning: Option, - /// Specify the number of finalized blocks to keep in the database. + /// Specify the blocks pruning mode, a number of blocks to keep or 'archive'. /// - /// Default is to keep all blocks. + /// Default is to keep all finalized blocks. + /// otherwise, all blocks can be kept (i.e 'archive'), + /// or for all canonical blocks (i.e 'archive-canonical'), + /// or for the last N blocks (i.e a number). /// /// NOTE: only finalized blocks are subject for removal! #[clap(alias = "keep-blocks", long, value_name = "COUNT")] - pub blocks_pruning: Option, + pub blocks_pruning: Option, } impl PruningParams { @@ -46,9 +49,12 @@ impl PruningParams { .as_ref() .map(|s| match s.as_str() { "archive" => Ok(PruningMode::ArchiveAll), + "archive-canonical" => Ok(PruningMode::ArchiveCanonical), bc => bc .parse() - .map_err(|_| error::Error::Input("Invalid pruning mode specified".to_string())) + .map_err(|_| { + error::Error::Input("Invalid state pruning mode specified".to_string()) + }) .map(PruningMode::blocks_pruning), }) .transpose() @@ -56,9 +62,18 @@ impl PruningParams { /// Get the block pruning value from the parameters pub fn blocks_pruning(&self) -> error::Result { - Ok(match self.blocks_pruning { - Some(n) => BlocksPruning::Some(n), - None => BlocksPruning::All, - }) + match self.blocks_pruning.as_ref() { + Some(bp) => match bp.as_str() { + "archive" => Ok(BlocksPruning::KeepAll), + "archive-canonical" => Ok(BlocksPruning::KeepFinalized), + bc => bc + .parse() + .map_err(|_| { + error::Error::Input("Invalid blocks pruning mode specified".to_string()) + 
}) + .map(BlocksPruning::Some), + }, + None => Ok(BlocksPruning::KeepFinalized), + } } } diff --git a/client/db/benches/state_access.rs b/client/db/benches/state_access.rs index 78aed7858e342..714dda82d61b7 100644 --- a/client/db/benches/state_access.rs +++ b/client/db/benches/state_access.rs @@ -122,7 +122,7 @@ fn create_backend(config: BenchmarkConfig, temp_dir: &TempDir) -> Backend trie_cache_maximum_size, state_pruning: Some(PruningMode::ArchiveAll), source: DatabaseSource::ParityDb { path }, - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepAll, }; Backend::new(settings, 100).expect("Creates backend") diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 79ef7e9b6625d..32c4c9ef85ed9 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -320,10 +320,12 @@ pub struct DatabaseSettings { } /// Block pruning settings. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, PartialEq)] pub enum BlocksPruning { - /// Keep full block history. - All, + /// Keep full block history, of every block that was ever imported. + KeepAll, + /// Keep full finalized block history. + KeepFinalized, /// Keep N recent finalized blocks. Some(u32), } @@ -1061,19 +1063,27 @@ impl Backend { /// Create new memory-backed client backend for tests. #[cfg(any(test, feature = "test-helpers"))] pub fn new_test(blocks_pruning: u32, canonicalization_delay: u64) -> Self { - Self::new_test_with_tx_storage(blocks_pruning, canonicalization_delay) + Self::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), canonicalization_delay) } /// Create new memory-backed client backend for tests. 
#[cfg(any(test, feature = "test-helpers"))] - pub fn new_test_with_tx_storage(blocks_pruning: u32, canonicalization_delay: u64) -> Self { + pub fn new_test_with_tx_storage( + blocks_pruning: BlocksPruning, + canonicalization_delay: u64, + ) -> Self { let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); let db = sp_database::as_database(db); + let state_pruning = match blocks_pruning { + BlocksPruning::KeepAll => PruningMode::ArchiveAll, + BlocksPruning::KeepFinalized => PruningMode::ArchiveCanonical, + BlocksPruning::Some(n) => PruningMode::blocks_pruning(n), + }; let db_setting = DatabaseSettings { trie_cache_maximum_size: Some(16 * 1024 * 1024), - state_pruning: Some(PruningMode::blocks_pruning(blocks_pruning)), + state_pruning: Some(state_pruning), source: DatabaseSource::Custom { db, require_create_flag: true }, - blocks_pruning: BlocksPruning::Some(blocks_pruning), + blocks_pruning, }; Self::new(db_setting, canonicalization_delay).expect("failed to create test-db") @@ -1707,32 +1717,47 @@ impl Backend { finalized: NumberFor, displaced: &FinalizationOutcome>, ) -> ClientResult<()> { - if let BlocksPruning::Some(blocks_pruning) = self.blocks_pruning { - // Always keep the last finalized block - let keep = std::cmp::max(blocks_pruning, 1); - if finalized >= keep.into() { - let number = finalized.saturating_sub(keep.into()); - self.prune_block(transaction, BlockId::::number(number))?; - } + match self.blocks_pruning { + BlocksPruning::KeepAll => {}, + BlocksPruning::Some(blocks_pruning) => { + // Always keep the last finalized block + let keep = std::cmp::max(blocks_pruning, 1); + if finalized >= keep.into() { + let number = finalized.saturating_sub(keep.into()); + self.prune_block(transaction, BlockId::::number(number))?; + } + self.prune_displaced_branches(transaction, finalized, displaced)?; + }, + BlocksPruning::KeepFinalized => { + self.prune_displaced_branches(transaction, finalized, displaced)?; + }, + } + Ok(()) + } - // Also discard all blocks from 
displaced branches - for h in displaced.leaves() { - let mut number = finalized; - let mut hash = *h; - // Follow displaced chains back until we reach a finalized block. - // Since leaves are discarded due to finality, they can't have parents - // that are canonical, but not yet finalized. So we stop deleting as soon as - // we reach canonical chain. - while self.blockchain.hash(number)? != Some(hash) { - let id = BlockId::::hash(hash); - match self.blockchain.header(id)? { - Some(header) => { - self.prune_block(transaction, id)?; - number = header.number().saturating_sub(One::one()); - hash = *header.parent_hash(); - }, - None => break, - } + fn prune_displaced_branches( + &self, + transaction: &mut Transaction, + finalized: NumberFor, + displaced: &FinalizationOutcome>, + ) -> ClientResult<()> { + // Discard all blocks from displaced branches + for h in displaced.leaves() { + let mut number = finalized; + let mut hash = *h; + // Follow displaced chains back until we reach a finalized block. + // Since leaves are discarded due to finality, they can't have parents + // that are canonical, but not yet finalized. So we stop deleting as soon as + // we reach canonical chain. + while self.blockchain.hash(number)? != Some(hash) { + let id = BlockId::::hash(hash); + match self.blockchain.header(id)? { + Some(header) => { + self.prune_block(transaction, id)?; + number = header.number().saturating_sub(One::one()); + hash = *header.parent_hash(); + }, + None => break, } } } @@ -1752,6 +1777,13 @@ impl Backend { columns::BODY, id, )?; + utils::remove_from_db( + transaction, + &*self.storage.db, + columns::KEY_LOOKUP, + columns::JUSTIFICATIONS, + id, + )?; if let Some(index) = read_db(&*self.storage.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)? 
{ @@ -2506,7 +2538,7 @@ pub(crate) mod tests { trie_cache_maximum_size: Some(16 * 1024 * 1024), state_pruning: Some(PruningMode::blocks_pruning(1)), source: DatabaseSource::Custom { db: backing, require_create_flag: false }, - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepFinalized, }, 0, ) @@ -3176,7 +3208,7 @@ pub(crate) mod tests { #[test] fn prune_blocks_on_finalize() { - let backend = Backend::::new_test_with_tx_storage(2, 0); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 0); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); for i in 0..5 { @@ -3210,9 +3242,114 @@ pub(crate) mod tests { assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); } + #[test] + fn prune_blocks_on_finalize_in_keep_all() { + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::KeepAll, 0); + let mut blocks = Vec::new(); + let mut prev_hash = Default::default(); + for i in 0..5 { + let hash = insert_block( + &backend, + i, + prev_hash, + None, + Default::default(), + vec![i.into()], + None, + ) + .unwrap(); + blocks.push(hash); + prev_hash = hash; + } + + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + for i in 1..3 { + op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); + } + backend.commit_operation(op).unwrap(); + + let bc = backend.blockchain(); + assert_eq!(Some(vec![0.into()]), bc.body(BlockId::hash(blocks[0])).unwrap()); + assert_eq!(Some(vec![1.into()]), bc.body(BlockId::hash(blocks[1])).unwrap()); + assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(blocks[2])).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); + } + + #[test] + fn prune_blocks_on_finalize_with_fork_in_keep_all() { + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::KeepAll, 10); 
+ let mut blocks = Vec::new(); + let mut prev_hash = Default::default(); + for i in 0..5 { + let hash = insert_block( + &backend, + i, + prev_hash, + None, + Default::default(), + vec![i.into()], + None, + ) + .unwrap(); + blocks.push(hash); + prev_hash = hash; + } + + // insert a fork at block 2 + let fork_hash_root = insert_block( + &backend, + 2, + blocks[1], + None, + sp_core::H256::random(), + vec![2.into()], + None, + ) + .unwrap(); + insert_block( + &backend, + 3, + fork_hash_root, + None, + H256::random(), + vec![3.into(), 11.into()], + None, + ) + .unwrap(); + + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + op.mark_head(BlockId::Hash(blocks[4])).unwrap(); + backend.commit_operation(op).unwrap(); + + let bc = backend.blockchain(); + assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(fork_hash_root)).unwrap()); + + for i in 1..5 { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[i])).unwrap(); + op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); + backend.commit_operation(op).unwrap(); + } + + assert_eq!(Some(vec![0.into()]), bc.body(BlockId::hash(blocks[0])).unwrap()); + assert_eq!(Some(vec![1.into()]), bc.body(BlockId::hash(blocks[1])).unwrap()); + assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(blocks[2])).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); + + assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(fork_hash_root)).unwrap()); + assert_eq!(bc.info().best_number, 4); + for i in 0..5 { + assert!(bc.hash(i).unwrap().is_some()); + } + } + #[test] fn prune_blocks_on_finalize_with_fork() { - let backend = Backend::::new_test_with_tx_storage(2, 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); let mut blocks = Vec::new(); let mut 
prev_hash = Default::default(); for i in 0..5 { @@ -3273,7 +3410,7 @@ pub(crate) mod tests { #[test] fn indexed_data_block_body() { - let backend = Backend::::new_test_with_tx_storage(1, 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); let x0 = ExtrinsicWrapper::from(0u64).encode(); let x1 = ExtrinsicWrapper::from(1u64).encode(); @@ -3315,7 +3452,7 @@ pub(crate) mod tests { #[test] fn index_invalid_size() { - let backend = Backend::::new_test_with_tx_storage(1, 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); let x0 = ExtrinsicWrapper::from(0u64).encode(); let x1 = ExtrinsicWrapper::from(1u64).encode(); @@ -3350,7 +3487,7 @@ pub(crate) mod tests { #[test] fn renew_transaction_storage() { - let backend = Backend::::new_test_with_tx_storage(2, 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); let x1 = ExtrinsicWrapper::from(0u64).encode(); @@ -3397,7 +3534,7 @@ pub(crate) mod tests { #[test] fn remove_leaf_block_works() { - let backend = Backend::::new_test_with_tx_storage(2, 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); for i in 0..2 { diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 2ab1415f8ca31..e0f47110d9046 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1200,7 +1200,7 @@ fn doesnt_import_blocks_that_revert_finality() { DatabaseSettings { trie_cache_maximum_size: Some(1 << 20), state_pruning: Some(PruningMode::ArchiveAll), - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepAll, source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, u64::MAX, @@ -1426,7 +1426,7 @@ fn returns_status_for_pruned_blocks() { DatabaseSettings { 
trie_cache_maximum_size: Some(1 << 20), state_pruning: Some(PruningMode::blocks_pruning(1)), - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepFinalized, source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, u64::MAX, diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 11c1cbaf7afb1..23245d46cba10 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -237,7 +237,7 @@ fn node_config< database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, trie_cache_maximum_size: Some(16 * 1024 * 1024), state_pruning: Default::default(), - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepFinalized, chain_spec: Box::new((*spec).clone()), wasm_method: sc_service::config::WasmExecutionMethod::Interpreted, wasm_runtime_overrides: Default::default(), diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index be4549c9957c0..d3e71f0ad28d6 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -26,7 +26,7 @@ pub use sc_client_api::{ execution_extensions::{ExecutionExtensions, ExecutionStrategies}, BadBlocks, ForkBlocks, }; -pub use sc_client_db::{self, Backend}; +pub use sc_client_db::{self, Backend, BlocksPruning}; pub use sc_executor::{self, NativeElseWasmExecutor, WasmExecutionMethod}; pub use sc_service::{client, RpcHandlers}; pub use sp_consensus; @@ -102,7 +102,8 @@ impl /// Create new `TestClientBuilder` with default backend and storage chain mode pub fn with_tx_storage(blocks_pruning: u32) -> Self { - let backend = Arc::new(Backend::new_test_with_tx_storage(blocks_pruning, 0)); + let backend = + Arc::new(Backend::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), 0)); Self::with_backend(backend) } } From 0dbeaa0e98e78a848046a815f979bfce67f98c6d Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Mon, 26 Sep 2022 12:08:34 +0100 
Subject: [PATCH 09/42] re-export weight file for fast-unstsake pallet (#12352) --- frame/fast-unstake/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index 5acc9940debf1..9bfb29f8457fa 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -90,7 +90,7 @@ pub mod pallet { }; use sp_staking::EraIndex; use sp_std::{prelude::*, vec::Vec}; - use weights::WeightInfo; + pub use weights::WeightInfo; #[derive(scale_info::TypeInfo, codec::Encode, codec::Decode, codec::MaxEncodedLen)] #[codec(mel_bound(T: Config))] From f360c87073cb5f59b7b55cfbfef5d91a40d1a217 Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Mon, 26 Sep 2022 15:10:09 +0300 Subject: [PATCH 10/42] Move transactions protocol to its own crate (#12264) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Move transaction protocol to its own crate * Update Cargo.lock * Fix binaries * Update client/network/transactions/src/lib.rs Co-authored-by: Dmitry Markin * Update client/service/src/builder.rs Co-authored-by: Bastian Köcher * Apply review comments * Revert one change and apply cargo-fmt * Remove Transaction from Message * Add array-bytes * trigger CI * Add comment about codec index Co-authored-by: Dmitry Markin Co-authored-by: Bastian Köcher --- Cargo.lock | 24 ++ bin/node-template/node/src/service.rs | 3 +- bin/node/cli/src/service.rs | 3 +- client/beefy/Cargo.toml | 1 + client/beefy/src/lib.rs | 4 +- client/cli/Cargo.toml | 1 + client/cli/src/params/network_params.rs | 5 +- client/finality-grandpa/src/lib.rs | 8 +- client/network/common/Cargo.toml | 3 + client/network/common/src/config.rs | 128 +++++++++++ client/network/{ => common}/src/error.rs | 3 +- client/network/common/src/lib.rs | 8 + client/network/common/src/service.rs | 29 --- client/network/{ => common}/src/utils.rs | 0 client/network/src/config.rs | 
209 +----------------- client/network/src/discovery.rs | 3 +- client/network/src/lib.rs | 13 +- client/network/src/peer_info.rs | 2 +- client/network/src/protocol.rs | 13 +- client/network/src/protocol/message.rs | 10 +- client/network/src/service.rs | 77 ++----- client/network/src/service/tests.rs | 56 ++--- client/network/test/src/lib.rs | 14 +- client/network/transactions/Cargo.toml | 28 +++ client/network/transactions/src/config.rs | 98 ++++++++ .../src/lib.rs} | 108 ++++----- client/service/Cargo.toml | 1 + client/service/src/builder.rs | 62 ++++-- client/service/src/config.rs | 6 +- client/service/src/error.rs | 3 +- client/service/src/lib.rs | 7 +- client/service/test/src/lib.rs | 7 +- 32 files changed, 466 insertions(+), 471 deletions(-) rename client/network/{ => common}/src/error.rs (96%) rename client/network/{ => common}/src/utils.rs (100%) create mode 100644 client/network/transactions/Cargo.toml create mode 100644 client/network/transactions/src/config.rs rename client/network/{src/transactions.rs => transactions/src/lib.rs} (84%) diff --git a/Cargo.lock b/Cargo.lock index 6ea79a120361e..a9a0eef551179 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -468,6 +468,7 @@ dependencies = [ "sc-finality-grandpa", "sc-keystore", "sc-network", + "sc-network-common", "sc-network-gossip", "sc-network-test", "sc-utils", @@ -7934,6 +7935,7 @@ dependencies = [ "sc-client-db", "sc-keystore", "sc-network", + "sc-network-common", "sc-service", "sc-telemetry", "sc-tracing", @@ -8547,7 +8549,9 @@ dependencies = [ "bitflags", "bytes", "futures", + "futures-timer", "libp2p", + "linked_hash_set", "parity-scale-codec", "prost-build 0.10.4", "sc-consensus", @@ -8558,6 +8562,7 @@ dependencies = [ "sp-consensus", "sp-finality-grandpa", "sp-runtime", + "substrate-prometheus-endpoint", "thiserror", ] @@ -8663,6 +8668,24 @@ dependencies = [ "substrate-test-runtime-client", ] +[[package]] +name = "sc-network-transactions" +version = "0.10.0-dev" +dependencies = [ + "array-bytes", + 
"futures", + "hex", + "libp2p", + "log", + "parity-scale-codec", + "pin-project", + "sc-network-common", + "sc-peerset", + "sp-consensus", + "sp-runtime", + "substrate-prometheus-endpoint", +] + [[package]] name = "sc-offchain" version = "4.0.0-dev" @@ -8851,6 +8874,7 @@ dependencies = [ "sc-network-common", "sc-network-light", "sc-network-sync", + "sc-network-transactions", "sc-offchain", "sc-rpc", "sc-rpc-server", diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 6ec9a33749a69..96de6e17f3bfd 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -191,7 +191,7 @@ pub fn new_full(mut config: Configuration) -> Result Vec::default(), )); - let (network, system_rpc_tx, network_starter) = + let (network, system_rpc_tx, tx_handler_controller, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), @@ -238,6 +238,7 @@ pub fn new_full(mut config: Configuration) -> Result rpc_builder: rpc_extensions_builder, backend, system_rpc_tx, + tx_handler_controller, config, telemetry: telemetry.as_mut(), })?; diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index a3098eac6402f..6c29f0c08ee13 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -354,7 +354,7 @@ pub fn new_full_base( Vec::default(), )); - let (network, system_rpc_tx, network_starter) = + let (network, system_rpc_tx, tx_handler_controller, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), @@ -392,6 +392,7 @@ pub fn new_full_base( transaction_pool: transaction_pool.clone(), task_manager: &mut task_manager, system_rpc_tx, + tx_handler_controller, telemetry: telemetry.as_mut(), })?; diff --git a/client/beefy/Cargo.toml b/client/beefy/Cargo.toml index 47a3be859cbbb..a125d4c8d4f07 100644 --- a/client/beefy/Cargo.toml +++ b/client/beefy/Cargo.toml @@ -27,6 
+27,7 @@ sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } sc-finality-grandpa = { version = "0.10.0-dev", path = "../../client/finality-grandpa" } sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-network-gossip = { version = "0.10.0-dev", path = "../network-gossip" } sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index 41eeec43d64bd..ad527b2929585 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -83,8 +83,8 @@ pub(crate) mod beefy_protocol_name { /// For standard protocol name see [`beefy_protocol_name::standard_name`]. pub fn beefy_peers_set_config( protocol_name: ProtocolName, -) -> sc_network::config::NonDefaultSetConfig { - let mut cfg = sc_network::config::NonDefaultSetConfig::new(protocol_name, 1024 * 1024); +) -> sc_network_common::config::NonDefaultSetConfig { + let mut cfg = sc_network_common::config::NonDefaultSetConfig::new(protocol_name, 1024 * 1024); cfg.allow_non_reserved(25, 25); cfg.add_fallback_names(beefy_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect()); diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index e5cd6167596c0..37a8fd2e0b64d 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -34,6 +34,7 @@ sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-client-db = { version = "0.10.0-dev", default-features = false, path = "../db" } sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-service = { version = "0.10.0-dev", default-features = false, path = "../service" } sc-telemetry = { version = "4.0.0-dev", path = 
"../telemetry" } sc-tracing = { version = "4.0.0-dev", path = "../tracing" } diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index 74c2db92c3215..0450b5f0e2566 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -19,11 +19,10 @@ use crate::{arg_enums::SyncMode, params::node_key_params::NodeKeyParams}; use clap::Args; use sc_network::{ - config::{ - NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, SetConfig, TransportConfig, - }, + config::{NetworkConfiguration, NodeKeyConfig}, multiaddr::Protocol, }; +use sc_network_common::config::{NonReservedPeerMode, SetConfig, TransportConfig}; use sc_service::{ config::{Multiaddr, MultiaddrWithPeerId}, ChainSpec, ChainType, diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 7e47b70bd6b98..d5c05fea78aa2 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -688,18 +688,18 @@ pub struct GrandpaParams { /// For standard protocol name see [`crate::protocol_standard_name`]. pub fn grandpa_peers_set_config( protocol_name: ProtocolName, -) -> sc_network::config::NonDefaultSetConfig { +) -> sc_network_common::config::NonDefaultSetConfig { use communication::grandpa_protocol_name; - sc_network::config::NonDefaultSetConfig { + sc_network_common::config::NonDefaultSetConfig { notifications_protocol: protocol_name, fallback_names: grandpa_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect(), // Notifications reach ~256kiB in size at the time of writing on Kusama and Polkadot. 
max_notification_size: 1024 * 1024, - set_config: sc_network::config::SetConfig { + set_config: sc_network_common::config::SetConfig { in_peers: 0, out_peers: 0, reserved_nodes: Vec::new(), - non_reserved_mode: sc_network::config::NonReservedPeerMode::Deny, + non_reserved_mode: sc_network_common::config::NonReservedPeerMode::Deny, }, } } diff --git a/client/network/common/Cargo.toml b/client/network/common/Cargo.toml index 47d43e8b4b03f..1ee7b15538366 100644 --- a/client/network/common/Cargo.toml +++ b/client/network/common/Cargo.toml @@ -24,7 +24,10 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } futures = "0.3.21" +futures-timer = "3.0.2" libp2p = "0.46.1" +linked_hash_set = "0.1.3" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } smallvec = "1.8.0" sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } diff --git a/client/network/common/src/config.rs b/client/network/common/src/config.rs index 8b7e045780d7d..fb23cd0174922 100644 --- a/client/network/common/src/config.rs +++ b/client/network/common/src/config.rs @@ -18,6 +18,8 @@ //! Configuration of the networking layer. +use crate::protocol; + use libp2p::{multiaddr, Multiaddr, PeerId}; use std::{fmt, str, str::FromStr}; @@ -171,3 +173,129 @@ impl From for ParseErr { Self::MultiaddrParse(err) } } + +/// Configuration for a set of nodes. +#[derive(Clone, Debug)] +pub struct SetConfig { + /// Maximum allowed number of incoming substreams related to this set. + pub in_peers: u32, + /// Number of outgoing substreams related to this set that we're trying to maintain. + pub out_peers: u32, + /// List of reserved node addresses. + pub reserved_nodes: Vec, + /// Whether nodes that aren't in [`SetConfig::reserved_nodes`] are accepted or automatically + /// refused. 
+ pub non_reserved_mode: NonReservedPeerMode, +} + +impl Default for SetConfig { + fn default() -> Self { + Self { + in_peers: 25, + out_peers: 75, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Accept, + } + } +} + +/// Extension to [`SetConfig`] for sets that aren't the default set. +/// +/// > **Note**: As new fields might be added in the future, please consider using the `new` method +/// > and modifiers instead of creating this struct manually. +#[derive(Clone, Debug)] +pub struct NonDefaultSetConfig { + /// Name of the notifications protocols of this set. A substream on this set will be + /// considered established once this protocol is open. + /// + /// > **Note**: This field isn't present for the default set, as this is handled internally + /// > by the networking code. + pub notifications_protocol: protocol::ProtocolName, + /// If the remote reports that it doesn't support the protocol indicated in the + /// `notifications_protocol` field, then each of these fallback names will be tried one by + /// one. + /// + /// If a fallback is used, it will be reported in + /// `sc_network::protocol::event::Event::NotificationStreamOpened::negotiated_fallback` + pub fallback_names: Vec, + /// Maximum allowed size of single notifications. + pub max_notification_size: u64, + /// Base configuration. + pub set_config: SetConfig, +} + +impl NonDefaultSetConfig { + /// Creates a new [`NonDefaultSetConfig`]. Zero slots and accepts only reserved nodes. + pub fn new(notifications_protocol: protocol::ProtocolName, max_notification_size: u64) -> Self { + Self { + notifications_protocol, + max_notification_size, + fallback_names: Vec::new(), + set_config: SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Deny, + }, + } + } + + /// Modifies the configuration to allow non-reserved nodes. 
+ pub fn allow_non_reserved(&mut self, in_peers: u32, out_peers: u32) { + self.set_config.in_peers = in_peers; + self.set_config.out_peers = out_peers; + self.set_config.non_reserved_mode = NonReservedPeerMode::Accept; + } + + /// Add a node to the list of reserved nodes. + pub fn add_reserved(&mut self, peer: MultiaddrWithPeerId) { + self.set_config.reserved_nodes.push(peer); + } + + /// Add a list of protocol names used for backward compatibility. + /// + /// See the explanations in [`NonDefaultSetConfig::fallback_names`]. + pub fn add_fallback_names(&mut self, fallback_names: Vec) { + self.fallback_names.extend(fallback_names); + } +} + +/// Configuration for the transport layer. +#[derive(Clone, Debug)] +pub enum TransportConfig { + /// Normal transport mode. + Normal { + /// If true, the network will use mDNS to discover other libp2p nodes on the local network + /// and connect to them if they support the same chain. + enable_mdns: bool, + + /// If true, allow connecting to private IPv4 addresses (as defined in + /// [RFC1918](https://tools.ietf.org/html/rfc1918)). Irrelevant for addresses that have + /// been passed in `::sc_network::config::NetworkConfiguration::boot_nodes`. + allow_private_ipv4: bool, + }, + + /// Only allow connections within the same process. + /// Only addresses of the form `/memory/...` will be supported. + MemoryOnly, +} + +/// The policy for connections to non-reserved peers. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum NonReservedPeerMode { + /// Accept them. This is the default. + Accept, + /// Deny them. + Deny, +} + +impl NonReservedPeerMode { + /// Attempt to parse the peer mode from a string. 
+ pub fn parse(s: &str) -> Option { + match s { + "accept" => Some(Self::Accept), + "deny" => Some(Self::Deny), + _ => None, + } + } +} diff --git a/client/network/src/error.rs b/client/network/common/src/error.rs similarity index 96% rename from client/network/src/error.rs rename to client/network/common/src/error.rs index b4287ffbd55db..4326b1af52836 100644 --- a/client/network/src/error.rs +++ b/client/network/common/src/error.rs @@ -18,9 +18,8 @@ //! Substrate network possible errors. -use crate::config::TransportConfig; +use crate::{config::TransportConfig, protocol::ProtocolName}; use libp2p::{Multiaddr, PeerId}; -use sc_network_common::protocol::ProtocolName; use std::fmt; diff --git a/client/network/common/src/lib.rs b/client/network/common/src/lib.rs index 3a30d24900199..36e67f11e5cff 100644 --- a/client/network/common/src/lib.rs +++ b/client/network/common/src/lib.rs @@ -19,8 +19,16 @@ //! Common data structures of the networking layer. pub mod config; +pub mod error; pub mod message; pub mod protocol; pub mod request_responses; pub mod service; pub mod sync; +pub mod utils; + +/// Minimum Requirements for a Hash within Networking +pub trait ExHashT: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static {} + +impl ExHashT for T where T: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static +{} diff --git a/client/network/common/src/service.rs b/client/network/common/src/service.rs index 88583832e4c38..aa4967ba51700 100644 --- a/client/network/common/src/service.rs +++ b/client/network/common/src/service.rs @@ -604,35 +604,6 @@ where } } -/// Provides ability to propagate transactions over the network. -pub trait NetworkTransaction { - /// You may call this when new transactions are imported by the transaction pool. - /// - /// All transactions will be fetched from the `TransactionPool` that was passed at - /// initialization as part of the configuration and propagated to peers. 
- fn trigger_repropagate(&self); - - /// You must call when new transaction is imported by the transaction pool. - /// - /// This transaction will be fetched from the `TransactionPool` that was passed at - /// initialization as part of the configuration and propagated to peers. - fn propagate_transaction(&self, hash: H); -} - -impl NetworkTransaction for Arc -where - T: ?Sized, - T: NetworkTransaction, -{ - fn trigger_repropagate(&self) { - T::trigger_repropagate(self) - } - - fn propagate_transaction(&self, hash: H) { - T::propagate_transaction(self, hash) - } -} - /// Provides ability to announce blocks to the network. pub trait NetworkBlock { /// Make sure an important block is propagated to peers. diff --git a/client/network/src/utils.rs b/client/network/common/src/utils.rs similarity index 100% rename from client/network/src/utils.rs rename to client/network/common/src/utils.rs diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 202a628884d79..b2adfa81d065b 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -27,24 +27,24 @@ pub use sc_network_common::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, sync::warp::WarpSyncProvider, + ExHashT, }; pub use libp2p::{build_multiaddr, core::PublicKey, identity}; -use crate::ExHashT; - use core::{fmt, iter}; -use futures::future; use libp2p::{ identity::{ed25519, Keypair}, multiaddr, Multiaddr, }; use prometheus_endpoint::Registry; use sc_consensus::ImportQueue; -use sc_network_common::{config::MultiaddrWithPeerId, protocol::ProtocolName, sync::ChainSync}; +use sc_network_common::{ + config::{MultiaddrWithPeerId, NonDefaultSetConfig, SetConfig, TransportConfig}, + sync::ChainSync, +}; use sp_runtime::traits::Block as BlockT; use std::{ - collections::HashMap, error::Error, fs, future::Future, @@ -52,16 +52,14 @@ use std::{ net::Ipv4Addr, path::{Path, PathBuf}, pin::Pin, - str, sync::Arc, }; use zeroize::Zeroize; /// Network 
initialization parameters. -pub struct Params +pub struct Params where B: BlockT + 'static, - H: ExHashT, { /// Assigned role for our node (full, light, ...). pub role: Role, @@ -70,21 +68,12 @@ where /// default. pub executor: Option + Send>>) + Send>>, - /// How to spawn the background task dedicated to the transactions handler. - pub transactions_handler_executor: Box + Send>>) + Send>, - /// Network layer configuration. pub network_config: NetworkConfiguration, /// Client that contains the blockchain. pub chain: Arc, - /// Pool of transactions. - /// - /// The network worker will fetch transactions from this object in order to propagate them on - /// the network. - pub transaction_pool: Arc>, - /// Legacy name of the protocol to use on the wire. Should be different for each chain. pub protocol_id: ProtocolId, @@ -166,66 +155,6 @@ impl fmt::Display for Role { } } -/// Result of the transaction import. -#[derive(Clone, Copy, Debug)] -pub enum TransactionImport { - /// Transaction is good but already known by the transaction pool. - KnownGood, - /// Transaction is good and not yet known. - NewGood, - /// Transaction is invalid. - Bad, - /// Transaction import was not performed. - None, -} - -/// Future resolving to transaction import result. -pub type TransactionImportFuture = Pin + Send>>; - -/// Transaction pool interface -pub trait TransactionPool: Send + Sync { - /// Get transactions from the pool that are ready to be propagated. - fn transactions(&self) -> Vec<(H, B::Extrinsic)>; - /// Get hash of transaction. - fn hash_of(&self, transaction: &B::Extrinsic) -> H; - /// Import a transaction into the pool. - /// - /// This will return future. - fn import(&self, transaction: B::Extrinsic) -> TransactionImportFuture; - /// Notify the pool about transactions broadcast. - fn on_broadcasted(&self, propagations: HashMap>); - /// Get transaction by hash. 
- fn transaction(&self, hash: &H) -> Option; -} - -/// Dummy implementation of the [`TransactionPool`] trait for a transaction pool that is always -/// empty and discards all incoming transactions. -/// -/// Requires the "hash" type to implement the `Default` trait. -/// -/// Useful for testing purposes. -pub struct EmptyTransactionPool; - -impl TransactionPool for EmptyTransactionPool { - fn transactions(&self) -> Vec<(H, B::Extrinsic)> { - Vec::new() - } - - fn hash_of(&self, _transaction: &B::Extrinsic) -> H { - Default::default() - } - - fn import(&self, _transaction: B::Extrinsic) -> TransactionImportFuture { - Box::pin(future::ready(TransactionImport::KnownGood)) - } - - fn on_broadcasted(&self, _: HashMap>) {} - - fn transaction(&self, _h: &H) -> Option { - None - } -} - /// Sync operation mode. #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum SyncMode { @@ -394,132 +323,6 @@ impl NetworkConfiguration { } } -/// Configuration for a set of nodes. -#[derive(Clone, Debug)] -pub struct SetConfig { - /// Maximum allowed number of incoming substreams related to this set. - pub in_peers: u32, - /// Number of outgoing substreams related to this set that we're trying to maintain. - pub out_peers: u32, - /// List of reserved node addresses. - pub reserved_nodes: Vec, - /// Whether nodes that aren't in [`SetConfig::reserved_nodes`] are accepted or automatically - /// refused. - pub non_reserved_mode: NonReservedPeerMode, -} - -impl Default for SetConfig { - fn default() -> Self { - Self { - in_peers: 25, - out_peers: 75, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Accept, - } - } -} - -/// Extension to [`SetConfig`] for sets that aren't the default set. -/// -/// > **Note**: As new fields might be added in the future, please consider using the `new` method -/// > and modifiers instead of creating this struct manually. -#[derive(Clone, Debug)] -pub struct NonDefaultSetConfig { - /// Name of the notifications protocols of this set. 
A substream on this set will be - /// considered established once this protocol is open. - /// - /// > **Note**: This field isn't present for the default set, as this is handled internally - /// > by the networking code. - pub notifications_protocol: ProtocolName, - /// If the remote reports that it doesn't support the protocol indicated in the - /// `notifications_protocol` field, then each of these fallback names will be tried one by - /// one. - /// - /// If a fallback is used, it will be reported in - /// [`crate::Event::NotificationStreamOpened::negotiated_fallback`]. - pub fallback_names: Vec, - /// Maximum allowed size of single notifications. - pub max_notification_size: u64, - /// Base configuration. - pub set_config: SetConfig, -} - -impl NonDefaultSetConfig { - /// Creates a new [`NonDefaultSetConfig`]. Zero slots and accepts only reserved nodes. - pub fn new(notifications_protocol: ProtocolName, max_notification_size: u64) -> Self { - Self { - notifications_protocol, - max_notification_size, - fallback_names: Vec::new(), - set_config: SetConfig { - in_peers: 0, - out_peers: 0, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Deny, - }, - } - } - - /// Modifies the configuration to allow non-reserved nodes. - pub fn allow_non_reserved(&mut self, in_peers: u32, out_peers: u32) { - self.set_config.in_peers = in_peers; - self.set_config.out_peers = out_peers; - self.set_config.non_reserved_mode = NonReservedPeerMode::Accept; - } - - /// Add a node to the list of reserved nodes. - pub fn add_reserved(&mut self, peer: MultiaddrWithPeerId) { - self.set_config.reserved_nodes.push(peer); - } - - /// Add a list of protocol names used for backward compatibility. - /// - /// See the explanations in [`NonDefaultSetConfig::fallback_names`]. - pub fn add_fallback_names(&mut self, fallback_names: Vec) { - self.fallback_names.extend(fallback_names); - } -} - -/// Configuration for the transport layer. 
-#[derive(Clone, Debug)] -pub enum TransportConfig { - /// Normal transport mode. - Normal { - /// If true, the network will use mDNS to discover other libp2p nodes on the local network - /// and connect to them if they support the same chain. - enable_mdns: bool, - - /// If true, allow connecting to private IPv4 addresses (as defined in - /// [RFC1918](https://tools.ietf.org/html/rfc1918)). Irrelevant for addresses that have - /// been passed in [`NetworkConfiguration::boot_nodes`]. - allow_private_ipv4: bool, - }, - - /// Only allow connections within the same process. - /// Only addresses of the form `/memory/...` will be supported. - MemoryOnly, -} - -/// The policy for connections to non-reserved peers. -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum NonReservedPeerMode { - /// Accept them. This is the default. - Accept, - /// Deny them. - Deny, -} - -impl NonReservedPeerMode { - /// Attempt to parse the peer mode from a string. - pub fn parse(s: &str) -> Option { - match s { - "accept" => Some(Self::Accept), - "deny" => Some(Self::Deny), - _ => None, - } - } -} - /// The configuration of a node's secret key, describing the type of key /// and how it is obtained. A node's identity keypair is the result of /// the evaluation of the node key configuration. diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index ab93662968dc2..8422e34485125 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -46,7 +46,6 @@ //! active mechanism that asks nodes for the addresses they are listening on. Whenever we learn //! of a node's address, you must call `add_self_reported_address`. 
-use crate::utils::LruHashSet; use futures::prelude::*; use futures_timer::Delay; use ip_network::IpNetwork; @@ -72,7 +71,7 @@ use libp2p::{ }, }; use log::{debug, error, info, trace, warn}; -use sc_network_common::config::ProtocolId; +use sc_network_common::{config::ProtocolId, utils::LruHashSet}; use sp_core::hexdisplay::HexDisplay; use std::{ cmp, diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 320104d0f9554..d17f47328b804 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -251,12 +251,9 @@ mod protocol; mod request_responses; mod service; mod transport; -mod utils; pub mod config; -pub mod error; pub mod network_state; -pub mod transactions; #[doc(inline)] pub use libp2p::{multiaddr, Multiaddr, PeerId}; @@ -269,8 +266,8 @@ pub use sc_network_common::{ request_responses::{IfDisconnected, RequestFailure}, service::{ KademliaKey, NetworkBlock, NetworkDHTProvider, NetworkRequest, NetworkSigner, - NetworkStateInfo, NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest, - NetworkTransaction, Signature, SigningError, + NetworkStateInfo, NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest, Signature, + SigningError, }, sync::{ warp::{WarpSyncPhase, WarpSyncProgress}, @@ -295,9 +292,3 @@ const MAX_CONNECTIONS_PER_PEER: usize = 2; /// The maximum number of concurrent established connections that were incoming. 
const MAX_CONNECTIONS_ESTABLISHED_INCOMING: u32 = 10_000; - -/// Minimum Requirements for a Hash within Networking -pub trait ExHashT: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static {} - -impl ExHashT for T where T: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static -{} diff --git a/client/network/src/peer_info.rs b/client/network/src/peer_info.rs index d668cb25ea455..c62c2ea1c5d98 100644 --- a/client/network/src/peer_info.rs +++ b/client/network/src/peer_info.rs @@ -16,7 +16,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::utils::interval; use fnv::FnvHashMap; use futures::prelude::*; use libp2p::{ @@ -33,6 +32,7 @@ use libp2p::{ Multiaddr, }; use log::{debug, error, trace}; +use sc_network_common::utils::interval; use smallvec::SmallVec; use std::{ collections::hash_map::Entry, diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index c7a3cf4b2160f..fbf651de9d49a 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -16,10 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::{ - config, error, - utils::{interval, LruHashSet}, -}; +use crate::config; use bytes::Bytes; use codec::{Decode, DecodeAll, Encode}; @@ -45,7 +42,8 @@ use sc_consensus::import_queue::{ BlockImportError, BlockImportStatus, IncomingBlock, RuntimeOrigin, }; use sc_network_common::{ - config::ProtocolId, + config::{NonReservedPeerMode, ProtocolId}, + error, protocol::ProtocolName, request_responses::RequestFailure, sync::{ @@ -57,6 +55,7 @@ use sc_network_common::{ OpaqueBlockResponse, OpaqueStateRequest, OpaqueStateResponse, PollBlockAnnounceValidation, SyncStatus, }, + utils::{interval, LruHashSet}, }; use sp_arithmetic::traits::SaturatedConversion; use sp_consensus::BlockOrigin; @@ -341,7 +340,7 @@ where bootnodes, reserved_nodes: default_sets_reserved.clone(), reserved_only: network_config.default_peers_set.non_reserved_mode == - config::NonReservedPeerMode::Deny, + NonReservedPeerMode::Deny, }); for set_cfg in &network_config.extra_sets { @@ -352,7 +351,7 @@ where } let reserved_only = - set_cfg.set_config.non_reserved_mode == config::NonReservedPeerMode::Deny; + set_cfg.set_config.non_reserved_mode == NonReservedPeerMode::Deny; sets.push(sc_peerset::SetConfig { in_peers: set_cfg.set_config.in_peers, diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 50c4a264a5f95..3e1281753b82c 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -36,9 +36,6 @@ pub type Message = generic::Message< ::Extrinsic, >; -/// A set of transactions. -pub type Transactions = Vec; - /// Remote call response. #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub struct RemoteCallResponse { @@ -59,7 +56,7 @@ pub struct RemoteReadResponse { /// Generic types. 
pub mod generic { - use super::{RemoteCallResponse, RemoteReadResponse, Transactions}; + use super::{RemoteCallResponse, RemoteReadResponse}; use bitflags::bitflags; use codec::{Decode, Encode, Input, Output}; use sc_client_api::StorageProof; @@ -146,9 +143,10 @@ pub mod generic { BlockResponse(BlockResponse), /// Block announce. BlockAnnounce(BlockAnnounce

), - /// Transactions. - Transactions(Transactions), /// Consensus protocol message. + // NOTE: index is incremented by 1 due to transaction-related + // message that was removed + #[codec(index = 6)] Consensus(ConsensusMessage), /// Remote method call request. RemoteCallRequest(RemoteCallRequest), diff --git a/client/network/src/service.rs b/client/network/src/service.rs index dceb57d9e695c..180482e75ece2 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -29,9 +29,8 @@ use crate::{ behaviour::{self, Behaviour, BehaviourOut}, - config::{Params, TransportConfig}, + config::Params, discovery::DiscoveryConfig, - error::Error, network_state::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, @@ -39,7 +38,7 @@ use crate::{ self, message::generic::Roles, NotificationsSink, NotifsHandlerError, PeerInfo, Protocol, Ready, }, - transactions, transport, ExHashT, ReputationChange, + transport, ReputationChange, }; use codec::Encode as _; @@ -60,7 +59,8 @@ use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; use parking_lot::Mutex; use sc_consensus::{BlockImportError, BlockImportStatus, ImportQueue, Link}; use sc_network_common::{ - config::MultiaddrWithPeerId, + config::{MultiaddrWithPeerId, TransportConfig}, + error::Error, protocol::{ event::{DhtEvent, Event}, ProtocolName, @@ -73,6 +73,7 @@ use sc_network_common::{ NotificationSenderReady as NotificationSenderReadyT, Signature, SigningError, }, sync::{SyncState, SyncStatus}, + ExHashT, }; use sc_peerset::PeersetHandle; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; @@ -101,7 +102,7 @@ mod out_events; mod tests; pub use libp2p::identity::{error::DecodingError, Keypair, PublicKey}; -use sc_network_common::service::{NetworkBlock, NetworkRequest, NetworkTransaction}; +use sc_network_common::service::{NetworkBlock, NetworkRequest}; /// Substrate network service. 
Handles network IO and manages connectivity. pub struct NetworkService { @@ -121,7 +122,7 @@ pub struct NetworkService { /// nodes it should be connected to or not. peerset: PeersetHandle, /// Channel that sends messages to the actual worker. - to_worker: TracingUnboundedSender>, + to_worker: TracingUnboundedSender>, /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Updated by the [`NetworkWorker`]. peers_notifications_sinks: Arc>>, @@ -144,7 +145,7 @@ where /// Returns a `NetworkWorker` that implements `Future` and must be regularly polled in order /// for the network processing to advance. From it, you can extract a `NetworkService` using /// `worker.service()`. The `NetworkService` can be shared through the codebase. - pub fn new(mut params: Params) -> Result { + pub fn new(mut params: Params) -> Result { // Private and public keys configuration. let local_identity = params.network_config.node_key.clone().into_keypair()?; let local_public = local_identity.public(); @@ -215,21 +216,6 @@ where fs::create_dir_all(path)?; } - let transactions_handler_proto = transactions::TransactionsHandlerPrototype::new( - params.protocol_id.clone(), - params - .chain - .hash(0u32.into()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), - params.fork_id.clone(), - ); - params - .network_config - .extra_sets - .insert(0, transactions_handler_proto.set_config()); - info!( target: "sub-libp2p", "🏷 Local node identity is: {}", @@ -244,11 +230,8 @@ where params.protocol_id.clone(), ¶ms.fork_id, ¶ms.network_config, - iter::once(Vec::new()) - .chain( - (0..params.network_config.extra_sets.len() - 1) - .map(|_| default_notif_handshake_message.clone()), - ) + (0..params.network_config.extra_sets.len()) + .map(|_| default_notif_handshake_message.clone()) .collect(), params.metrics_registry.as_ref(), params.chain_sync, @@ -465,13 +448,6 @@ where _marker: PhantomData, }); - let (tx_handler, tx_handler_controller) = 
transactions_handler_proto.build( - service.clone(), - params.transaction_pool, - params.metrics_registry.as_ref(), - )?; - (params.transactions_handler_executor)(tx_handler.run().boxed()); - Ok(NetworkWorker { external_addresses, num_connected, @@ -482,9 +458,9 @@ where from_service, event_streams: out_events::OutChannels::new(params.metrics_registry.as_ref())?, peers_notifications_sinks, - tx_handler_controller, metrics, boot_node_ids, + _marker: Default::default(), }) } @@ -1149,20 +1125,6 @@ where } } -impl NetworkTransaction for NetworkService -where - B: BlockT + 'static, - H: ExHashT, -{ - fn trigger_repropagate(&self) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransactions); - } - - fn propagate_transaction(&self, hash: H) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransaction(hash)); - } -} - impl NetworkBlock> for NetworkService where B: BlockT + 'static, @@ -1249,9 +1211,7 @@ impl<'a> NotificationSenderReadyT for NotificationSenderReady<'a> { /// Messages sent from the `NetworkService` to the `NetworkWorker`. /// /// Each entry corresponds to a method of `NetworkService`. -enum ServiceToWorkerMsg { - PropagateTransaction(H), - PropagateTransactions, +enum ServiceToWorkerMsg { RequestJustification(B::Hash, NumberFor), ClearJustificationRequests, AnnounceBlock(B::Hash, Option>), @@ -1309,7 +1269,7 @@ where /// The import queue that was passed at initialization. import_queue: Box>, /// Messages from the [`NetworkService`] that must be processed. - from_service: TracingUnboundedReceiver>, + from_service: TracingUnboundedReceiver>, /// Senders for events that happen on the network. event_streams: out_events::OutChannels, /// Prometheus network metrics. @@ -1319,8 +1279,9 @@ where /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Shared with the [`NetworkService`]. 
peers_notifications_sinks: Arc>>, - /// Controller for the handler of incoming and outgoing transactions. - tx_handler_controller: transactions::TransactionsHandlerController, + /// Marker to pin the `H` generic. Serves no purpose except to not break backwards + /// compatibility. + _marker: PhantomData, } impl Future for NetworkWorker @@ -1376,10 +1337,6 @@ where .behaviour_mut() .user_protocol_mut() .clear_justification_requests(), - ServiceToWorkerMsg::PropagateTransaction(hash) => - this.tx_handler_controller.propagate_transaction(hash), - ServiceToWorkerMsg::PropagateTransactions => - this.tx_handler_controller.propagate_transactions(), ServiceToWorkerMsg::GetValue(key) => this.network_service.behaviour_mut().get_value(key), ServiceToWorkerMsg::PutValue(key, value) => @@ -1922,8 +1879,6 @@ where SyncState::Downloading => true, }; - this.tx_handler_controller.set_gossip_enabled(!is_major_syncing); - this.is_major_syncing.store(is_major_syncing, Ordering::Relaxed); if let Some(metrics) = this.metrics.as_ref() { diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index a9505c5341c3d..c8f137f79c6dc 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -21,7 +21,7 @@ use crate::{config, NetworkService, NetworkWorker}; use futures::prelude::*; use libp2p::PeerId; use sc_network_common::{ - config::{MultiaddrWithPeerId, ProtocolId}, + config::{MultiaddrWithPeerId, NonDefaultSetConfig, ProtocolId, SetConfig, TransportConfig}, protocol::event::Event, service::{NetworkEventStream, NetworkNotification, NetworkPeers, NetworkStateInfo}, }; @@ -135,12 +135,8 @@ fn build_test_full_node( let worker = NetworkWorker::new(config::Params { role: config::Role::Full, executor: None, - transactions_handler_executor: Box::new(|task| { - async_std::task::spawn(task); - }), network_config, chain: client.clone(), - transaction_pool: Arc::new(config::EmptyTransactionPool), protocol_id, fork_id, import_queue, @@ 
-178,23 +174,23 @@ fn build_nodes_one_proto() -> ( let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![config::NonDefaultSetConfig { + extra_sets: vec![NonDefaultSetConfig { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, set_config: Default::default(), }], listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }); let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![config::NonDefaultSetConfig { + extra_sets: vec![NonDefaultSetConfig { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, - set_config: config::SetConfig { + set_config: SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr, peer_id: node1.local_peer_id(), @@ -203,7 +199,7 @@ fn build_nodes_one_proto() -> ( }, }], listen_addresses: vec![], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }); @@ -368,13 +364,13 @@ fn lots_of_incoming_peers_works() { let (main_node, _) = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - extra_sets: vec![config::NonDefaultSetConfig { + extra_sets: vec![NonDefaultSetConfig { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, - set_config: config::SetConfig { in_peers: u32::MAX, ..Default::default() }, + set_config: SetConfig { in_peers: u32::MAX, ..Default::default() }, }], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }); @@ -387,11 +383,11 @@ fn 
lots_of_incoming_peers_works() { for _ in 0..32 { let (_dialing_node, event_stream) = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![], - extra_sets: vec![config::NonDefaultSetConfig { + extra_sets: vec![NonDefaultSetConfig { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, - set_config: config::SetConfig { + set_config: SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr.clone(), peer_id: main_node_peer_id, @@ -399,7 +395,7 @@ fn lots_of_incoming_peers_works() { ..Default::default() }, }], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }); @@ -504,23 +500,23 @@ fn fallback_name_working() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (node1, mut events_stream1) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![config::NonDefaultSetConfig { + extra_sets: vec![NonDefaultSetConfig { notifications_protocol: NEW_PROTOCOL_NAME.into(), fallback_names: vec![PROTOCOL_NAME.into()], max_notification_size: 1024 * 1024, set_config: Default::default(), }], listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }); let (_, mut events_stream2) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![config::NonDefaultSetConfig { + extra_sets: vec![NonDefaultSetConfig { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, - set_config: config::SetConfig { + set_config: SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr, peer_id: node1.local_peer_id(), @@ -529,7 +525,7 @@ fn fallback_name_working() { }, }], listen_addresses: vec![], - transport: config::TransportConfig::MemoryOnly, + transport: 
TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }); @@ -572,7 +568,7 @@ fn ensure_listen_addresses_consistent_with_transport_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -599,7 +595,7 @@ fn ensure_boot_node_addresses_consistent_with_transport_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, boot_nodes: vec![boot_node], ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); @@ -632,11 +628,8 @@ fn ensure_reserved_node_addresses_consistent_with_transport_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, - default_peers_set: config::SetConfig { - reserved_nodes: vec![reserved_node], - ..Default::default() - }, + transport: TransportConfig::MemoryOnly, + default_peers_set: SetConfig { reserved_nodes: vec![reserved_node], ..Default::default() }, ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -652,10 +645,7 @@ fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - default_peers_set: config::SetConfig { - reserved_nodes: vec![reserved_node], - ..Default::default() - }, + default_peers_set: SetConfig { reserved_nodes: vec![reserved_node], ..Default::default() }, ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -668,7 +658,7 @@ fn 
ensure_public_addresses_consistent_with_transport_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, public_addresses: vec![public_address], ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index e78b91a4e04ee..2f6b788e368b3 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -47,16 +47,14 @@ use sc_consensus::{ ForkChoiceStrategy, ImportResult, JustificationImport, JustificationSyncLink, LongestChain, Verifier, }; -pub use sc_network::config::EmptyTransactionPool; use sc_network::{ - config::{ - NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, Role, SyncMode, - TransportConfig, - }, + config::{NetworkConfiguration, Role, SyncMode}, Multiaddr, NetworkService, NetworkWorker, }; use sc_network_common::{ - config::{MultiaddrWithPeerId, ProtocolId}, + config::{ + MultiaddrWithPeerId, NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, TransportConfig, + }, protocol::ProtocolName, service::{NetworkBlock, NetworkStateInfo, NetworkSyncForkRequest}, sync::warp::{AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncProvider}, @@ -879,12 +877,8 @@ where let network = NetworkWorker::new(sc_network::config::Params { role: if config.is_authority { Role::Authority } else { Role::Full }, executor: None, - transactions_handler_executor: Box::new(|task| { - async_std::task::spawn(task); - }), network_config, chain: client.clone(), - transaction_pool: Arc::new(EmptyTransactionPool), protocol_id, fork_id, import_queue, diff --git a/client/network/transactions/Cargo.toml b/client/network/transactions/Cargo.toml new file mode 100644 index 0000000000000..5578bb2c7191e --- /dev/null +++ b/client/network/transactions/Cargo.toml @@ -0,0 +1,28 @@ +[package] 
+description = "Substrate transaction protocol" +name = "sc-network-transactions" +version = "0.10.0-dev" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +authors = ["Parity Technologies "] +edition = "2021" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sc-network-transactions" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +array-bytes = "4.1" +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +futures = "0.3.21" +hex = "0.4.0" +libp2p = "0.46.1" +log = "0.4.17" +pin-project = "1.0.10" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } +sc-network-common = { version = "0.10.0-dev", path = "../common" } +sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } +sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } diff --git a/client/network/transactions/src/config.rs b/client/network/transactions/src/config.rs new file mode 100644 index 0000000000000..abb8cccd301ac --- /dev/null +++ b/client/network/transactions/src/config.rs @@ -0,0 +1,98 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Configuration of the transaction protocol + +use futures::prelude::*; +use sc_network_common::ExHashT; +use sp_runtime::traits::Block as BlockT; +use std::{collections::HashMap, future::Future, pin::Pin, time}; + +/// Interval at which we propagate transactions; +pub(crate) const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900); + +/// Maximum number of known transaction hashes to keep for a peer. +/// +/// This should be approx. 2 blocks full of transactions for the network to function properly. +pub(crate) const MAX_KNOWN_TRANSACTIONS: usize = 10240; // ~300kb per peer + overhead. + +/// Maximum allowed size for a transactions notification. +pub(crate) const MAX_TRANSACTIONS_SIZE: u64 = 16 * 1024 * 1024; + +/// Maximum number of transaction validation request we keep at any moment. +pub(crate) const MAX_PENDING_TRANSACTIONS: usize = 8192; + +/// Result of the transaction import. +#[derive(Clone, Copy, Debug)] +pub enum TransactionImport { + /// Transaction is good but already known by the transaction pool. + KnownGood, + /// Transaction is good and not yet known. + NewGood, + /// Transaction is invalid. + Bad, + /// Transaction import was not performed. + None, +} + +/// Future resolving to transaction import result. +pub type TransactionImportFuture = Pin + Send>>; + +/// Transaction pool interface +pub trait TransactionPool: Send + Sync { + /// Get transactions from the pool that are ready to be propagated. + fn transactions(&self) -> Vec<(H, B::Extrinsic)>; + /// Get hash of transaction. + fn hash_of(&self, transaction: &B::Extrinsic) -> H; + /// Import a transaction into the pool. + /// + /// This will return future. + fn import(&self, transaction: B::Extrinsic) -> TransactionImportFuture; + /// Notify the pool about transactions broadcast. 
+ fn on_broadcasted(&self, propagations: HashMap>); + /// Get transaction by hash. + fn transaction(&self, hash: &H) -> Option; +} + +/// Dummy implementation of the [`TransactionPool`] trait for a transaction pool that is always +/// empty and discards all incoming transactions. +/// +/// Requires the "hash" type to implement the `Default` trait. +/// +/// Useful for testing purposes. +pub struct EmptyTransactionPool; + +impl TransactionPool for EmptyTransactionPool { + fn transactions(&self) -> Vec<(H, B::Extrinsic)> { + Vec::new() + } + + fn hash_of(&self, _transaction: &B::Extrinsic) -> H { + Default::default() + } + + fn import(&self, _transaction: B::Extrinsic) -> TransactionImportFuture { + Box::pin(future::ready(TransactionImport::KnownGood)) + } + + fn on_broadcasted(&self, _: HashMap>) {} + + fn transaction(&self, _h: &H) -> Option { + None + } +} diff --git a/client/network/src/transactions.rs b/client/network/transactions/src/lib.rs similarity index 84% rename from client/network/src/transactions.rs rename to client/network/transactions/src/lib.rs index da4547aefeab3..b75bd411b39c4 100644 --- a/client/network/src/transactions.rs +++ b/client/network/transactions/src/lib.rs @@ -26,27 +26,22 @@ //! - Use [`TransactionsHandlerPrototype::build`] then [`TransactionsHandler::run`] to obtain a //! `Future` that processes transactions. 
-use crate::{ - config::{self, TransactionImport, TransactionImportFuture, TransactionPool}, - error, - protocol::message, - service::NetworkService, - utils::{interval, LruHashSet}, - ExHashT, -}; - +use crate::config::*; use codec::{Decode, Encode}; use futures::{channel::mpsc, prelude::*, stream::FuturesUnordered}; use libp2p::{multiaddr, PeerId}; use log::{debug, trace, warn}; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_network_common::{ - config::ProtocolId, + config::{NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, SetConfig}, + error, protocol::{ event::{Event, ObservedRole}, ProtocolName, }, service::{NetworkEventStream, NetworkNotification, NetworkPeers}, + utils::{interval, LruHashSet}, + ExHashT, }; use sp_runtime::traits::Block as BlockT; use std::{ @@ -54,27 +49,14 @@ use std::{ iter, num::NonZeroUsize, pin::Pin, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, + sync::Arc, task::Poll, - time, }; -/// Interval at which we propagate transactions; -const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900); - -/// Maximum number of known transaction hashes to keep for a peer. -/// -/// This should be approx. 2 blocks full of transactions for the network to function properly. -const MAX_KNOWN_TRANSACTIONS: usize = 10240; // ~300kb per peer + overhead. +pub mod config; -/// Maximum allowed size for a transactions notification. -const MAX_TRANSACTIONS_SIZE: u64 = 16 * 1024 * 1024; - -/// Maximum number of transaction validation request we keep at any moment. -const MAX_PENDING_TRANSACTIONS: usize = 8192; +/// A set of transactions. 
+pub type Transactions = Vec; mod rep { use sc_peerset::ReputationChange as Rep; @@ -141,7 +123,7 @@ impl TransactionsHandlerPrototype { pub fn new>( protocol_id: ProtocolId, genesis_hash: Hash, - fork_id: Option, + fork_id: Option<&str>, ) -> Self { let genesis_hash = genesis_hash.as_ref(); let protocol_name = if let Some(fork_id) = fork_id { @@ -158,16 +140,16 @@ impl TransactionsHandlerPrototype { } /// Returns the configuration of the set to put in the network configuration. - pub fn set_config(&self) -> config::NonDefaultSetConfig { - config::NonDefaultSetConfig { + pub fn set_config(&self) -> NonDefaultSetConfig { + NonDefaultSetConfig { notifications_protocol: self.protocol_name.clone(), fallback_names: self.fallback_protocol_names.clone(), max_notification_size: MAX_TRANSACTIONS_SIZE, - set_config: config::SetConfig { + set_config: SetConfig { in_peers: 0, out_peers: 0, reserved_nodes: Vec::new(), - non_reserved_mode: config::NonReservedPeerMode::Deny, + non_reserved_mode: NonReservedPeerMode::Deny, }, } } @@ -176,23 +158,25 @@ impl TransactionsHandlerPrototype { /// the behaviour of the handler while it's running. /// /// Important: the transactions handler is initially disabled and doesn't gossip transactions. - /// You must call [`TransactionsHandlerController::set_gossip_enabled`] to enable it. - pub fn build( + /// Gossiping is enabled when major syncing is done. 
+ pub fn build< + B: BlockT + 'static, + H: ExHashT, + S: NetworkPeers + NetworkEventStream + NetworkNotification + sp_consensus::SyncOracle, + >( self, - service: Arc>, + service: S, transaction_pool: Arc>, metrics_registry: Option<&Registry>, - ) -> error::Result<(TransactionsHandler, TransactionsHandlerController)> { + ) -> error::Result<(TransactionsHandler, TransactionsHandlerController)> { let event_stream = service.event_stream("transactions-handler"); let (to_handler, from_controller) = mpsc::unbounded(); - let gossip_enabled = Arc::new(AtomicBool::new(false)); let handler = TransactionsHandler { protocol_name: self.protocol_name, propagate_timeout: Box::pin(interval(PROPAGATE_TIMEOUT)), pending_transactions: FuturesUnordered::new(), pending_transactions_peers: HashMap::new(), - gossip_enabled: gossip_enabled.clone(), service, event_stream, peers: HashMap::new(), @@ -205,7 +189,7 @@ impl TransactionsHandlerPrototype { }, }; - let controller = TransactionsHandlerController { to_handler, gossip_enabled }; + let controller = TransactionsHandlerController { to_handler }; Ok((handler, controller)) } @@ -214,15 +198,9 @@ impl TransactionsHandlerPrototype { /// Controls the behaviour of a [`TransactionsHandler`] it is connected to. pub struct TransactionsHandlerController { to_handler: mpsc::UnboundedSender>, - gossip_enabled: Arc, } impl TransactionsHandlerController { - /// Controls whether transactions are being gossiped on the network. - pub fn set_gossip_enabled(&mut self, enabled: bool) { - self.gossip_enabled.store(enabled, Ordering::Relaxed); - } - /// You may call this when new transactions are imported by the transaction pool. /// /// All transactions will be fetched from the `TransactionPool` that was passed at @@ -246,7 +224,11 @@ enum ToHandler { } /// Handler for transactions. Call [`TransactionsHandler::run`] to start the processing. 
-pub struct TransactionsHandler { +pub struct TransactionsHandler< + B: BlockT + 'static, + H: ExHashT, + S: NetworkPeers + NetworkEventStream + NetworkNotification + sp_consensus::SyncOracle, +> { protocol_name: ProtocolName, /// Interval at which we call `propagate_transactions`. propagate_timeout: Pin + Send>>, @@ -258,13 +240,12 @@ pub struct TransactionsHandler { /// multiple times concurrently. pending_transactions_peers: HashMap>, /// Network service to use to send messages and manage peers. - service: Arc>, + service: S, /// Stream of networking events. event_stream: Pin + Send>>, // All connected peers peers: HashMap>, transaction_pool: Arc>, - gossip_enabled: Arc, from_controller: mpsc::UnboundedReceiver>, /// Prometheus metrics. metrics: Option, @@ -278,7 +259,12 @@ struct Peer { role: ObservedRole, } -impl TransactionsHandler { +impl TransactionsHandler +where + B: BlockT + 'static, + H: ExHashT, + S: NetworkPeers + NetworkEventStream + NetworkNotification + sp_consensus::SyncOracle, +{ /// Turns the [`TransactionsHandler`] into a future that should run forever and not be /// interrupted. 
pub async fn run(mut self) { @@ -360,9 +346,9 @@ impl TransactionsHandler { continue } - if let Ok(m) = as Decode>::decode( - &mut message.as_ref(), - ) { + if let Ok(m) = + as Decode>::decode(&mut message.as_ref()) + { self.on_transactions(remote, m); } else { warn!(target: "sub-libp2p", "Failed to decode transactions list"); @@ -376,10 +362,10 @@ impl TransactionsHandler { } /// Called when peer sends us new transactions - fn on_transactions(&mut self, who: PeerId, transactions: message::Transactions) { - // Accept transactions only when enabled - if !self.gossip_enabled.load(Ordering::Relaxed) { - trace!(target: "sync", "{} Ignoring transactions while disabled", who); + fn on_transactions(&mut self, who: PeerId, transactions: Transactions) { + // Accept transactions only when node is not major syncing + if self.service.is_major_syncing() { + trace!(target: "sync", "{} Ignoring transactions while major syncing", who); return } @@ -428,10 +414,11 @@ impl TransactionsHandler { /// Propagate one transaction. pub fn propagate_transaction(&mut self, hash: &H) { - // Accept transactions only when enabled - if !self.gossip_enabled.load(Ordering::Relaxed) { + // Accept transactions only when node is not major syncing + if self.service.is_major_syncing() { return } + debug!(target: "sync", "Propagating transaction [{:?}]", hash); if let Some(transaction) = self.transaction_pool.transaction(hash) { let propagated_to = self.do_propagate_transactions(&[(hash.clone(), transaction)]); @@ -479,10 +466,11 @@ impl TransactionsHandler { /// Call when we must propagate ready transactions to peers. 
fn propagate_transactions(&mut self) { - // Accept transactions only when enabled - if !self.gossip_enabled.load(Ordering::Relaxed) { + // Accept transactions only when node is not major syncing + if self.service.is_major_syncing() { return } + debug!(target: "sync", "Propagating transactions"); let transactions = self.transaction_pool.transactions(); let propagated_to = self.do_propagate_transactions(&transactions); diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 3c574ef13c8e6..e46c65cf018f5 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -56,6 +56,7 @@ sc-network-bitswap = { version = "0.10.0-dev", path = "../network/bitswap" } sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-network-light = { version = "0.10.0-dev", path = "../network/light" } sc-network-sync = { version = "0.10.0-dev", path = "../network/sync" } +sc-network-transactions = { version = "0.10.0-dev", path = "../network/transactions" } sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } sc-client-api = { version = "4.0.0-dev", path = "../api" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 5a2f4cf978b41..dfd532a14c172 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -40,7 +40,7 @@ use sc_keystore::LocalKeystore; use sc_network::{config::SyncMode, NetworkService}; use sc_network_bitswap::BitswapRequestHandler; use sc_network_common::{ - service::{NetworkStateInfo, NetworkStatusProvider, NetworkTransaction}, + service::{NetworkStateInfo, NetworkStatusProvider}, sync::warp::WarpSyncProvider, }; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; @@ -326,7 +326,6 @@ where pub trait SpawnTaskNetwork: sc_offchain::NetworkProvider + NetworkStateInfo - + NetworkTransaction + NetworkStatusProvider + Send + Sync @@ -339,7 +338,6 @@ where Block: BlockT, T: 
sc_offchain::NetworkProvider + NetworkStateInfo - + NetworkTransaction + NetworkStatusProvider + Send + Sync @@ -368,6 +366,9 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { pub network: Arc>, /// A Sender for RPC requests. pub system_rpc_tx: TracingUnboundedSender>, + /// Controller for transactions handlers + pub tx_handler_controller: + sc_network_transactions::TransactionsHandlerController<::Hash>, /// Telemetry instance for this node. pub telemetry: Option<&'a mut Telemetry>, } @@ -446,6 +447,7 @@ where rpc_builder, network, system_rpc_tx, + tx_handler_controller, telemetry, } = params; @@ -481,7 +483,11 @@ where spawn_handle.spawn( "on-transaction-imported", Some("transaction-pool"), - transaction_notifications(transaction_pool.clone(), network.clone(), telemetry.clone()), + transaction_notifications( + transaction_pool.clone(), + tx_handler_controller, + telemetry.clone(), + ), ); // Prometheus metrics. @@ -544,20 +550,21 @@ where Ok(rpc_handlers) } -async fn transaction_notifications( +async fn transaction_notifications( transaction_pool: Arc, - network: Network, + tx_handler_controller: sc_network_transactions::TransactionsHandlerController< + ::Hash, + >, telemetry: Option, ) where Block: BlockT, ExPool: MaintainedTransactionPool::Hash>, - Network: NetworkTransaction<::Hash> + Send + Sync, { // transaction notifications transaction_pool .import_notification_stream() .for_each(move |hash| { - network.propagate_transaction(hash); + tx_handler_controller.propagate_transaction(hash); let status = transaction_pool.status(); telemetry!( telemetry; @@ -719,6 +726,7 @@ pub fn build_network( ( Arc::Hash>>, TracingUnboundedSender>, + sc_network_transactions::TransactionsHandlerController<::Hash>, NetworkStarter, ), Error, @@ -761,9 +769,6 @@ where } } - let transaction_pool_adapter = - Arc::new(TransactionPoolAdapter { pool: transaction_pool, client: client.clone() }); - let protocol_id = config.protocol_id(); let 
block_announce_validator = if let Some(f) = block_announce_validator_builder { @@ -845,7 +850,7 @@ where protocol_config })); - let network_params = sc_network::config::Params { + let mut network_params = sc_network::config::Params { role: config.role.clone(), executor: { let spawn_handle = Clone::clone(&spawn_handle); @@ -853,16 +858,9 @@ where spawn_handle.spawn("libp2p-node", Some("networking"), fut); })) }, - transactions_handler_executor: { - let spawn_handle = Clone::clone(&spawn_handle); - Box::new(move |fut| { - spawn_handle.spawn("network-transactions-handler", Some("networking"), fut); - }) - }, network_config: config.network.clone(), chain: client.clone(), - transaction_pool: transaction_pool_adapter as _, - protocol_id, + protocol_id: protocol_id.clone(), fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), import_queue: Box::new(import_queue), chain_sync: Box::new(chain_sync), @@ -877,10 +875,32 @@ where .collect::>(), }; + // crate transactions protocol and add it to the list of supported protocols of `network_params` + let transactions_handler_proto = sc_network_transactions::TransactionsHandlerPrototype::new( + protocol_id.clone(), + client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + config.chain_spec.fork_id(), + ); + network_params + .network_config + .extra_sets + .insert(0, transactions_handler_proto.set_config()); + let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); let network_mut = sc_network::NetworkWorker::new(network_params)?; let network = network_mut.service().clone(); + let (tx_handler, tx_handler_controller) = transactions_handler_proto.build( + network.clone(), + Arc::new(TransactionPoolAdapter { pool: transaction_pool, client: client.clone() }), + config.prometheus_config.as_ref().map(|config| &config.registry), + )?; + spawn_handle.spawn("network-transactions-handler", Some("networking"), tx_handler.run()); + let (system_rpc_tx, system_rpc_rx) = 
tracing_unbounded("mpsc_system_rpc"); let future = build_network_future( @@ -928,7 +948,7 @@ where future.await }); - Ok((network, system_rpc_tx, NetworkStarter(network_start_tx))) + Ok((network, system_rpc_tx, tx_handler_controller, NetworkStarter(network_start_tx))) } /// Object used to start the network. diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 44153e3b914f3..bca0697bcbd08 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -24,13 +24,11 @@ pub use sc_executor::WasmExecutionMethod; #[cfg(feature = "wasmtime")] pub use sc_executor::WasmtimeInstantiationStrategy; pub use sc_network::{ - config::{ - NetworkConfiguration, NodeKeyConfig, NonDefaultSetConfig, Role, SetConfig, TransportConfig, - }, + config::{NetworkConfiguration, NodeKeyConfig, Role}, Multiaddr, }; pub use sc_network_common::{ - config::{MultiaddrWithPeerId, ProtocolId}, + config::{MultiaddrWithPeerId, NonDefaultSetConfig, ProtocolId, SetConfig, TransportConfig}, request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, diff --git a/client/service/src/error.rs b/client/service/src/error.rs index 0d702c7f37b98..001a83922d776 100644 --- a/client/service/src/error.rs +++ b/client/service/src/error.rs @@ -19,7 +19,6 @@ //! Errors that can occur during the service operation. 
use sc_keystore; -use sc_network; use sp_blockchain; use sp_consensus; @@ -41,7 +40,7 @@ pub enum Error { Consensus(#[from] sp_consensus::Error), #[error(transparent)] - Network(#[from] sc_network::error::Error), + Network(#[from] sc_network_common::error::Error), #[error(transparent)] Keystore(#[from] sc_keystore::Error), diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 19358c1e5bc4c..091b4bbe9fe5f 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -72,7 +72,7 @@ pub use sc_chain_spec::{ pub use sc_consensus::ImportQueue; pub use sc_executor::NativeExecutionDispatch; #[doc(hidden)] -pub use sc_network::config::{TransactionImport, TransactionImportFuture}; +pub use sc_network_transactions::config::{TransactionImport, TransactionImportFuture}; pub use sc_rpc::{ RandomIntegerSubscriptionId, RandomStringSubscriptionId, RpcSubscriptionIdProvider, }; @@ -148,7 +148,7 @@ async fn build_network_future< + Send + Sync + 'static, - H: sc_network::ExHashT, + H: sc_network_common::ExHashT, >( role: Role, mut network: sc_network::NetworkWorker, @@ -415,7 +415,8 @@ where .collect() } -impl sc_network::config::TransactionPool for TransactionPoolAdapter +impl sc_network_transactions::config::TransactionPool + for TransactionPoolAdapter where C: HeaderBackend + BlockBackend diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 23245d46cba10..5d29d34a3cbf2 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -22,12 +22,9 @@ use futures::{task::Poll, Future, TryFutureExt as _}; use log::{debug, info}; use parking_lot::Mutex; use sc_client_api::{Backend, CallExecutor}; -use sc_network::{ - config::{NetworkConfiguration, TransportConfig}, - multiaddr, -}; +use sc_network::{config::NetworkConfiguration, multiaddr}; use sc_network_common::{ - config::MultiaddrWithPeerId, + config::{MultiaddrWithPeerId, TransportConfig}, service::{NetworkBlock, NetworkPeers, NetworkStateInfo}, }; use 
sc_service::{ From 519fbaae886e2773b37363970433d36cbb47d853 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Tue, 27 Sep 2022 08:08:14 +0100 Subject: [PATCH 11/42] =?UTF-8?q?export=20more=20types=20from=20fast-untsa?= =?UTF-8?q?ke=20=F0=9F=A4=A6=E2=80=8D=E2=99=82=EF=B8=8F=20(#12353)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * export more types from fast-untsake 🤦‍♂️ * make non-test * fmt --- frame/fast-unstake/src/lib.rs | 3 ++- frame/fast-unstake/src/types.rs | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index 9bfb29f8457fa..7fbac8560ea6c 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -60,7 +60,7 @@ mod tests; // NOTE: enable benchmarking in tests as well. #[cfg(feature = "runtime-benchmarks")] mod benchmarking; -mod types; +pub mod types; pub mod weights; pub const LOG_TARGET: &'static str = "runtime::fast-unstake"; @@ -90,6 +90,7 @@ pub mod pallet { }; use sp_staking::EraIndex; use sp_std::{prelude::*, vec::Vec}; + pub use types::PreventStakingOpsIfUnbonding; pub use weights::WeightInfo; #[derive(scale_info::TypeInfo, codec::Encode, codec::Decode, codec::MaxEncodedLen)] diff --git a/frame/fast-unstake/src/types.rs b/frame/fast-unstake/src/types.rs index e8d538dce4802..2ddb8dca27e9e 100644 --- a/frame/fast-unstake/src/types.rs +++ b/frame/fast-unstake/src/types.rs @@ -47,7 +47,6 @@ pub struct UnstakeRequest(sp_std::marker::PhantomData); -#[cfg(test)] impl PreventStakingOpsIfUnbonding { pub fn new() -> Self { Self(Default::default()) From 1763ff2273c4649fa969167503951371141a0272 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 27 Sep 2022 11:37:45 +0200 Subject: [PATCH 12/42] Fix compilation on 1.66 nightly (#12363) --- primitives/state-machine/src/trie_backend_essence.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 
deletions(-) diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index dda7b51ab08c6..cd2a71163e2ee 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -178,7 +178,10 @@ impl, H: Hasher, C: AsLocalTrieCache> TrieBackendEss ) -> R { let storage_root = storage_root.unwrap_or_else(|| self.root); let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder()); - let recorder = recorder.as_mut().map(|r| r as _); + let recorder = match recorder.as_mut() { + Some(recorder) => Some(recorder as &mut dyn TrieRecorder), + None => None, + }; let mut cache = self .trie_node_cache @@ -216,7 +219,10 @@ impl, H: Hasher, C: AsLocalTrieCache> TrieBackendEss ) -> (Option, R), ) -> R { let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder()); - let recorder = recorder.as_mut().map(|r| r as _); + let recorder = match recorder.as_mut() { + Some(recorder) => Some(recorder as &mut dyn TrieRecorder), + None => None, + }; let result = if let Some(local_cache) = self.trie_node_cache.as_ref() { let mut cache = local_cache.as_local_trie_cache().as_trie_db_mut_cache(); From edca89177318580878bf11c0d49586ceea23909b Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 27 Sep 2022 13:16:30 +0200 Subject: [PATCH 13/42] Relax Slots-based engines from Epochs (#12360) Remove Epochs reference from slots subsystem --- client/consensus/aura/src/lib.rs | 14 ++++++------- client/consensus/babe/src/lib.rs | 12 ++++------- client/consensus/slots/src/lib.rs | 35 +++++++++++++++---------------- 3 files changed, 28 insertions(+), 33 deletions(-) diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index c538200bb315c..a0eed6e35310e 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -257,7 +257,7 @@ pub fn build_aura_worker( SyncOracle = SO, JustificationSyncLink = L, Claim = 
P::Public, - EpochData = Vec>, + AuxData = Vec>, > where B: BlockT, @@ -330,7 +330,7 @@ where Pin> + Send + 'static>>; type Proposer = E::Proposer; type Claim = P::Public; - type EpochData = Vec>; + type AuxData = Vec>; fn logging_target(&self) -> &'static str { "aura" @@ -340,15 +340,15 @@ where &mut self.block_import } - fn epoch_data( + fn aux_data( &self, header: &B::Header, _slot: Slot, - ) -> Result { + ) -> Result { authorities(self.client.as_ref(), &BlockId::Hash(header.hash())) } - fn authorities_len(&self, epoch_data: &Self::EpochData) -> Option { + fn authorities_len(&self, epoch_data: &Self::AuxData) -> Option { Some(epoch_data.len()) } @@ -356,7 +356,7 @@ where &self, _header: &B::Header, slot: Slot, - epoch_data: &Self::EpochData, + epoch_data: &Self::AuxData, ) -> Option { let expected_author = slot_author::

(slot, epoch_data); expected_author.and_then(|p| { @@ -382,7 +382,7 @@ where body: Vec, storage_changes: StorageChanges<>::Transaction, B>, public: Self::Claim, - _epoch: Self::EpochData, + _epoch: Self::AuxData, ) -> Result< sc_consensus::BlockImportParams>::Transaction>, sp_consensus::Error, diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index aef4785b7bb81..109e5aade02a7 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -729,7 +729,6 @@ where BS: BackoffAuthoringBlocksStrategy> + Sync, Error: std::error::Error + Send + From + From + 'static, { - type EpochData = ViableEpochDescriptor, Epoch>; type Claim = (PreDigest, AuthorityId); type SyncOracle = SO; type JustificationSyncLink = L; @@ -737,6 +736,7 @@ where Pin> + Send + 'static>>; type Proposer = E::Proposer; type BlockImport = I; + type AuxData = ViableEpochDescriptor, Epoch>; fn logging_target(&self) -> &'static str { "babe" @@ -746,11 +746,7 @@ where &mut self.block_import } - fn epoch_data( - &self, - parent: &B::Header, - slot: Slot, - ) -> Result { + fn aux_data(&self, parent: &B::Header, slot: Slot) -> Result { self.epoch_changes .shared_data() .epoch_descriptor_for_child_of( @@ -763,7 +759,7 @@ where .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) } - fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { + fn authorities_len(&self, epoch_descriptor: &Self::AuxData) -> Option { self.epoch_changes .shared_data() .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) @@ -823,7 +819,7 @@ where body: Vec, storage_changes: StorageChanges<>::Transaction, B>, (_, public): Self::Claim, - epoch_descriptor: Self::EpochData, + epoch_descriptor: Self::AuxData, ) -> Result< sc_consensus::BlockImportParams>::Transaction>, sp_consensus::Error, diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 7c5d5d4a73bc1..6225bbbda1745 100644 --- 
a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -101,8 +101,8 @@ pub trait SimpleSlotWorker { /// Data associated with a slot claim. type Claim: Send + Sync + 'static; - /// Epoch data necessary for authoring. - type EpochData: Send + Sync + 'static; + /// Auxiliary data necessary for authoring. + type AuxData: Send + Sync + 'static; /// The logging target to use when logging messages. fn logging_target(&self) -> &'static str; @@ -110,29 +110,28 @@ pub trait SimpleSlotWorker { /// A handle to a `BlockImport`. fn block_import(&mut self) -> &mut Self::BlockImport; - /// Returns the epoch data necessary for authoring. For time-dependent epochs, - /// use the provided slot number as a canonical source of time. - fn epoch_data( + /// Returns the auxiliary data necessary for authoring. + fn aux_data( &self, header: &B::Header, slot: Slot, - ) -> Result; + ) -> Result; - /// Returns the number of authorities given the epoch data. + /// Returns the number of authorities. /// None indicate that the authorities information is incomplete. - fn authorities_len(&self, epoch_data: &Self::EpochData) -> Option; + fn authorities_len(&self, aux_data: &Self::AuxData) -> Option; /// Tries to claim the given slot, returning an object with claim data if successful. async fn claim_slot( &self, header: &B::Header, slot: Slot, - epoch_data: &Self::EpochData, + aux_data: &Self::AuxData, ) -> Option; /// Notifies the given slot. Similar to `claim_slot`, but will be called no matter whether we /// need to author blocks or not. - fn notify_slot(&self, _header: &B::Header, _slot: Slot, _epoch_data: &Self::EpochData) {} + fn notify_slot(&self, _header: &B::Header, _slot: Slot, _aux_data: &Self::AuxData) {} /// Return the pre digest data to include in a block authored with the given claim. 
fn pre_digest_data(&self, slot: Slot, claim: &Self::Claim) -> Vec; @@ -145,7 +144,7 @@ pub trait SimpleSlotWorker { body: Vec, storage_changes: StorageChanges<>::Transaction, B>, public: Self::Claim, - epoch: Self::EpochData, + epoch: Self::AuxData, ) -> Result< sc_consensus::BlockImportParams>::Transaction>, sp_consensus::Error, @@ -268,12 +267,12 @@ pub trait SimpleSlotWorker { Delay::new(proposing_remaining_duration) }; - let epoch_data = match self.epoch_data(&slot_info.chain_head, slot) { - Ok(epoch_data) => epoch_data, + let aux_data = match self.aux_data(&slot_info.chain_head, slot) { + Ok(aux_data) => aux_data, Err(err) => { warn!( target: logging_target, - "Unable to fetch epoch data at block {:?}: {}", + "Unable to fetch auxiliary data for block {:?}: {}", slot_info.chain_head.hash(), err, ); @@ -290,9 +289,9 @@ pub trait SimpleSlotWorker { }, }; - self.notify_slot(&slot_info.chain_head, slot, &epoch_data); + self.notify_slot(&slot_info.chain_head, slot, &aux_data); - let authorities_len = self.authorities_len(&epoch_data); + let authorities_len = self.authorities_len(&aux_data); if !self.force_authoring() && self.sync_oracle().is_offline() && @@ -309,7 +308,7 @@ pub trait SimpleSlotWorker { return None } - let claim = self.claim_slot(&slot_info.chain_head, slot, &epoch_data).await?; + let claim = self.claim_slot(&slot_info.chain_head, slot, &aux_data).await?; if self.should_backoff(slot, &slot_info.chain_head) { return None @@ -351,7 +350,7 @@ pub trait SimpleSlotWorker { body.clone(), proposal.storage_changes, claim, - epoch_data, + aux_data, ) .await { From 2a6c314cdce2b7813fbe2af2d21388ff5ededcbe Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Tue, 27 Sep 2022 13:44:20 +0200 Subject: [PATCH 14/42] Pallet staking events to named enum (#12342) * Pallet staking events to named enum * fmt * update np staking tests * update remaining events * update benchmarks * Update 
frame/nomination-pools/test-staking/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/staking/src/pallet/mod.rs * Update frame/staking/src/pallet/mod.rs * Update frame/staking/src/lib.rs * Update frame/staking/src/pallet/impls.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: parity-processbot <> Co-authored-by: Ankan <10196091+Ank4n@users.noreply.github.com> --- .../nomination-pools/test-staking/src/lib.rs | 109 ++++++++++++------ frame/offences/benchmarking/src/lib.rs | 4 +- frame/staking/src/lib.rs | 4 +- frame/staking/src/pallet/impls.rs | 26 +++-- frame/staking/src/pallet/mod.rs | 60 +++++----- frame/staking/src/slashing.rs | 5 +- frame/staking/src/tests.rs | 55 +++++---- 7 files changed, 165 insertions(+), 98 deletions(-) diff --git a/frame/nomination-pools/test-staking/src/lib.rs b/frame/nomination-pools/test-staking/src/lib.rs index 7d848e98174b4..00e0e40ce33b0 100644 --- a/frame/nomination-pools/test-staking/src/lib.rs +++ b/frame/nomination-pools/test-staking/src/lib.rs @@ -41,7 +41,10 @@ fn pool_lifecycle_e2e() { // have the pool nominate. 
assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3])); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 50),]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 50 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -56,7 +59,10 @@ fn pool_lifecycle_e2e() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Bonded(POOL1_BONDED, 10), StakingEvent::Bonded(POOL1_BONDED, 10),] + vec![ + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, + ] ); assert_eq!( pool_events_since_last_call(), @@ -87,8 +93,8 @@ fn pool_lifecycle_e2e() { assert_eq!( staking_events_since_last_call(), vec![ - StakingEvent::Unbonded(POOL1_BONDED, 10), - StakingEvent::Unbonded(POOL1_BONDED, 10), + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, ] ); assert_eq!( @@ -131,7 +137,7 @@ fn pool_lifecycle_e2e() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Withdrawn(POOL1_BONDED, 20),] + vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 20 },] ); assert_eq!( pool_events_since_last_call(), @@ -155,7 +161,10 @@ fn pool_lifecycle_e2e() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Chilled(POOL1_BONDED), StakingEvent::Unbonded(POOL1_BONDED, 50),] + vec![ + StakingEvent::Chilled { stash: POOL1_BONDED }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 50 }, + ] ); assert_eq!( pool_events_since_last_call(), @@ -169,7 +178,7 @@ fn pool_lifecycle_e2e() { // pools is fully destroyed now. 
assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Withdrawn(POOL1_BONDED, 50),] + vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 50 },] ); assert_eq!( pool_events_since_last_call(), @@ -193,7 +202,10 @@ fn pool_slash_e2e() { assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); assert_eq!(LastPoolId::::get(), 1); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -210,7 +222,10 @@ fn pool_slash_e2e() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Bonded(POOL1_BONDED, 20), StakingEvent::Bonded(POOL1_BONDED, 20)] + vec![ + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 20 }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 20 } + ] ); assert_eq!( pool_events_since_last_call(), @@ -230,8 +245,8 @@ fn pool_slash_e2e() { assert_eq!( staking_events_since_last_call(), vec![ - StakingEvent::Unbonded(POOL1_BONDED, 10), - StakingEvent::Unbonded(POOL1_BONDED, 10) + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 } ] ); assert_eq!( @@ -253,9 +268,9 @@ fn pool_slash_e2e() { assert_eq!( staking_events_since_last_call(), vec![ - StakingEvent::Unbonded(POOL1_BONDED, 10), - StakingEvent::Unbonded(POOL1_BONDED, 10), - StakingEvent::Unbonded(POOL1_BONDED, 10), + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, ] ); @@ -278,7 +293,10 @@ fn pool_slash_e2e() { 2, // slash era 2, affects chunks at era 5 onwards. 
); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 30)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 30 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -302,7 +320,10 @@ fn pool_slash_e2e() { unbonding_eras: bounded_btree_map!(5 => 10, 6 => 5) } ); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Unbonded(POOL1_BONDED, 5)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 5 }] + ); assert_eq!( pool_events_since_last_call(), vec![PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 5, points: 5, era: 6 }] @@ -327,7 +348,7 @@ fn pool_slash_e2e() { assert_eq!( staking_events_since_last_call(), // a 10 (un-slashed) + 10/2 (slashed) balance from 10 has also been unlocked - vec![StakingEvent::Withdrawn(POOL1_BONDED, 15 + 10 + 15)] + vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 15 + 10 + 15 }] ); // now, finally, we can unbond the depositor further than their current limit. 
@@ -336,7 +357,7 @@ fn pool_slash_e2e() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded(POOL1_BONDED, 10)] + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }] ); assert_eq!( pool_events_since_last_call(), @@ -361,7 +382,7 @@ fn pool_slash_e2e() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Withdrawn(POOL1_BONDED, 10)] + vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 10 }] ); assert_eq!( pool_events_since_last_call(), @@ -388,7 +409,10 @@ fn pool_slash_proportional() { assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); assert_eq!(LastPoolId::::get(), 1); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -406,9 +430,9 @@ fn pool_slash_proportional() { assert_eq!( staking_events_since_last_call(), vec![ - StakingEvent::Bonded(POOL1_BONDED, bond), - StakingEvent::Bonded(POOL1_BONDED, bond), - StakingEvent::Bonded(POOL1_BONDED, bond), + StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }, ] ); assert_eq!( @@ -428,7 +452,7 @@ fn pool_slash_proportional() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded(POOL1_BONDED, bond),] + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },] ); assert_eq!( pool_events_since_last_call(), @@ -445,7 +469,7 @@ fn pool_slash_proportional() { assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, bond)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded(POOL1_BONDED, bond),] + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },] ); assert_eq!( pool_events_since_last_call(), @@ -462,7 +486,7 @@ fn 
pool_slash_proportional() { assert_ok!(Pools::unbond(RuntimeOrigin::signed(22), 22, bond)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded(POOL1_BONDED, bond),] + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },] ); assert_eq!( pool_events_since_last_call(), @@ -486,7 +510,10 @@ fn pool_slash_proportional() { 100, ); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 50)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 50 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -517,7 +544,10 @@ fn pool_slash_non_proportional_only_bonded_pool() { // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -531,7 +561,7 @@ fn pool_slash_non_proportional_only_bonded_pool() { assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Bonded(POOL1_BONDED, bond)] + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }] ); assert_eq!( pool_events_since_last_call(), @@ -543,7 +573,7 @@ fn pool_slash_non_proportional_only_bonded_pool() { assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded(POOL1_BONDED, bond)] + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond }] ); assert_eq!( pool_events_since_last_call(), @@ -567,7 +597,10 @@ fn pool_slash_non_proportional_only_bonded_pool() { 100, ); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 30)]); + assert_eq!( + staking_events_since_last_call(), + 
vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 30 }] + ); assert_eq!( pool_events_since_last_call(), vec![PoolsEvent::PoolSlashed { pool_id: 1, balance: 10 }] @@ -590,7 +623,10 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -604,7 +640,7 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Bonded(POOL1_BONDED, bond)] + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }] ); assert_eq!( pool_events_since_last_call(), @@ -616,7 +652,7 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded(POOL1_BONDED, bond)] + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond }] ); assert_eq!( pool_events_since_last_call(), @@ -640,7 +676,10 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { 100, ); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 50)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 50 }] + ); assert_eq!( pool_events_since_last_call(), vec![ diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index c9498214eade4..555ec42882ee1 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -309,13 +309,13 @@ benchmarks! 
{ let reward_amount = slash_amount.saturating_mul(1 + n) / 2; let reward = reward_amount / r; let slash = |id| core::iter::once( - ::RuntimeEvent::from(StakingEvent::::Slashed(id, BalanceOf::::from(slash_amount))) + ::RuntimeEvent::from(StakingEvent::::Slashed{staker: id, amount: BalanceOf::::from(slash_amount)}) ); let balance_slash = |id| core::iter::once( ::RuntimeEvent::from(pallet_balances::Event::::Slashed{who: id, amount: slash_amount.into()}) ); let chill = |id| core::iter::once( - ::RuntimeEvent::from(StakingEvent::::Chilled(id)) + ::RuntimeEvent::from(StakingEvent::::Chilled{stash: id}) ); let balance_deposit = |id, amount: u32| ::RuntimeEvent::from(pallet_balances::Event::::Deposit{who: id, amount: amount.into()}); diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index df568d6b596ba..eb30671d35a57 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -953,7 +953,9 @@ where if bonded_eras.first().filter(|(_, start)| offence_session >= *start).is_some() { R::report_offence(reporters, offence) } else { - >::deposit_event(Event::::OldSlashingReportDiscarded(offence_session)); + >::deposit_event(Event::::OldSlashingReportDiscarded { + session_index: offence_session, + }); Ok(()) } } diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 399f50aaed865..6da27da362b53 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -181,14 +181,20 @@ impl Pallet { let validator_exposure_part = Perbill::from_rational(exposure.own, exposure.total); let validator_staking_payout = validator_exposure_part * validator_leftover_payout; - Self::deposit_event(Event::::PayoutStarted(era, ledger.stash.clone())); + Self::deposit_event(Event::::PayoutStarted { + era_index: era, + validator_stash: ledger.stash.clone(), + }); let mut total_imbalance = PositiveImbalanceOf::::zero(); // We can now make total validator payout: if let Some(imbalance) = Self::make_payout(&ledger.stash, 
validator_staking_payout + validator_commission_payout) { - Self::deposit_event(Event::::Rewarded(ledger.stash, imbalance.peek())); + Self::deposit_event(Event::::Rewarded { + stash: ledger.stash, + amount: imbalance.peek(), + }); total_imbalance.subsume(imbalance); } @@ -208,7 +214,8 @@ impl Pallet { if let Some(imbalance) = Self::make_payout(&nominator.who, nominator_reward) { // Note: this logic does not count payouts for `RewardDestination::None`. nominator_payout_count += 1; - let e = Event::::Rewarded(nominator.who.clone(), imbalance.peek()); + let e = + Event::::Rewarded { stash: nominator.who.clone(), amount: imbalance.peek() }; Self::deposit_event(e); total_imbalance.subsume(imbalance); } @@ -232,7 +239,7 @@ impl Pallet { let chilled_as_validator = Self::do_remove_validator(stash); let chilled_as_nominator = Self::do_remove_nominator(stash); if chilled_as_validator || chilled_as_nominator { - Self::deposit_event(Event::::Chilled(stash.clone())); + Self::deposit_event(Event::::Chilled { stash: stash.clone() }); } } @@ -391,13 +398,18 @@ impl Pallet { let era_duration = (now_as_millis_u64 - active_era_start).saturated_into::(); let staked = Self::eras_total_stake(&active_era.index); let issuance = T::Currency::total_issuance(); - let (validator_payout, rest) = T::EraPayout::era_payout(staked, issuance, era_duration); + let (validator_payout, remainder) = + T::EraPayout::era_payout(staked, issuance, era_duration); - Self::deposit_event(Event::::EraPaid(active_era.index, validator_payout, rest)); + Self::deposit_event(Event::::EraPaid { + era_index: active_era.index, + validator_payout, + remainder, + }); // Set ending era reward. >::insert(&active_era.index, validator_payout); - T::RewardRemainder::on_unbalanced(T::Currency::issue(rest)); + T::RewardRemainder::on_unbalanced(T::Currency::issue(remainder)); // Clear offending validators. 
>::kill(); diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 4db3870c62d8b..6e97697736223 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -653,39 +653,36 @@ pub mod pallet { pub enum Event { /// The era payout has been set; the first balance is the validator-payout; the second is /// the remainder from the maximum amount of reward. - /// \[era_index, validator_payout, remainder\] - EraPaid(EraIndex, BalanceOf, BalanceOf), - /// The nominator has been rewarded by this amount. \[stash, amount\] - Rewarded(T::AccountId, BalanceOf), + EraPaid { era_index: EraIndex, validator_payout: BalanceOf, remainder: BalanceOf }, + /// The nominator has been rewarded by this amount. + Rewarded { stash: T::AccountId, amount: BalanceOf }, /// One staker (and potentially its nominators) has been slashed by the given amount. - /// \[staker, amount\] - Slashed(T::AccountId, BalanceOf), + Slashed { staker: T::AccountId, amount: BalanceOf }, /// An old slashing report from a prior era was discarded because it could - /// not be processed. \[session_index\] - OldSlashingReportDiscarded(SessionIndex), + /// not be processed. + OldSlashingReportDiscarded { session_index: SessionIndex }, /// A new set of stakers was elected. StakersElected, /// An account has bonded this amount. \[stash, amount\] /// /// NOTE: This event is only emitted when funds are bonded via a dispatchable. Notably, /// it will not be emitted for staking rewards when they are added to stake. - Bonded(T::AccountId, BalanceOf), - /// An account has unbonded this amount. \[stash, amount\] - Unbonded(T::AccountId, BalanceOf), + Bonded { stash: T::AccountId, amount: BalanceOf }, + /// An account has unbonded this amount. + Unbonded { stash: T::AccountId, amount: BalanceOf }, /// An account has called `withdraw_unbonded` and removed unbonding chunks worth `Balance` - /// from the unlocking queue. 
\[stash, amount\] - Withdrawn(T::AccountId, BalanceOf), - /// A nominator has been kicked from a validator. \[nominator, stash\] - Kicked(T::AccountId, T::AccountId), + /// from the unlocking queue. + Withdrawn { stash: T::AccountId, amount: BalanceOf }, + /// A nominator has been kicked from a validator. + Kicked { nominator: T::AccountId, stash: T::AccountId }, /// The election failed. No new era is planned. StakingElectionFailed, /// An account has stopped participating as either a validator or nominator. - /// \[stash\] - Chilled(T::AccountId), - /// The stakers' rewards are getting paid. \[era_index, validator_stash\] - PayoutStarted(EraIndex, T::AccountId), + Chilled { stash: T::AccountId }, + /// The stakers' rewards are getting paid. + PayoutStarted { era_index: EraIndex, validator_stash: T::AccountId }, /// A validator has set their preferences. - ValidatorPrefsSet(T::AccountId, ValidatorPrefs), + ValidatorPrefsSet { stash: T::AccountId, prefs: ValidatorPrefs }, } #[pallet::error] @@ -850,7 +847,7 @@ pub mod pallet { let stash_balance = T::Currency::free_balance(&stash); let value = value.min(stash_balance); - Self::deposit_event(Event::::Bonded(stash.clone(), value)); + Self::deposit_event(Event::::Bonded { stash: stash.clone(), amount: value }); let item = StakingLedger { stash, total: value, @@ -911,7 +908,7 @@ pub mod pallet { T::VoterList::on_update(&stash, Self::weight_of(&ledger.stash)).defensive(); } - Self::deposit_event(Event::::Bonded(stash, extra)); + Self::deposit_event(Event::::Bonded { stash, amount: extra }); } Ok(()) } @@ -994,7 +991,7 @@ pub mod pallet { .defensive(); } - Self::deposit_event(Event::::Unbonded(ledger.stash, value)); + Self::deposit_event(Event::::Unbonded { stash: ledger.stash, amount: value }); } Ok(()) } @@ -1050,7 +1047,7 @@ pub mod pallet { if ledger.total < old_total { // Already checked that this won't overflow by entry condition. 
let value = old_total - ledger.total; - Self::deposit_event(Event::::Withdrawn(stash, value)); + Self::deposit_event(Event::::Withdrawn { stash, amount: value }); } Ok(post_info_weight.into()) @@ -1088,7 +1085,7 @@ pub mod pallet { Self::do_remove_nominator(stash); Self::do_add_validator(stash, prefs.clone()); - Self::deposit_event(Event::::ValidatorPrefsSet(ledger.stash, prefs)); + Self::deposit_event(Event::::ValidatorPrefsSet { stash: ledger.stash, prefs }); Ok(()) } @@ -1471,7 +1468,10 @@ pub mod pallet { // Last check: the new active amount of ledger must be more than ED. ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientBond); - Self::deposit_event(Event::::Bonded(ledger.stash.clone(), rebonded_value)); + Self::deposit_event(Event::::Bonded { + stash: ledger.stash.clone(), + amount: rebonded_value, + }); // NOTE: ledger must be updated prior to calling `Self::weight_of`. Self::update_ledger(&controller, &ledger); @@ -1546,10 +1546,10 @@ pub mod pallet { if let Some(ref mut nom) = maybe_nom { if let Some(pos) = nom.targets.iter().position(|v| v == stash) { nom.targets.swap_remove(pos); - Self::deposit_event(Event::::Kicked( - nom_stash.clone(), - stash.clone(), - )); + Self::deposit_event(Event::::Kicked { + nominator: nom_stash.clone(), + stash: stash.clone(), + }); } } }); diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index f3272a25fab5c..a1900136d64fd 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -626,7 +626,10 @@ pub fn do_slash( >::update_ledger(&controller, &ledger); // trigger the event - >::deposit_event(super::Event::::Slashed(stash.clone(), value)); + >::deposit_event(super::Event::::Slashed { + staker: stash.clone(), + amount: value, + }); } } diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 6798a78030f9e..8ec98da99ecb1 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -303,7 +303,11 @@ fn 
rewards_should_work() { assert_eq!(mock::RewardRemainderUnbalanced::get(), maximum_payout - total_payout_0,); assert_eq!( *mock::staking_events().last().unwrap(), - Event::EraPaid(0, total_payout_0, maximum_payout - total_payout_0) + Event::EraPaid { + era_index: 0, + validator_payout: total_payout_0, + remainder: maximum_payout - total_payout_0 + } ); mock::make_all_reward_payment(0); @@ -341,7 +345,11 @@ fn rewards_should_work() { ); assert_eq!( *mock::staking_events().last().unwrap(), - Event::EraPaid(1, total_payout_1, maximum_payout - total_payout_1) + Event::EraPaid { + era_index: 1, + validator_payout: total_payout_1, + remainder: maximum_payout - total_payout_1 + } ); mock::make_all_reward_payment(1); @@ -1645,7 +1653,7 @@ fn rebond_emits_right_value_in_event() { }) ); // Event emitted should be correct - assert_eq!(*staking_events().last().unwrap(), Event::Bonded(11, 100)); + assert_eq!(*staking_events().last().unwrap(), Event::Bonded { stash: 11, amount: 100 }); // Re-bond way more than available Staking::rebond(RuntimeOrigin::signed(10), 100_000).unwrap(); @@ -1660,7 +1668,7 @@ fn rebond_emits_right_value_in_event() { }) ); // Event emitted should be correct, only 800 - assert_eq!(*staking_events().last().unwrap(), Event::Bonded(11, 800)); + assert_eq!(*staking_events().last().unwrap(), Event::Bonded { stash: 11, amount: 800 }); }); } @@ -2870,9 +2878,9 @@ fn deferred_slashes_are_deferred() { staking_events_since_last_call(), vec![ Event::StakersElected, - Event::EraPaid(3, 11075, 33225), - Event::Slashed(11, 100), - Event::Slashed(101, 12) + Event::EraPaid { era_index: 3, validator_payout: 11075, remainder: 33225 }, + Event::Slashed { staker: 11, amount: 100 }, + Event::Slashed { staker: 101, amount: 12 } ] ); }) @@ -2901,9 +2909,9 @@ fn retroactive_deferred_slashes_two_eras_before() { staking_events_since_last_call(), vec![ Event::StakersElected, - Event::EraPaid(3, 7100, 21300), - Event::Slashed(11, 100), - Event::Slashed(101, 12) + Event::EraPaid { 
era_index: 3, validator_payout: 7100, remainder: 21300 }, + Event::Slashed { staker: 11, amount: 100 }, + Event::Slashed { staker: 101, amount: 12 }, ] ); }) @@ -2934,7 +2942,10 @@ fn retroactive_deferred_slashes_one_before() { mock::start_active_era(4); assert_eq!( staking_events_since_last_call(), - vec![Event::StakersElected, Event::EraPaid(3, 11075, 33225)] + vec![ + Event::StakersElected, + Event::EraPaid { era_index: 3, validator_payout: 11075, remainder: 33225 } + ] ); assert_eq!(Staking::ledger(10).unwrap().total, 1000); @@ -2944,9 +2955,9 @@ fn retroactive_deferred_slashes_one_before() { staking_events_since_last_call(), vec![ Event::StakersElected, - Event::EraPaid(4, 11075, 33225), - Event::Slashed(11, 100), - Event::Slashed(101, 12) + Event::EraPaid { era_index: 4, validator_payout: 11075, remainder: 33225 }, + Event::Slashed { staker: 11, amount: 100 }, + Event::Slashed { staker: 101, amount: 12 } ] ); @@ -3090,9 +3101,9 @@ fn remove_deferred() { staking_events_since_last_call(), vec![ Event::StakersElected, - Event::EraPaid(3, 11075, 33225), - Event::Slashed(11, 50), - Event::Slashed(101, 7) + Event::EraPaid { era_index: 3, validator_payout: 11075, remainder: 33225 }, + Event::Slashed { staker: 11, amount: 50 }, + Event::Slashed { staker: 101, amount: 7 } ] ); @@ -4057,7 +4068,7 @@ fn offences_weight_calculated_correctly() { &one_offender, &[Perbill::from_percent(50)], 0, - DisableStrategy::WhenSlashed + DisableStrategy::WhenSlashed{} ), one_offence_unapplied_weight ); @@ -4955,10 +4966,10 @@ fn min_commission_works() { // event emitted should be correct assert_eq!( *staking_events().last().unwrap(), - Event::ValidatorPrefsSet( - 11, - ValidatorPrefs { commission: Perbill::from_percent(5), blocked: false } - ) + Event::ValidatorPrefsSet { + stash: 11, + prefs: ValidatorPrefs { commission: Perbill::from_percent(5), blocked: false } + } ); assert_ok!(Staking::set_staking_configs( From 74daaf1eb23686991a40c6cc361940421322472b Mon Sep 17 00:00:00 2001 
From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Tue, 27 Sep 2022 17:44:16 +0200 Subject: [PATCH 15/42] [fix] Bound staking ledger correctly with MaxUnlockingChunks from configuration (#12343) * used maxunlockingchunks from config * mhl MaxUnlockingChunks * no migration needed * changes as per requested * fmt * fix tests * fix benchmark * warning in the doc for abrupt changes in the config * less unnecessary details in the test * fix tests Co-authored-by: mrisholukamba Co-authored-by: parity-processbot <> --- frame/staking/src/benchmarking.rs | 4 +- frame/staking/src/lib.rs | 7 +--- frame/staking/src/mock.rs | 3 +- frame/staking/src/pallet/mod.rs | 27 +++++++++----- frame/staking/src/tests.rs | 61 +++++++++++++++++++++++++++++-- 5 files changed, 80 insertions(+), 22 deletions(-) diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index 1ea05bba3b579..c7e6936ac75d8 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -613,7 +613,7 @@ benchmarks! { } rebond { - let l in 1 .. MaxUnlockingChunks::get() as u32; + let l in 1 .. T::MaxUnlockingChunks::get() as u32; // clean up any existing state. clear_validators_and_nominators::(); @@ -764,7 +764,7 @@ benchmarks! { #[extra] do_slash { - let l in 1 .. MaxUnlockingChunks::get() as u32; + let l in 1 .. 
T::MaxUnlockingChunks::get() as u32; let (stash, controller) = create_stash_controller::(0, 100, Default::default())?; let mut staking_ledger = Ledger::::get(controller.clone()).unwrap(); let unlock_chunk = UnlockChunk::> { diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index eb30671d35a57..a0144463540be 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -301,7 +301,6 @@ mod pallet; use codec::{Decode, Encode, HasCompact, MaxEncodedLen}; use frame_support::{ - parameter_types, traits::{Currency, Defensive, Get}, weights::Weight, BoundedVec, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, @@ -349,10 +348,6 @@ type NegativeImbalanceOf = <::Currency as Currency< type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; -parameter_types! { - pub MaxUnlockingChunks: u32 = 32; -} - /// Information regarding the active era (era in used in session). #[derive(Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct ActiveEraInfo { @@ -465,7 +460,7 @@ pub struct StakingLedger { /// Any balance that is becoming free, which may eventually be transferred out of the stash /// (assuming it doesn't get slashed first). It is assumed that this will be treated as a first /// in, first out queue where the new (higher value) eras get pushed on the back. - pub unlocking: BoundedVec>, MaxUnlockingChunks>, + pub unlocking: BoundedVec>, T::MaxUnlockingChunks>, /// List of eras for which the stakers behind a validator have claimed rewards. Only updated /// for validators. pub claimed_rewards: BoundedVec, diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 385087f9bec41..3a9351ef4a271 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -237,6 +237,7 @@ parameter_types! 
{ pub static BagThresholds: &'static [sp_npos_elections::VoteWeight] = &THRESHOLDS; pub static MaxNominations: u32 = 16; pub static HistoryDepth: u32 = 80; + pub static MaxUnlockingChunks: u32 = 32; pub static RewardOnUnbalanceWasCalled: bool = false; pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); } @@ -301,7 +302,7 @@ impl crate::pallet::pallet::Config for Test { // NOTE: consider a macro and use `UseNominatorsAndValidatorsMap` as well. type VoterList = VoterBagsList; type TargetList = UseValidatorsMap; - type MaxUnlockingChunks = ConstU32<32>; + type MaxUnlockingChunks = MaxUnlockingChunks; type HistoryDepth = HistoryDepth; type OnStakerSlash = OnStakerSlashMock; type BenchmarkingConfig = TestBenchmarkingConfig; diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 6e97697736223..560c3b6ed830c 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -43,9 +43,9 @@ pub use impls::*; use crate::{ slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, EraPayout, - EraRewardPoints, Exposure, Forcing, MaxUnlockingChunks, NegativeImbalanceOf, Nominations, - PositiveImbalanceOf, Releases, RewardDestination, SessionInterface, StakingLedger, - UnappliedSlash, UnlockChunk, ValidatorPrefs, + EraRewardPoints, Exposure, Forcing, NegativeImbalanceOf, Nominations, PositiveImbalanceOf, + Releases, RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, + ValidatorPrefs, }; const STAKING_ID: LockIdentifier = *b"staking "; @@ -142,8 +142,9 @@ pub mod pallet { /// /// Note: `HistoryDepth` is used as the upper bound for the `BoundedVec` /// item `StakingLedger.claimed_rewards`. Setting this value lower than - /// the existing value can lead to inconsistencies and will need to be - /// handled properly in a migration. 
+ /// the existing value can lead to inconsistencies in the + /// `StakingLedger` and will need to be handled properly in a migration. + /// The test `reducing_history_depth_abrupt` shows this effect. #[pallet::constant] type HistoryDepth: Get; @@ -237,8 +238,16 @@ pub mod pallet { /// VALIDATOR. type TargetList: SortedListProvider>; - /// The maximum number of `unlocking` chunks a [`StakingLedger`] can have. Effectively - /// determines how many unique eras a staker may be unbonding in. + /// The maximum number of `unlocking` chunks a [`StakingLedger`] can + /// have. Effectively determines how many unique eras a staker may be + /// unbonding in. + /// + /// Note: `MaxUnlockingChunks` is used as the upper bound for the + /// `BoundedVec` item `StakingLedger.unlocking`. Setting this value + /// lower than the existing value can lead to inconsistencies in the + /// `StakingLedger` and will need to be handled properly in a runtime + /// migration. The test `reducing_max_unlocking_chunks_abrupt` shows + /// this effect. #[pallet::constant] type MaxUnlockingChunks: Get; @@ -940,7 +949,7 @@ pub mod pallet { let controller = ensure_signed(origin)?; let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; ensure!( - ledger.unlocking.len() < MaxUnlockingChunks::get() as usize, + ledger.unlocking.len() < T::MaxUnlockingChunks::get() as usize, Error::::NoMoreChunks, ); @@ -1454,7 +1463,7 @@ pub mod pallet { /// - Bounded by `MaxUnlockingChunks`. /// - Storage changes: Can't increase storage, only decrease it. /// # - #[pallet::weight(T::WeightInfo::rebond(MaxUnlockingChunks::get() as u32))] + #[pallet::weight(T::WeightInfo::rebond(T::MaxUnlockingChunks::get() as u32))] pub fn rebond( origin: OriginFor, #[pallet::compact] value: BalanceOf, diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 8ec98da99ecb1..4812c105c0d80 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -17,7 +17,7 @@ //! Tests for the module. 
-use super::{ConfigOp, Event, MaxUnlockingChunks, *}; +use super::{ConfigOp, Event, *}; use frame_election_provider_support::{ElectionProvider, SortedListProvider, Support}; use frame_support::{ assert_noop, assert_ok, assert_storage_noop, bounded_vec, @@ -1354,7 +1354,8 @@ fn too_many_unbond_calls_should_not_work() { ExtBuilder::default().build_and_execute(|| { let mut current_era = 0; // locked at era MaxUnlockingChunks - 1 until 3 - for i in 0..MaxUnlockingChunks::get() - 1 { + + for i in 0..<::MaxUnlockingChunks as Get>::get() - 1 { // There is only 1 chunk per era, so we need to be in a new era to create a chunk. current_era = i as u32; mock::start_active_era(current_era); @@ -1369,7 +1370,7 @@ fn too_many_unbond_calls_should_not_work() { assert_ok!(Staking::unbond(RuntimeOrigin::signed(10), 1)); assert_eq!( Staking::ledger(&10).unwrap().unlocking.len(), - MaxUnlockingChunks::get() as usize + <::MaxUnlockingChunks as Get>::get() as usize ); // can't do more. assert_noop!(Staking::unbond(RuntimeOrigin::signed(10), 1), Error::::NoMoreChunks); @@ -5494,7 +5495,7 @@ fn pre_bonding_era_cannot_be_claimed() { } #[test] -fn reducing_history_depth_without_migration() { +fn reducing_history_depth_abrupt() { // Verifies initial conditions of mock ExtBuilder::default().nominate(false).build_and_execute(|| { let original_history_depth = HistoryDepth::get(); @@ -5571,3 +5572,55 @@ fn reducing_history_depth_without_migration() { HistoryDepth::set(original_history_depth); }); } + +#[test] +fn reducing_max_unlocking_chunks_abrupt() { + // Concern is on validators only + // By Default 11, 10 are stash and ctrl and 21,20 + ExtBuilder::default().build_and_execute(|| { + // given a staker at era=10 and MaxUnlockChunks set to 2 + MaxUnlockingChunks::set(2); + start_active_era(10); + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 4, 300, RewardDestination::Staked)); + assert!(matches!(Staking::ledger(4), Some(_))); + + // when staker unbonds + 
assert_ok!(Staking::unbond(RuntimeOrigin::signed(4), 20)); + + // then an unlocking chunk is added at `current_era + bonding_duration` + // => 10 + 3 = 13 + let expected_unlocking: BoundedVec, MaxUnlockingChunks> = + bounded_vec![UnlockChunk { value: 20 as Balance, era: 13 as EraIndex }]; + assert!(matches!(Staking::ledger(4), + Some(StakingLedger { + unlocking, + .. + }) if unlocking==expected_unlocking)); + + // when staker unbonds at next era + start_active_era(11); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(4), 50)); + // then another unlock chunk is added + let expected_unlocking: BoundedVec, MaxUnlockingChunks> = + bounded_vec![UnlockChunk { value: 20, era: 13 }, UnlockChunk { value: 50, era: 14 }]; + assert!(matches!(Staking::ledger(4), + Some(StakingLedger { + unlocking, + .. + }) if unlocking==expected_unlocking)); + + // when staker unbonds further + start_active_era(12); + // then further unbonding not possible + assert_noop!(Staking::unbond(RuntimeOrigin::signed(4), 20), Error::::NoMoreChunks); + + // when max unlocking chunks is reduced abruptly to a low value + MaxUnlockingChunks::set(1); + // then unbond, rebond ops are blocked with ledger in corrupt state + assert_noop!(Staking::unbond(RuntimeOrigin::signed(4), 20), Error::::NotController); + assert_noop!(Staking::rebond(RuntimeOrigin::signed(4), 100), Error::::NotController); + + // reset the ledger corruption + MaxUnlockingChunks::set(2); + }) +} From 94b9646177430adb74d7e4737c98ba333f91c451 Mon Sep 17 00:00:00 2001 From: Roman Useinov Date: Tue, 27 Sep 2022 19:31:12 +0200 Subject: [PATCH 16/42] [Feature] Add deposit to fast-unstake (#12366) * [Feature] Add deposit to fast-unstake * disable on ErasToCheckPerBlock == 0 * removed signed ext * remove obsolete import * remove some obsolete stuff * fix some comments * fixed all the comments * remove obsolete imports * fix some tests * CallNotAllowed tests * Update frame/fast-unstake/src/lib.rs Co-authored-by: Kian Paimani 
<5588131+kianenigma@users.noreply.github.com> * fix tests * fix deregister + tests * more fixes * make sure we go above existential deposit * fixed the last test * some nit fixes * fix node * fix bench * last bench fix * Update frame/fast-unstake/src/lib.rs * ".git/.scripts/fmt.sh" 1 Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: command-bot <> --- bin/node/runtime/src/lib.rs | 3 +- frame/fast-unstake/src/benchmarking.rs | 8 +- frame/fast-unstake/src/lib.rs | 105 +++++--- frame/fast-unstake/src/mock.rs | 19 +- frame/fast-unstake/src/tests.rs | 333 ++++++++++++++++--------- frame/fast-unstake/src/types.rs | 83 +----- 6 files changed, 313 insertions(+), 238 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 8ed5f1c847f5e..aa1a525bf095c 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -581,8 +581,9 @@ impl pallet_staking::Config for Runtime { impl pallet_fast_unstake::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type SlashPerEra = ConstU128<{ DOLLARS }>; type ControlOrigin = frame_system::EnsureRoot; + type Deposit = ConstU128<{ DOLLARS }>; + type DepositCurrency = Balances; type WeightInfo = (); } diff --git a/frame/fast-unstake/src/benchmarking.rs b/frame/fast-unstake/src/benchmarking.rs index 5690d5ce6f29f..8770cc6b64c0d 100644 --- a/frame/fast-unstake/src/benchmarking.rs +++ b/frame/fast-unstake/src/benchmarking.rs @@ -110,18 +110,18 @@ fn on_idle_full_block() { benchmarks! { // on_idle, we we don't check anyone, but fully unbond and move them to another pool. on_idle_unstake { + ErasToCheckPerBlock::::put(1); let who = create_unexposed_nominator::(); assert_ok!(FastUnstake::::register_fast_unstake( RawOrigin::Signed(who.clone()).into(), )); - ErasToCheckPerBlock::::put(1); // run on_idle once. This will check era 0. 
assert_eq!(Head::::get(), None); on_idle_full_block::(); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: who.clone(), checked: vec![0].try_into().unwrap() }) + Some(UnstakeRequest { stash: who.clone(), checked: vec![0].try_into().unwrap(), deposit: T::Deposit::get() }) ); } : { @@ -162,7 +162,7 @@ benchmarks! { let checked: frame_support::BoundedVec<_, _> = (1..=u).rev().collect::>().try_into().unwrap(); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: who.clone(), checked }) + Some(UnstakeRequest { stash: who.clone(), checked, deposit: T::Deposit::get() }) ); assert!(matches!( fast_unstake_events::().last(), @@ -171,6 +171,7 @@ benchmarks! { } register_fast_unstake { + ErasToCheckPerBlock::::put(1); let who = create_unexposed_nominator::(); whitelist_account!(who); assert_eq!(Queue::::count(), 0); @@ -182,6 +183,7 @@ benchmarks! { } deregister { + ErasToCheckPerBlock::::put(1); let who = create_unexposed_nominator::(); assert_ok!(FastUnstake::::register_fast_unstake( RawOrigin::Signed(who.clone()).into(), diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index 7fbac8560ea6c..ed26d6b436e1d 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -81,7 +81,10 @@ pub mod pallet { use super::*; use crate::types::*; use frame_election_provider_support::ElectionProvider; - use frame_support::pallet_prelude::*; + use frame_support::{ + pallet_prelude::*, + traits::{Defensive, ReservableCurrency}, + }; use frame_system::{pallet_prelude::*, RawOrigin}; use pallet_staking::Pallet as Staking; use sp_runtime::{ @@ -90,7 +93,6 @@ pub mod pallet { }; use sp_staking::EraIndex; use sp_std::{prelude::*, vec::Vec}; - pub use types::PreventStakingOpsIfUnbonding; pub use weights::WeightInfo; #[derive(scale_info::TypeInfo, codec::Encode, codec::Decode, codec::MaxEncodedLen)] @@ -113,10 +115,12 @@ pub mod pallet { + IsType<::RuntimeEvent> + TryInto>; - /// The amount of balance slashed per each era that was wastefully 
checked. - /// - /// A reasonable value could be `runtime_weight_to_fee(weight_per_era_check)`. - type SlashPerEra: Get>; + /// The currency used for deposits. + type DepositCurrency: ReservableCurrency>; + + /// Deposit to take for unstaking, to make sure we're able to slash it in order to cover + /// the costs of resources on unsuccessful unstake. + type Deposit: Get>; /// The origin that can control this pallet. type ControlOrigin: frame_support::traits::EnsureOrigin; @@ -128,13 +132,13 @@ pub mod pallet { /// The current "head of the queue" being unstaked. #[pallet::storage] pub type Head = - StorageValue<_, UnstakeRequest>, OptionQuery>; + StorageValue<_, UnstakeRequest, BalanceOf>, OptionQuery>; /// The map of all accounts wishing to be unstaked. /// - /// Keeps track of `AccountId` wishing to unstake. + /// Keeps track of `AccountId` wishing to unstake and its corresponding deposit. #[pallet::storage] - pub type Queue = CountedStorageMap<_, Twox64Concat, T::AccountId, ()>; + pub type Queue = CountedStorageMap<_, Twox64Concat, T::AccountId, BalanceOf>; /// Number of eras to check per block. /// @@ -177,6 +181,8 @@ pub mod pallet { NotQueued, /// The provided un-staker is already in Head, and cannot deregister. AlreadyHead, + /// The call is not allowed at this point because the pallet is not active. + CallNotAllowed, } #[pallet::hooks] @@ -214,6 +220,8 @@ pub mod pallet { pub fn register_fast_unstake(origin: OriginFor) -> DispatchResult { let ctrl = ensure_signed(origin)?; + + ensure!(ErasToCheckPerBlock::::get() != 0, >::CallNotAllowed); + let ledger = pallet_staking::Ledger::::get(&ctrl).ok_or(Error::::NotController)?; ensure!(!Queue::::contains_key(&ledger.stash), Error::::AlreadyQueued); @@ -231,8 +239,10 @@ pub mod pallet { Staking::::chill(RawOrigin::Signed(ctrl.clone()).into())?; Staking::::unbond(RawOrigin::Signed(ctrl).into(), ledger.total)?; + T::DepositCurrency::reserve(&ledger.stash, T::Deposit::get())?; + // enqueue them.
- Queue::::insert(ledger.stash, ()); + Queue::::insert(ledger.stash, T::Deposit::get()); Ok(()) } @@ -246,6 +256,9 @@ pub mod pallet { #[pallet::weight(::WeightInfo::deregister())] pub fn deregister(origin: OriginFor) -> DispatchResult { let ctrl = ensure_signed(origin)?; + + ensure!(ErasToCheckPerBlock::::get() != 0, >::CallNotAllowed); + let stash = pallet_staking::Ledger::::get(&ctrl) .map(|l| l.stash) .ok_or(Error::::NotController)?; @@ -254,7 +267,17 @@ pub mod pallet { Head::::get().map_or(true, |UnstakeRequest { stash, .. }| stash != stash), Error::::AlreadyHead ); - Queue::::remove(stash); + let deposit = Queue::::take(stash.clone()); + + if let Some(deposit) = deposit.defensive() { + let remaining = T::DepositCurrency::unreserve(&stash, deposit); + if !remaining.is_zero() { + frame_support::defensive!("`not enough balance to unreserve`"); + ErasToCheckPerBlock::::put(0); + Self::deposit_event(Event::::InternalError) + } + } + Ok(()) } @@ -315,18 +338,23 @@ pub mod pallet { return T::DbWeight::get().reads(2) } - let UnstakeRequest { stash, mut checked } = match Head::::take().or_else(|| { - // NOTE: there is no order guarantees in `Queue`. - Queue::::drain() - .map(|(stash, _)| UnstakeRequest { stash, checked: Default::default() }) - .next() - }) { - None => { - // There's no `Head` and nothing in the `Queue`, nothing to do here. - return T::DbWeight::get().reads(4) - }, - Some(head) => head, - }; + let UnstakeRequest { stash, mut checked, deposit } = + match Head::::take().or_else(|| { + // NOTE: there is no order guarantees in `Queue`. + Queue::::drain() + .map(|(stash, deposit)| UnstakeRequest { + stash, + deposit, + checked: Default::default(), + }) + .next() + }) { + None => { + // There's no `Head` and nothing in the `Queue`, nothing to do here. 
+ return T::DbWeight::get().reads(4) + }, + Some(head) => head, + }; log!( debug, @@ -381,9 +409,16 @@ pub mod pallet { num_slashing_spans, ); - log!(info, "unstaked {:?}, outcome: {:?}", stash, result); + let remaining = T::DepositCurrency::unreserve(&stash, deposit); + if !remaining.is_zero() { + frame_support::defensive!("`not enough balance to unreserve`"); + ErasToCheckPerBlock::::put(0); + Self::deposit_event(Event::::InternalError) + } else { + log!(info, "unstaked {:?}, outcome: {:?}", stash, result); + Self::deposit_event(Event::::Unstaked { stash, result }); + } - Self::deposit_event(Event::::Unstaked { stash, result }); ::WeightInfo::on_idle_unstake() } else { // eras remaining to be checked. @@ -406,22 +441,18 @@ pub mod pallet { // the last 28 eras, have registered yourself to be unstaked, midway being checked, // you are exposed. if is_exposed { - let amount = T::SlashPerEra::get() - .saturating_mul(eras_checked.saturating_add(checked.len() as u32).into()); - pallet_staking::slashing::do_slash::( - &stash, - amount, - &mut Default::default(), - &mut Default::default(), - current_era, - ); - log!(info, "slashed {:?} by {:?}", stash, amount); - Self::deposit_event(Event::::Slashed { stash, amount }); + T::DepositCurrency::slash_reserved(&stash, deposit); + log!(info, "slashed {:?} by {:?}", stash, deposit); + Self::deposit_event(Event::::Slashed { stash, amount: deposit }); } else { // Not exposed in these eras. 
match checked.try_extend(unchecked_eras_to_check.clone().into_iter()) { Ok(_) => { - Head::::put(UnstakeRequest { stash: stash.clone(), checked }); + Head::::put(UnstakeRequest { + stash: stash.clone(), + checked, + deposit, + }); Self::deposit_event(Event::::Checking { stash, eras: unchecked_eras_to_check, diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index 62f343709e245..4c4c5f9ff26fd 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -164,12 +164,13 @@ impl Convert for U256ToBalance { } parameter_types! { - pub static SlashPerEra: u32 = 100; + pub static DepositAmount: u128 = 7; } impl fast_unstake::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type SlashPerEra = SlashPerEra; + type Deposit = DepositAmount; + type DepositCurrency = Balances; type ControlOrigin = frame_system::EnsureRoot; type WeightInfo = (); } @@ -213,11 +214,11 @@ impl Default for ExtBuilder { fn default() -> Self { Self { exposed_nominators: vec![ - (1, 2, 100), - (3, 4, 100), - (5, 6, 100), - (7, 8, 100), - (9, 10, 100), + (1, 2, 7 + 100), + (3, 4, 7 + 100), + (5, 6, 7 + 100), + (7, 8, 7 + 100), + (9, 10, 7 + 100), ], } } @@ -270,8 +271,8 @@ impl ExtBuilder { .into_iter() .map(|(_, ctrl, balance)| (ctrl, balance * 2)), ) - .chain(validators_range.clone().map(|x| (x, 100))) - .chain(nominators_range.clone().map(|x| (x, 100))) + .chain(validators_range.clone().map(|x| (x, 7 + 100))) + .chain(nominators_range.clone().map(|x| (x, 7 + 100))) .collect::>(), } .assimilate_storage(&mut storage); diff --git a/frame/fast-unstake/src/tests.rs b/frame/fast-unstake/src/tests.rs index 5586443ce797c..6e617fd992028 100644 --- a/frame/fast-unstake/src/tests.rs +++ b/frame/fast-unstake/src/tests.rs @@ -35,6 +35,7 @@ fn test_setup_works() { #[test] fn register_works() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Controller account registers for fast unstake. 
assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Ensure stash is in the queue. @@ -42,9 +43,38 @@ fn register_works() { }); } +#[test] +fn register_insufficient_funds_fails() { + use pallet_balances::Error as BalancesError; + ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); + ::DepositCurrency::make_free_balance_be(&1, 3); + + // Controller account registers for fast unstake. + assert_noop!( + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), + BalancesError::::InsufficientBalance, + ); + + // Ensure stash is in the queue. + assert_eq!(Queue::::get(1), None); + }); +} + +#[test] +fn register_disabled_fails() { + ExtBuilder::default().build_and_execute(|| { + assert_noop!( + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), + Error::::CallNotAllowed + ); + }); +} + #[test] fn cannot_register_if_not_bonded() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Mint accounts 1 and 2 with 200 tokens. 
for _ in 1..2 { let _ = Balances::make_free_balance_be(&1, 200); @@ -60,8 +90,9 @@ fn cannot_register_if_not_bonded() { #[test] fn cannot_register_if_in_queue() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Insert some Queue item - Queue::::insert(1, ()); + Queue::::insert(1, 10); // Cannot re-register, already in queue assert_noop!( FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), @@ -73,8 +104,13 @@ fn cannot_register_if_in_queue() { #[test] fn cannot_register_if_head() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Insert some Head item for stash - Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![] }); + Head::::put(UnstakeRequest { + stash: 1, + checked: bounded_vec![], + deposit: DepositAmount::get(), + }); // Controller attempts to regsiter assert_noop!( FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), @@ -86,6 +122,7 @@ fn cannot_register_if_head() { #[test] fn cannot_register_if_has_unlocking_chunks() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Start unbonding half of staked tokens assert_ok!(Staking::unbond(RuntimeOrigin::signed(2), 50_u128)); // Cannot register for fast unstake with unlock chunks active @@ -99,18 +136,37 @@ fn cannot_register_if_has_unlocking_chunks() { #[test] fn deregister_works() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); + + assert_eq!(::DepositCurrency::reserved_balance(&1), 0); + // Controller account registers for fast unstake. assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(::DepositCurrency::reserved_balance(&1), DepositAmount::get()); + // Controller then changes mind and deregisters. assert_ok!(FastUnstake::deregister(RuntimeOrigin::signed(2))); + assert_eq!(::DepositCurrency::reserved_balance(&1), 0); + // Ensure stash no longer exists in the queue. 
assert_eq!(Queue::::get(1), None); }); } +#[test] +fn deregister_disabled_fails() { + ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + ErasToCheckPerBlock::::put(0); + assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(2)), Error::::CallNotAllowed); + }); +} + #[test] fn cannot_deregister_if_not_controller() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Controller account registers for fast unstake. assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Stash tries to deregister. @@ -121,6 +177,7 @@ fn cannot_deregister_if_not_controller() { #[test] fn cannot_deregister_if_not_queued() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Controller tries to deregister without first registering assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(2)), Error::::NotQueued); }); @@ -129,10 +186,15 @@ fn cannot_deregister_if_not_queued() { #[test] fn cannot_deregister_already_head() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Controller attempts to register, should fail assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Insert some Head item for stash. 
- Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![] }); + Head::::put(UnstakeRequest { + stash: 1, + checked: bounded_vec![], + deposit: DepositAmount::get(), + }); // Controller attempts to deregister assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(2)), Error::::AlreadyHead); }); @@ -165,14 +227,14 @@ mod on_idle { // set up Queue item assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); // call on_idle with no remaining weight FastUnstake::on_idle(System::block_number(), Weight::from_ref_time(0)); // assert nothing changed in Queue and Head assert_eq!(Head::::get(), None); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); }); } @@ -185,7 +247,7 @@ mod on_idle { // given assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); assert_eq!(Queue::::count(), 1); assert_eq!(Head::::get(), None); @@ -204,7 +266,11 @@ mod on_idle { ); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3] + }) ); // when: another 1 era. @@ -220,7 +286,11 @@ mod on_idle { ); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2] + }) ); // when: then 5 eras, we only need 2 more. 
@@ -242,7 +312,11 @@ mod on_idle { ); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); // when: not enough weight to unstake: @@ -254,7 +328,11 @@ mod on_idle { assert_eq!(fast_unstake_events_since_last_call(), vec![]); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); // when: enough weight to get over at least one iteration: then we are unblocked and can @@ -285,12 +363,16 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // given + assert_eq!(::DepositCurrency::reserved_balance(&1), 0); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(6))); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(8))); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(10))); + assert_eq!(::DepositCurrency::reserved_balance(&1), DepositAmount::get()); + assert_eq!(Queue::::count(), 5); assert_eq!(Head::::get(), None); @@ -300,7 +382,11 @@ mod on_idle { // then assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); assert_eq!(Queue::::count(), 4); @@ -317,10 +403,16 @@ mod on_idle { // then assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 5, checked: bounded_vec![3, 2, 1, 0] }), + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 5, + checked: bounded_vec![3, 2, 1, 0] + }), ); assert_eq!(Queue::::count(), 3); + assert_eq!(::DepositCurrency::reserved_balance(&1), 0); + assert_eq!( 
fast_unstake_events_since_last_call(), vec![ @@ -340,9 +432,9 @@ mod on_idle { // register multi accounts for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); - assert_eq!(Queue::::get(3), Some(())); + assert_eq!(Queue::::get(3), Some(DepositAmount::get())); // assert 2 queue items are in Queue & None in Head to start with assert_eq!(Queue::::count(), 2); @@ -391,7 +483,7 @@ mod on_idle { // register for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); // process on idle next_block(true); @@ -402,7 +494,11 @@ mod on_idle { // assert head item present assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); next_block(true); @@ -425,9 +521,11 @@ mod on_idle { ErasToCheckPerBlock::::put(BondingDuration::get() + 1); CurrentEra::::put(BondingDuration::get()); + Balances::make_free_balance_be(&2, 100); + // register for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); // process on idle next_block(true); @@ -438,7 +536,11 @@ mod on_idle { // assert head item present assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); next_block(true); @@ -464,7 +566,7 @@ mod on_idle { // register for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - 
assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); // process on idle next_block(true); @@ -475,28 +577,44 @@ mod on_idle { // assert head item present assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); next_block(true); @@ -529,30 +647,46 @@ mod on_idle { // register for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1] + }) ); next_block(true); assert_eq!( 
Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); // when: a new era happens right before one is free. @@ -567,6 +701,7 @@ mod on_idle { stash: 1, // note era 0 is pruned to keep the vector length sane. checked: bounded_vec![3, 2, 1, 4], + deposit: DepositAmount::get(), }) ); @@ -602,13 +737,21 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2] + }) ); // when @@ -618,13 +761,21 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2] + }) ); // then we register a new era. @@ -636,14 +787,22 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 4] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 4] + }) ); // progress to end next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 4, 1] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 4, 1] + }) ); // but notice that we don't care about era 0 instead anymore! we're done. 
@@ -669,7 +828,6 @@ mod on_idle { fn exposed_nominator_cannot_unstake() { ExtBuilder::default().build_and_execute(|| { ErasToCheckPerBlock::::put(1); - SlashPerEra::set(7); CurrentEra::::put(BondingDuration::get()); // create an exposed nominator in era 1 @@ -686,6 +844,7 @@ mod on_idle { )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(exposed), vec![exposed])); + Balances::make_free_balance_be(&exposed, 100_000); // register the exposed one. assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed))); @@ -693,23 +852,30 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: exposed, checked: bounded_vec![3] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: exposed, + checked: bounded_vec![3] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: exposed, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: exposed, + checked: bounded_vec![3, 2] + }) ); next_block(true); assert_eq!(Head::::get(), None); assert_eq!( fast_unstake_events_since_last_call(), - // we slash them by 21, since we checked 3 eras in total (3, 2, 1). vec![ Event::Checking { stash: exposed, eras: vec![3] }, Event::Checking { stash: exposed, eras: vec![2] }, - Event::Slashed { stash: exposed, amount: 3 * 7 } + Event::Slashed { stash: exposed, amount: DepositAmount::get() } ] ); }); @@ -721,7 +887,6 @@ mod on_idle { // same as the previous check, but we check 2 eras per block, and we make the exposed be // exposed in era 0, so that it is detected halfway in a check era. 
ErasToCheckPerBlock::::put(2); - SlashPerEra::set(7); CurrentEra::::put(BondingDuration::get()); // create an exposed nominator in era 1 @@ -729,7 +894,7 @@ mod on_idle { pallet_staking::ErasStakers::::mutate(0, VALIDATORS_PER_ERA, |expo| { expo.others.push(IndividualExposure { who: exposed, value: 0 as Balance }); }); - Balances::make_free_balance_be(&exposed, 100); + Balances::make_free_balance_be(&exposed, DepositAmount::get() + 100); assert_ok!(Staking::bond( RuntimeOrigin::signed(exposed), exposed, @@ -745,17 +910,21 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: exposed, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: exposed, + checked: bounded_vec![3, 2] + }) ); next_block(true); assert_eq!(Head::::get(), None); assert_eq!( fast_unstake_events_since_last_call(), - // we slash them by 28, since we checked 4 eras in total. + // we slash them vec![ Event::Checking { stash: exposed, eras: vec![3, 2] }, - Event::Slashed { stash: exposed, amount: 4 * 7 } + Event::Slashed { stash: exposed, amount: DepositAmount::get() } ] ); }); @@ -786,7 +955,7 @@ mod on_idle { assert_eq!( fast_unstake_events_since_last_call(), - vec![Event::Slashed { stash: 100, amount: 100 }] + vec![Event::Slashed { stash: 100, amount: DepositAmount::get() }] ); }); } @@ -798,7 +967,7 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // create a new validator that 100% not exposed. 
- Balances::make_free_balance_be(&42, 100); + Balances::make_free_balance_be(&42, 100 + DepositAmount::get()); assert_ok!(Staking::bond(RuntimeOrigin::signed(42), 42, 10, RewardDestination::Staked)); assert_ok!(Staking::validate(RuntimeOrigin::signed(42), Default::default())); @@ -809,7 +978,11 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 42, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 42, + checked: bounded_vec![3, 2, 1, 0] + }) ); next_block(true); assert_eq!(Head::::get(), None); @@ -824,69 +997,3 @@ mod on_idle { }); } } - -mod signed_extension { - use super::*; - use sp_runtime::traits::SignedExtension; - - const STAKING_CALL: crate::mock::RuntimeCall = - crate::mock::RuntimeCall::Staking(pallet_staking::Call::::chill {}); - - #[test] - fn does_nothing_if_not_queued() { - ExtBuilder::default().build_and_execute(|| { - assert!(PreventStakingOpsIfUnbonding::::new() - .pre_dispatch(&1, &STAKING_CALL, &Default::default(), Default::default()) - .is_ok()); - }) - } - - #[test] - fn prevents_queued() { - ExtBuilder::default().build_and_execute(|| { - // given: stash for 2 is 1. - // when - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - - // then - // stash can't. - assert!(PreventStakingOpsIfUnbonding::::new() - .pre_dispatch(&1, &STAKING_CALL, &Default::default(), Default::default()) - .is_err()); - - // controller can't. - assert!(PreventStakingOpsIfUnbonding::::new() - .pre_dispatch(&2, &STAKING_CALL, &Default::default(), Default::default()) - .is_err()); - }) - } - - #[test] - fn prevents_head_stash() { - ExtBuilder::default().build_and_execute(|| { - // given: stash for 2 is 1. 
- // when - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - - ErasToCheckPerBlock::::put(1); - CurrentEra::::put(BondingDuration::get()); - next_block(true); - - assert_eq!( - Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) - ); - - // then - // stash can't - assert!(PreventStakingOpsIfUnbonding::::new() - .pre_dispatch(&2, &STAKING_CALL, &Default::default(), Default::default()) - .is_err()); - - // controller can't - assert!(PreventStakingOpsIfUnbonding::::new() - .pre_dispatch(&1, &STAKING_CALL, &Default::default(), Default::default()) - .is_err()); - }) - } -} diff --git a/frame/fast-unstake/src/types.rs b/frame/fast-unstake/src/types.rs index 2ddb8dca27e9e..08b9ab4326eb2 100644 --- a/frame/fast-unstake/src/types.rs +++ b/frame/fast-unstake/src/types.rs @@ -17,14 +17,12 @@ //! Types used in the Fast Unstake pallet. -use crate::*; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ - traits::{Currency, Get, IsSubType}, + traits::{Currency, Get}, BoundedVec, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; use scale_info::TypeInfo; -use sp_runtime::transaction_validity::{InvalidTransaction, TransactionValidityError}; use sp_staking::EraIndex; use sp_std::{fmt::Debug, prelude::*}; @@ -36,80 +34,15 @@ pub type BalanceOf = <::Currency as Currency< #[derive( Encode, Decode, EqNoBound, PartialEqNoBound, Clone, TypeInfo, RuntimeDebugNoBound, MaxEncodedLen, )] -pub struct UnstakeRequest> { +pub struct UnstakeRequest< + AccountId: Eq + PartialEq + Debug, + MaxChecked: Get, + Balance: PartialEq + Debug, +> { /// Their stash account. pub(crate) stash: AccountId, /// The list of eras for which they have been checked. 
pub(crate) checked: BoundedVec, -} - -#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo, RuntimeDebugNoBound)] -#[scale_info(skip_type_params(T))] -pub struct PreventStakingOpsIfUnbonding(sp_std::marker::PhantomData); - -impl PreventStakingOpsIfUnbonding { - pub fn new() -> Self { - Self(Default::default()) - } -} - -impl sp_runtime::traits::SignedExtension - for PreventStakingOpsIfUnbonding -where - ::RuntimeCall: IsSubType>, -{ - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = (); - type Pre = (); - const IDENTIFIER: &'static str = "PreventStakingOpsIfUnbonding"; - - fn additional_signed(&self) -> Result { - Ok(()) - } - - fn pre_dispatch( - self, - // NOTE: we want to prevent this stash-controller pair from doing anything in the - // staking system as long as they are registered here. - stash_or_controller: &Self::AccountId, - call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, - _len: usize, - ) -> Result { - // we don't check this in the tx-pool as it requires a storage read. - if >>::is_sub_type(call).is_some() { - let check_stash = |stash: &T::AccountId| { - if Queue::::contains_key(&stash) || - Head::::get().map_or(false, |u| &u.stash == stash) - { - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) - } else { - Ok(()) - } - }; - match ( - // mapped from controller. - pallet_staking::Ledger::::get(&stash_or_controller), - // mapped from stash. - pallet_staking::Bonded::::get(&stash_or_controller), - ) { - (Some(ledger), None) => { - // it is a controller. - check_stash(&ledger.stash) - }, - (_, Some(_)) => { - // it's a stash. - let stash = stash_or_controller; - check_stash(stash) - }, - (None, None) => { - // They are not a staker -- let them execute. - Ok(()) - }, - } - } else { - Ok(()) - } - } + /// Deposit to be slashed if the unstake was unsuccessful. 
+ pub(crate) deposit: Balance, } From 2ee4cb47fa01ad6c7d6c94acf3370fd26470b388 Mon Sep 17 00:00:00 2001 From: Liu-Cheng Xu Date: Wed, 28 Sep 2022 04:14:01 +0800 Subject: [PATCH 17/42] Add missing CountedStorageMap in pallet::storage error info (#12356) --- frame/support/procedural/src/pallet/parse/storage.rs | 4 ++-- .../test/tests/pallet_ui/storage_not_storage_type.stderr | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/frame/support/procedural/src/pallet/parse/storage.rs b/frame/support/procedural/src/pallet/parse/storage.rs index 321c4dd5d4914..b16ff05803d98 100644 --- a/frame/support/procedural/src/pallet/parse/storage.rs +++ b/frame/support/procedural/src/pallet/parse/storage.rs @@ -555,8 +555,8 @@ fn process_generics( found => { let msg = format!( "Invalid pallet::storage, expected ident: `StorageValue` or \ - `StorageMap` or `StorageDoubleMap` or `StorageNMap` in order to expand metadata, \ - found `{}`.", + `StorageMap` or `CountedStorageMap` or `StorageDoubleMap` or `StorageNMap` \ + in order to expand metadata, found `{}`.", found, ); return Err(syn::Error::new(segment.ident.span(), msg)) diff --git a/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr b/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr index 4fd59183282d0..223e9cfa3e9f8 100644 --- a/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr +++ b/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr @@ -1,4 +1,4 @@ -error: Invalid pallet::storage, expected ident: `StorageValue` or `StorageMap` or `StorageDoubleMap` or `StorageNMap` in order to expand metadata, found `u8`. +error: Invalid pallet::storage, expected ident: `StorageValue` or `StorageMap` or `CountedStorageMap` or `StorageDoubleMap` or `StorageNMap` in order to expand metadata, found `u8`. 
--> $DIR/storage_not_storage_type.rs:19:16 | 19 | type Foo = u8; From 17c07af0b953b84dbe89341294e98e586f9b4591 Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Wed, 28 Sep 2022 18:21:53 +0800 Subject: [PATCH 18/42] Add storage size component to weights (#12277) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add storage size component to weights * Rename storage_size to proof_size * Update primitives/weights/src/weight_v2.rs Co-authored-by: Oliver Tale-Yazdi * Fixes * cargo fmt * Implement custom Decode and CompactAs * Add missing import * Fixes * Remove CompactAs implementation * Properly migrate from 1D weight * Remove #[pallet::compact] from Weight parameters * More #[pallet::compact] removals * Add unit tests * Set appropriate default block proof size * cargo fmt * Remove nonsensical weight constant * Test only for the reference time weight in frame_system::limits * Only check for reference time weight on idle * Use destructuring syntax * Update test expectations * Fixes * Fixes * Fixes * Correctly migrate from 1D weights * cargo fmt * Migrate using extra extrinsics instead of custom Decode * Fixes * Silence dispatch call warnings that were previously allowed * Fix gas_left test * Use OldWeight instead of u64 * Fixes * Only check for reference time weight in election provider * Fix test expectations * Fix test expectations * Use only reference time weight in grandpa test * Use only reference time weight in examples test * Use only reference time weight in examples test * Fix test expectations Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Alexander Theißen --- frame/alliance/src/lib.rs | 62 ++++- frame/babe/src/tests.rs | 3 +- frame/collective/src/lib.rs | 69 +++++- frame/contracts/src/lib.rs | 179 ++++++++++++++- frame/contracts/src/wasm/mod.rs | 15 +- .../election-provider-multi-phase/src/lib.rs | 9 +- .../src/unsigned.rs | 9 +- frame/examples/basic/src/tests.rs | 6 +- frame/executive/src/lib.rs | 7 +- 
frame/grandpa/src/tests.rs | 3 +- .../procedural/src/pallet/expand/call.rs | 20 ++ .../procedural/src/pallet/parse/call.rs | 3 + ...age_ensure_span_are_ok_on_wrong_gen.stderr | 6 +- ...re_span_are_ok_on_wrong_gen_unnamed.stderr | 6 +- .../pallet_ui/storage_info_unsatisfied.stderr | 2 +- .../storage_info_unsatisfied_nmap.stderr | 2 +- frame/system/src/limits.rs | 21 +- frame/transaction-payment/src/types.rs | 5 +- primitives/weights/src/lib.rs | 21 +- primitives/weights/src/weight_v2.rs | 211 ++++++++++++------ 20 files changed, 531 insertions(+), 128 deletions(-) diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index 2ef6718538122..24111b44ced9e 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -120,7 +120,7 @@ use frame_support::{ ChangeMembers, Currency, Get, InitializeMembers, IsSubType, OnUnbalanced, ReservableCurrency, }, - weights::Weight, + weights::{OldWeight, Weight}, }; use pallet_identity::IdentityField; @@ -620,25 +620,22 @@ pub mod pallet { .max(T::WeightInfo::close_early_disapproved(x, y, p2)) .max(T::WeightInfo::close_approved(b, x, y, p2)) .max(T::WeightInfo::close_disapproved(x, y, p2)) - .saturating_add(p1) + .saturating_add(p1.into()) })] - pub fn close( + #[allow(deprecated)] + #[deprecated(note = "1D weight is used in this extrinsic, please migrate to use `close`")] + pub fn close_old_weight( origin: OriginFor, proposal_hash: T::Hash, #[pallet::compact] index: ProposalIndex, - #[pallet::compact] proposal_weight_bound: Weight, + #[pallet::compact] proposal_weight_bound: OldWeight, #[pallet::compact] length_bound: u32, ) -> DispatchResultWithPostInfo { + let proposal_weight_bound: Weight = proposal_weight_bound.into(); let who = ensure_signed(origin)?; ensure!(Self::has_voting_rights(&who), Error::::NoVotingRights); - let info = T::ProposalProvider::close_proposal( - proposal_hash, - index, - proposal_weight_bound, - length_bound, - )?; - Ok(info.into()) + Self::do_close(proposal_hash, index, 
proposal_weight_bound, length_bound) } /// Initialize the Alliance, onboard founders, fellows, and allies. @@ -985,6 +982,34 @@ pub mod pallet { Self::deposit_event(Event::UnscrupulousItemRemoved { items }); Ok(()) } + + /// Close a vote that is either approved, disapproved, or whose voting period has ended. + /// + /// Requires the sender to be a founder or fellow. + #[pallet::weight({ + let b = *length_bound; + let x = T::MaxFounders::get(); + let y = T::MaxFellows::get(); + let p1 = *proposal_weight_bound; + let p2 = T::MaxProposals::get(); + T::WeightInfo::close_early_approved(b, x, y, p2) + .max(T::WeightInfo::close_early_disapproved(x, y, p2)) + .max(T::WeightInfo::close_approved(b, x, y, p2)) + .max(T::WeightInfo::close_disapproved(x, y, p2)) + .saturating_add(p1) + })] + pub fn close( + origin: OriginFor, + proposal_hash: T::Hash, + #[pallet::compact] index: ProposalIndex, + proposal_weight_bound: Weight, + #[pallet::compact] length_bound: u32, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + ensure!(Self::has_voting_rights(&who), Error::::NoVotingRights); + + Self::do_close(proposal_hash, index, proposal_weight_bound, length_bound) + } } } @@ -1197,4 +1222,19 @@ impl, I: 'static> Pallet { } res } + + fn do_close( + proposal_hash: T::Hash, + index: ProposalIndex, + proposal_weight_bound: Weight, + length_bound: u32, + ) -> DispatchResultWithPostInfo { + let info = T::ProposalProvider::close_proposal( + proposal_hash, + index, + proposal_weight_bound, + length_bound, + )?; + Ok(info.into()) + } } diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index 8d2a9b326cd0f..d4132e6378540 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -852,7 +852,8 @@ fn valid_equivocation_reports_dont_pay_fees() { .get_dispatch_info(); // it should have non-zero weight and the fee has to be paid. 
- assert!(info.weight.all_gt(Weight::zero())); + // TODO: account for proof size weight + assert!(info.weight.ref_time() > 0); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index ae68ae2fe3e16..06d5b1fab78e7 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -57,7 +57,7 @@ use frame_support::{ traits::{ Backing, ChangeMembers, EnsureOrigin, Get, GetBacking, InitializeMembers, StorageVersion, }, - weights::Weight, + weights::{OldWeight, Weight}, }; #[cfg(test)] @@ -620,17 +620,20 @@ pub mod pallet { .max(T::WeightInfo::close_early_disapproved(m, p2)) .max(T::WeightInfo::close_approved(b, m, p2)) .max(T::WeightInfo::close_disapproved(m, p2)) - .saturating_add(p1) + .saturating_add(p1.into()) }, DispatchClass::Operational ))] - pub fn close( + #[allow(deprecated)] + #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `close`")] + pub fn close_old_weight( origin: OriginFor, proposal_hash: T::Hash, #[pallet::compact] index: ProposalIndex, - #[pallet::compact] proposal_weight_bound: Weight, + #[pallet::compact] proposal_weight_bound: OldWeight, #[pallet::compact] length_bound: u32, ) -> DispatchResultWithPostInfo { + let proposal_weight_bound: Weight = proposal_weight_bound.into(); let _ = ensure_signed(origin)?; Self::do_close(proposal_hash, index, proposal_weight_bound, length_bound) @@ -659,6 +662,64 @@ pub mod pallet { let proposal_count = Self::do_disapprove_proposal(proposal_hash); Ok(Some(T::WeightInfo::disapprove_proposal(proposal_count)).into()) } + + /// Close a vote that is either approved, disapproved or whose voting period has ended. + /// + /// May be called by any signed account in order to finish voting and close the proposal. + /// + /// If called before the end of the voting period it will only close the vote if it is + /// has enough votes to be approved or disapproved. 
+ /// + /// If called after the end of the voting period abstentions are counted as rejections + /// unless there is a prime member set and the prime member cast an approval. + /// + /// If the close operation completes successfully with disapproval, the transaction fee will + /// be waived. Otherwise execution of the approved operation will be charged to the caller. + /// + /// + `proposal_weight_bound`: The maximum amount of weight consumed by executing the closed + /// proposal. + /// + `length_bound`: The upper bound for the length of the proposal in storage. Checked via + /// `storage::read` so it is `size_of::() == 4` larger than the pure length. + /// + /// # + /// ## Weight + /// - `O(B + M + P1 + P2)` where: + /// - `B` is `proposal` size in bytes (length-fee-bounded) + /// - `M` is members-count (code- and governance-bounded) + /// - `P1` is the complexity of `proposal` preimage. + /// - `P2` is proposal-count (code-bounded) + /// - DB: + /// - 2 storage reads (`Members`: codec `O(M)`, `Prime`: codec `O(1)`) + /// - 3 mutations (`Voting`: codec `O(M)`, `ProposalOf`: codec `O(B)`, `Proposals`: codec + /// `O(P2)`) + /// - any mutations done while executing `proposal` (`P1`) + /// - up to 3 events + /// # + #[pallet::weight(( + { + let b = *length_bound; + let m = T::MaxMembers::get(); + let p1 = *proposal_weight_bound; + let p2 = T::MaxProposals::get(); + T::WeightInfo::close_early_approved(b, m, p2) + .max(T::WeightInfo::close_early_disapproved(m, p2)) + .max(T::WeightInfo::close_approved(b, m, p2)) + .max(T::WeightInfo::close_disapproved(m, p2)) + .saturating_add(p1) + }, + DispatchClass::Operational + ))] + pub fn close( + origin: OriginFor, + proposal_hash: T::Hash, + #[pallet::compact] index: ProposalIndex, + proposal_weight_bound: Weight, + #[pallet::compact] length_bound: u32, + ) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + + Self::do_close(proposal_hash, index, proposal_weight_bound, length_bound) + } } } diff --git 
a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index fc44e4507ca00..f9a1c8decf042 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -113,7 +113,7 @@ use frame_support::{ tokens::fungible::Inspect, ConstU32, Contains, Currency, Get, Randomness, ReservableCurrency, Time, }, - weights::Weight, + weights::{OldWeight, Weight}, BoundedVec, WeakBoundedVec, }; use frame_system::{limits::BlockWeights, Pallet as System}; @@ -429,15 +429,18 @@ pub mod pallet { /// * If the account is a regular account, any value will be transferred. /// * If no account exists and the call value is not less than `existential_deposit`, /// a regular account will be created and any value will be transferred. - #[pallet::weight(T::WeightInfo::call().saturating_add(*gas_limit))] - pub fn call( + #[pallet::weight(T::WeightInfo::call().saturating_add((*gas_limit).into()))] + #[allow(deprecated)] + #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `call`")] + pub fn call_old_weight( origin: OriginFor, dest: AccountIdLookupOf, #[pallet::compact] value: BalanceOf, - #[pallet::compact] gas_limit: Weight, + #[pallet::compact] gas_limit: OldWeight, storage_deposit_limit: Option< as codec::HasCompact>::Type>, data: Vec, ) -> DispatchResultWithPostInfo { + let gas_limit: Weight = gas_limit.into(); let origin = ensure_signed(origin)?; let dest = T::Lookup::lookup(dest)?; let mut output = Self::internal_call( @@ -485,17 +488,22 @@ pub mod pallet { /// - The `deploy` function is executed in the context of the newly-created account. 
#[pallet::weight( T::WeightInfo::instantiate_with_code(code.len() as u32, salt.len() as u32) - .saturating_add(*gas_limit) + .saturating_add((*gas_limit).into()) )] - pub fn instantiate_with_code( + #[allow(deprecated)] + #[deprecated( + note = "1D weight is used in this extrinsic, please migrate to `instantiate_with_code`" + )] + pub fn instantiate_with_code_old_weight( origin: OriginFor, #[pallet::compact] value: BalanceOf, - #[pallet::compact] gas_limit: Weight, + #[pallet::compact] gas_limit: OldWeight, storage_deposit_limit: Option< as codec::HasCompact>::Type>, code: Vec, data: Vec, salt: Vec, ) -> DispatchResultWithPostInfo { + let gas_limit: Weight = gas_limit.into(); let origin = ensure_signed(origin)?; let code_len = code.len() as u32; let salt_len = salt.len() as u32; @@ -526,17 +534,20 @@ pub mod pallet { /// code deployment step. Instead, the `code_hash` of an on-chain deployed wasm binary /// must be supplied. #[pallet::weight( - T::WeightInfo::instantiate(salt.len() as u32).saturating_add(*gas_limit) + T::WeightInfo::instantiate(salt.len() as u32).saturating_add((*gas_limit).into()) )] - pub fn instantiate( + #[allow(deprecated)] + #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `instantiate`")] + pub fn instantiate_old_weight( origin: OriginFor, #[pallet::compact] value: BalanceOf, - #[pallet::compact] gas_limit: Weight, + #[pallet::compact] gas_limit: OldWeight, storage_deposit_limit: Option< as codec::HasCompact>::Type>, code_hash: CodeHash, data: Vec, salt: Vec, ) -> DispatchResultWithPostInfo { + let gas_limit: Weight = gas_limit.into(); let origin = ensure_signed(origin)?; let salt_len = salt.len() as u32; let mut output = Self::internal_instantiate( @@ -639,6 +650,154 @@ pub mod pallet { Ok(()) }) } + + /// Makes a call to an account, optionally transferring some balance. + /// + /// # Parameters + /// + /// * `dest`: Address of the contract to call. 
+ /// * `value`: The balance to transfer from the `origin` to `dest`. + /// * `gas_limit`: The gas limit enforced when executing the constructor. + /// * `storage_deposit_limit`: The maximum amount of balance that can be charged from the + /// caller to pay for the storage consumed. + /// * `data`: The input data to pass to the contract. + /// + /// * If the account is a smart-contract account, the associated code will be + /// executed and any value will be transferred. + /// * If the account is a regular account, any value will be transferred. + /// * If no account exists and the call value is not less than `existential_deposit`, + /// a regular account will be created and any value will be transferred. + #[pallet::weight(T::WeightInfo::call().saturating_add(*gas_limit))] + pub fn call( + origin: OriginFor, + dest: AccountIdLookupOf, + #[pallet::compact] value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option< as codec::HasCompact>::Type>, + data: Vec, + ) -> DispatchResultWithPostInfo { + let gas_limit: Weight = gas_limit.into(); + let origin = ensure_signed(origin)?; + let dest = T::Lookup::lookup(dest)?; + let mut output = Self::internal_call( + origin, + dest, + value, + gas_limit, + storage_deposit_limit.map(Into::into), + data, + None, + ); + if let Ok(retval) = &output.result { + if retval.did_revert() { + output.result = Err(>::ContractReverted.into()); + } + } + output.gas_meter.into_dispatch_result(output.result, T::WeightInfo::call()) + } + + /// Instantiates a new contract from the supplied `code` optionally transferring + /// some balance. + /// + /// This dispatchable has the same effect as calling [`Self::upload_code`] + + /// [`Self::instantiate`]. Bundling them together provides efficiency gains. Please + /// also check the documentation of [`Self::upload_code`]. + /// + /// # Parameters + /// + /// * `value`: The balance to transfer from the `origin` to the newly created contract. 
+ /// * `gas_limit`: The gas limit enforced when executing the constructor. + /// * `storage_deposit_limit`: The maximum amount of balance that can be charged/reserved + /// from the caller to pay for the storage consumed. + /// * `code`: The contract code to deploy in raw bytes. + /// * `data`: The input data to pass to the contract constructor. + /// * `salt`: Used for the address derivation. See [`Pallet::contract_address`]. + /// + /// Instantiation is executed as follows: + /// + /// - The supplied `code` is instrumented, deployed, and a `code_hash` is created for that + /// code. + /// - If the `code_hash` already exists on the chain the underlying `code` will be shared. + /// - The destination address is computed based on the sender, code_hash and the salt. + /// - The smart-contract account is created at the computed address. + /// - The `value` is transferred to the new account. + /// - The `deploy` function is executed in the context of the newly-created account. + #[pallet::weight( + T::WeightInfo::instantiate_with_code(code.len() as u32, salt.len() as u32) + .saturating_add(*gas_limit) + )] + pub fn instantiate_with_code( + origin: OriginFor, + #[pallet::compact] value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option< as codec::HasCompact>::Type>, + code: Vec, + data: Vec, + salt: Vec, + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + let code_len = code.len() as u32; + let salt_len = salt.len() as u32; + let mut output = Self::internal_instantiate( + origin, + value, + gas_limit, + storage_deposit_limit.map(Into::into), + Code::Upload(Bytes(code)), + data, + salt, + None, + ); + if let Ok(retval) = &output.result { + if retval.1.did_revert() { + output.result = Err(>::ContractReverted.into()); + } + } + output.gas_meter.into_dispatch_result( + output.result.map(|(_address, result)| result), + T::WeightInfo::instantiate_with_code(code_len, salt_len), + ) + } + + /// Instantiates a contract from a previously 
deployed wasm binary. + /// + /// This function is identical to [`Self::instantiate_with_code`] but without the + /// code deployment step. Instead, the `code_hash` of an on-chain deployed wasm binary + /// must be supplied. + #[pallet::weight( + T::WeightInfo::instantiate(salt.len() as u32).saturating_add(*gas_limit) + )] + pub fn instantiate( + origin: OriginFor, + #[pallet::compact] value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option< as codec::HasCompact>::Type>, + code_hash: CodeHash, + data: Vec, + salt: Vec, + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + let salt_len = salt.len() as u32; + let mut output = Self::internal_instantiate( + origin, + value, + gas_limit, + storage_deposit_limit.map(Into::into), + Code::Existing(code_hash), + data, + salt, + None, + ); + if let Ok(retval) = &output.result { + if retval.1.did_revert() { + output.result = Err(>::ContractReverted.into()); + } + } + output.gas_meter.into_dispatch_result( + output.result.map(|(_address, output)| output), + T::WeightInfo::instantiate(salt_len), + ) + } } #[pallet::event] diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 126a37e9401ec..d8b4cd245356e 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -274,7 +274,11 @@ mod tests { BalanceOf, CodeHash, Error, Pallet as Contracts, }; use assert_matches::assert_matches; - use frame_support::{assert_ok, dispatch::DispatchResultWithPostInfo, weights::Weight}; + use frame_support::{ + assert_ok, + dispatch::DispatchResultWithPostInfo, + weights::{OldWeight, Weight}, + }; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use pretty_assertions::assert_eq; use sp_core::{Bytes, H256}; @@ -1545,10 +1549,11 @@ mod tests { let output = execute(CODE_GAS_LEFT, vec![], &mut ext).unwrap(); - let gas_left = Weight::decode(&mut &*output.data).unwrap(); + let OldWeight(gas_left) = OldWeight::decode(&mut &*output.data).unwrap(); 
let actual_left = ext.gas_meter.gas_left(); - assert!(gas_left.all_lt(gas_limit), "gas_left must be less than initial"); - assert!(gas_left.all_gt(actual_left), "gas_left must be greater than final"); + // TODO: account for proof size weight + assert!(gas_left < gas_limit.ref_time(), "gas_left must be less than initial"); + assert!(gas_left > actual_left.ref_time(), "gas_left must be greater than final"); } const CODE_VALUE_TRANSFERRED: &str = r#" @@ -1946,7 +1951,7 @@ mod tests { )] ); - assert!(mock_ext.gas_meter.gas_left().all_gt(Weight::zero())); + assert!(mock_ext.gas_meter.gas_left().ref_time() > 0); } const CODE_DEPOSIT_EVENT_MAX_TOPICS: &str = r#" diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 05353e5a3ac61..bba8139f38f44 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -1008,8 +1008,10 @@ pub mod pallet { // unlikely to ever return an error: if phase is signed, snapshot will exist. 
let size = Self::snapshot_metadata().ok_or(Error::::MissingSnapshotMetadata)?; + // TODO: account for proof size weight ensure!( - Self::solution_weight_of(&raw_solution, size).all_lt(T::SignedMaxWeight::get()), + Self::solution_weight_of(&raw_solution, size).ref_time() < + T::SignedMaxWeight::get().ref_time(), Error::::SignedTooMuchWeight, ); @@ -2336,8 +2338,9 @@ mod tests { }; let mut active = 1; - while weight_with(active) - .all_lte(::BlockWeights::get().max_block) || + // TODO: account for proof size weight + while weight_with(active).ref_time() <= + ::BlockWeights::get().max_block.ref_time() || active == all_voters { active += 1; diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index 833f80c90d13e..281ac37421174 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -638,7 +638,8 @@ impl Miner { }; let next_voters = |current_weight: Weight, voters: u32, step: u32| -> Result { - if current_weight.all_lt(max_weight) { + // TODO: account for proof size weight + if current_weight.ref_time() < max_weight.ref_time() { let next_voters = voters.checked_add(step); match next_voters { Some(voters) if voters < max_voters => Ok(voters), @@ -673,7 +674,8 @@ impl Miner { // Time to finish. We might have reduced less than expected due to rounding error. Increase // one last time if we have any room left, the reduce until we are sure we are below limit. 
- while voters < max_voters && weight_with(voters + 1).all_lt(max_weight) { + // TODO: account for proof size weight + while voters < max_voters && weight_with(voters + 1).ref_time() < max_weight.ref_time() { voters += 1; } while voters.checked_sub(1).is_some() && weight_with(voters).any_gt(max_weight) { @@ -681,8 +683,9 @@ impl Miner { } let final_decision = voters.min(size.voters); + // TODO: account for proof size weight debug_assert!( - weight_with(final_decision).all_lte(max_weight), + weight_with(final_decision).ref_time() <= max_weight.ref_time(), "weight_with({}) <= {}", final_decision, max_weight, diff --git a/frame/examples/basic/src/tests.rs b/frame/examples/basic/src/tests.rs index db4787eaa0faa..97fbddfbc41e0 100644 --- a/frame/examples/basic/src/tests.rs +++ b/frame/examples/basic/src/tests.rs @@ -191,11 +191,13 @@ fn weights_work() { let default_call = pallet_example_basic::Call::::accumulate_dummy { increase_by: 10 }; let info1 = default_call.get_dispatch_info(); // aka. `let info = as GetDispatchInfo>::get_dispatch_info(&default_call);` - assert!(info1.weight.all_gt(Weight::zero())); + // TODO: account for proof size weight + assert!(info1.weight.ref_time() > 0); // `set_dummy` is simpler than `accumulate_dummy`, and the weight // should be less. 
let custom_call = pallet_example_basic::Call::::set_dummy { new_value: 20 }; let info2 = custom_call.get_dispatch_info(); - assert!(info1.weight.all_gt(info2.weight)); + // TODO: account for proof size weight + assert!(info1.weight.ref_time() > info2.weight.ref_time()); } diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index a41c82da5757c..014c7a2bc02a6 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -459,7 +459,8 @@ where let max_weight = >::get().max_block; let remaining_weight = max_weight.saturating_sub(weight.total()); - if remaining_weight.all_gt(Weight::zero()) { + // TODO: account for proof size weight + if remaining_weight.ref_time() > 0 { let used_weight = >::on_idle( block_number, remaining_weight, @@ -938,13 +939,13 @@ mod tests { block_import_works_inner( new_test_ext_v0(1), array_bytes::hex_n_into_unchecked( - "1039e1a4bd0cf5deefe65f313577e70169c41c7773d6acf31ca8d671397559f5", + "0d786e24c1f9e6ce237806a22c005bbbc7dee4edd6692b6c5442843d164392de", ), ); block_import_works_inner( new_test_ext(1), array_bytes::hex_n_into_unchecked( - "75e7d8f360d375bbe91bcf8019c01ab6362448b4a89e3b329717eb9d910340e5", + "348485a4ab856467b440167e45f99b491385e8528e09b0e51f85f814a3021c93", ), ); } diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index 775eda58c03e0..5d2ebdf29cb6b 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -856,7 +856,8 @@ fn valid_equivocation_reports_dont_pay_fees() { .get_dispatch_info(); // it should have non-zero weight and the fee has to be paid. - assert!(info.weight.all_gt(Weight::zero())); + // TODO: account for proof size weight + assert!(info.weight.ref_time() > 0); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. 
diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs index 18d5adee63ad6..39d16109aa8fa 100644 --- a/frame/support/procedural/src/pallet/expand/call.rs +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -16,6 +16,7 @@ // limitations under the License. use crate::{pallet::Def, COUNTER}; +use quote::ToTokens; use syn::spanned::Spanned; /// @@ -158,6 +159,24 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { }); } + // Extracts #[allow] attributes, necessary so that we don't run into compiler warnings + let maybe_allow_attrs = methods + .iter() + .map(|method| { + method + .attrs + .iter() + .find(|attr| { + if let Ok(syn::Meta::List(syn::MetaList { path, .. })) = attr.parse_meta() { + path.segments.last().map(|seg| seg.ident == "allow").unwrap_or(false) + } else { + false + } + }) + .map_or(proc_macro2::TokenStream::new(), |attr| attr.to_token_stream()) + }) + .collect::>(); + quote::quote_spanned!(span => #[doc(hidden)] pub mod __substrate_call_check { @@ -289,6 +308,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::sp_tracing::enter_span!( #frame_support::sp_tracing::trace_span!(stringify!(#fn_name)) ); + #maybe_allow_attrs <#pallet_ident<#type_use_gen>>::#fn_name(origin, #( #args_name, )* ) .map(Into::into).map_err(Into::into) }, diff --git a/frame/support/procedural/src/pallet/parse/call.rs b/frame/support/procedural/src/pallet/parse/call.rs index 336e08c3d39b7..f7b2c9544d831 100644 --- a/frame/support/procedural/src/pallet/parse/call.rs +++ b/frame/support/procedural/src/pallet/parse/call.rs @@ -61,6 +61,8 @@ pub struct CallVariantDef { pub call_index: u8, /// Docs, used for metadata. pub docs: Vec, + /// Attributes annotated at the top of the dispatchable function. + pub attrs: Vec, } /// Attributes for functions in call impl block. 
@@ -287,6 +289,7 @@ impl CallDef { call_index: final_index, args, docs, + attrs: method.attrs.clone(), }); } else { let msg = "Invalid pallet::call, only method accepted"; diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index 5d159ec961c7f..b0716d569409c 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 159 others + and 160 others = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for 
`frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 4671855431b27..926dc92530659 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 159 others + and 160 others = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff 
--git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index d9cd20711403d..563190a06f76f 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -13,5 +13,5 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 76 others + and 77 others = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 9a4e8d740cb2c..c10005223b674 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -13,6 +13,6 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 76 others + and 77 others = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `Key` = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` diff 
--git a/frame/system/src/limits.rs b/frame/system/src/limits.rs index e182eb626424d..cfc1d261baa01 100644 --- a/frame/system/src/limits.rs +++ b/frame/system/src/limits.rs @@ -207,7 +207,7 @@ pub struct BlockWeights { impl Default for BlockWeights { fn default() -> Self { - Self::with_sensible_defaults(1u32 * constants::WEIGHT_PER_SECOND, DEFAULT_NORMAL_RATIO) + Self::with_sensible_defaults(1u64 * constants::WEIGHT_PER_SECOND, DEFAULT_NORMAL_RATIO) } } @@ -224,6 +224,7 @@ impl BlockWeights { } let mut error = ValidationErrors::default(); + // TODO: account for proof size weight in the assertions below for class in DispatchClass::all() { let weights = self.per_class.get(*class); let max_for_class = or_max(weights.max_total); @@ -232,18 +233,16 @@ impl BlockWeights { // Make sure that if total is set it's greater than base_block && // base_for_class error_assert!( - (max_for_class.all_gt(self.base_block) && max_for_class.all_gt(base_for_class)) - || max_for_class == Weight::zero(), + (max_for_class.ref_time() > self.base_block.ref_time() && max_for_class.ref_time() > base_for_class.ref_time()) + || max_for_class.ref_time() == 0, &mut error, "[{:?}] {:?} (total) has to be greater than {:?} (base block) & {:?} (base extrinsic)", class, max_for_class, self.base_block, base_for_class, ); // Max extrinsic can't be greater than max_for_class. 
error_assert!( - weights - .max_extrinsic - .unwrap_or(Weight::zero()) - .all_lte(max_for_class.saturating_sub(base_for_class)), + weights.max_extrinsic.unwrap_or(Weight::zero()).ref_time() <= + max_for_class.saturating_sub(base_for_class).ref_time(), &mut error, "[{:?}] {:?} (max_extrinsic) can't be greater than {:?} (max for class)", class, @@ -252,14 +251,14 @@ impl BlockWeights { ); // Max extrinsic should not be 0 error_assert!( - weights.max_extrinsic.unwrap_or_else(Weight::max_value).all_gt(Weight::zero()), + weights.max_extrinsic.unwrap_or_else(Weight::max_value).ref_time() > 0, &mut error, "[{:?}] {:?} (max_extrinsic) must not be 0. Check base cost and average initialization cost.", class, weights.max_extrinsic, ); // Make sure that if reserved is set it's greater than base_for_class. error_assert!( - reserved.all_gt(base_for_class) || reserved == Weight::zero(), + reserved.ref_time() > base_for_class.ref_time() || reserved.ref_time() == 0, &mut error, "[{:?}] {:?} (reserved) has to be greater than {:?} (base extrinsic) if set", class, @@ -268,7 +267,7 @@ impl BlockWeights { ); // Make sure max block is greater than max_total if it's set. error_assert!( - self.max_block.all_gte(weights.max_total.unwrap_or(Weight::zero())), + self.max_block.ref_time() >= weights.max_total.unwrap_or(Weight::zero()).ref_time(), &mut error, "[{:?}] {:?} (max block) has to be greater than {:?} (max for class)", class, @@ -277,7 +276,7 @@ impl BlockWeights { ); // Make sure we can fit at least one extrinsic. 
error_assert!( - self.max_block.all_gt(base_for_class + self.base_block), + self.max_block.ref_time() > (base_for_class + self.base_block).ref_time(), &mut error, "[{:?}] {:?} (max block) must fit at least one extrinsic {:?} (base weight)", class, diff --git a/frame/transaction-payment/src/types.rs b/frame/transaction-payment/src/types.rs index 1f41ba7b0b72e..fff41ef6937f5 100644 --- a/frame/transaction-payment/src/types.rs +++ b/frame/transaction-payment/src/types.rs @@ -140,7 +140,8 @@ mod tests { partial_fee: 1_000_000_u64, }; - let json_str = r#"{"weight":{"ref_time":5},"class":"normal","partialFee":"1000000"}"#; + let json_str = + r#"{"weight":{"ref_time":5,"proof_size":0},"class":"normal","partialFee":"1000000"}"#; assert_eq!(serde_json::to_string(&info).unwrap(), json_str); assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); @@ -157,7 +158,7 @@ mod tests { partial_fee: u128::max_value(), }; - let json_str = r#"{"weight":{"ref_time":5},"class":"normal","partialFee":"340282366920938463463374607431768211455"}"#; + let json_str = r#"{"weight":{"ref_time":5,"proof_size":0},"class":"normal","partialFee":"340282366920938463463374607431768211455"}"#; assert_eq!(serde_json::to_string(&info).unwrap(), json_str); assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); diff --git a/primitives/weights/src/lib.rs b/primitives/weights/src/lib.rs index d260f73d41268..e1ac7fcd4e892 100644 --- a/primitives/weights/src/lib.rs +++ b/primitives/weights/src/lib.rs @@ -30,7 +30,7 @@ extern crate self as sp_weights; mod weight_v2; -use codec::{Decode, Encode}; +use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; @@ -53,6 +53,25 @@ pub mod constants { pub const WEIGHT_PER_NANOS: Weight = Weight::from_ref_time(1_000); } +/// The old weight type. +/// +/// NOTE: This type exists purely for compatibility purposes! Use [`weight_v2::Weight`] in all other +/// cases. 
+#[derive( + Decode, + Encode, + CompactAs, + PartialEq, + Eq, + Clone, + Copy, + RuntimeDebug, + Default, + MaxEncodedLen, + TypeInfo, +)] +pub struct OldWeight(pub u64); + /// The weight of database operations that the runtime can invoke. /// /// NOTE: This is currently only measured in computational time, and will probably diff --git a/primitives/weights/src/weight_v2.rs b/primitives/weights/src/weight_v2.rs index af0f469ebaaeb..a8eaf79a28711 100644 --- a/primitives/weights/src/weight_v2.rs +++ b/primitives/weights/src/weight_v2.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode, MaxEncodedLen}; use core::ops::{Add, AddAssign, Div, Mul, Sub, SubAssign}; use sp_arithmetic::traits::{Bounded, CheckedAdd, CheckedSub, Zero}; use sp_debug_derive::RuntimeDebug; @@ -23,22 +23,22 @@ use sp_debug_derive::RuntimeDebug; use super::*; #[derive( - Encode, - Decode, - MaxEncodedLen, - TypeInfo, - Eq, - PartialEq, - Copy, - Clone, - RuntimeDebug, - Default, - CompactAs, + Encode, Decode, MaxEncodedLen, TypeInfo, Eq, PartialEq, Copy, Clone, RuntimeDebug, Default, )] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct Weight { + #[codec(compact)] /// The weight of computational time used based on some reference hardware. ref_time: u64, + #[codec(compact)] + /// The weight of storage space used by proof of validity. + proof_size: u64, +} + +impl From for Weight { + fn from(old: OldWeight) -> Self { + Weight::from_ref_time(old.0) + } } impl Weight { @@ -48,71 +48,118 @@ impl Weight { self } + /// Set the storage size part of the weight. + pub const fn set_proof_size(mut self, c: u64) -> Self { + self.proof_size = c; + self + } + /// Return the reference time part of the weight. pub const fn ref_time(&self) -> u64 { self.ref_time } - /// Return a mutable reference time part of the weight. 
+ /// Return the storage size part of the weight. + pub const fn proof_size(&self) -> u64 { + self.proof_size + } + + /// Return a mutable reference to the reference time part of the weight. pub fn ref_time_mut(&mut self) -> &mut u64 { &mut self.ref_time } - pub const MAX: Self = Self { ref_time: u64::MAX }; + /// Return a mutable reference to the storage size part of the weight. + pub fn proof_size_mut(&mut self) -> &mut u64 { + &mut self.proof_size + } + + pub const MAX: Self = Self { ref_time: u64::MAX, proof_size: u64::MAX }; /// Get the conservative min of `self` and `other` weight. pub fn min(&self, other: Self) -> Self { - Self { ref_time: self.ref_time.min(other.ref_time) } + Self { + ref_time: self.ref_time.min(other.ref_time), + proof_size: self.proof_size.min(other.proof_size), + } } /// Get the aggressive max of `self` and `other` weight. pub fn max(&self, other: Self) -> Self { - Self { ref_time: self.ref_time.max(other.ref_time) } + Self { + ref_time: self.ref_time.max(other.ref_time), + proof_size: self.proof_size.max(other.proof_size), + } } /// Try to add some `other` weight while upholding the `limit`. pub fn try_add(&self, other: &Self, limit: &Self) -> Option { let total = self.checked_add(other)?; - if total.ref_time > limit.ref_time { + if total.any_gt(*limit) { None } else { Some(total) } } - /// Construct [`Weight`] with reference time weight. + /// Construct [`Weight`] with reference time weight and 0 storage size weight. pub const fn from_ref_time(ref_time: u64) -> Self { - Self { ref_time } + Self { ref_time, proof_size: 0 } + } + + /// Construct [`Weight`] with storage size weight and 0 reference time weight. + pub const fn from_proof_size(proof_size: u64) -> Self { + Self { ref_time: 0, proof_size } + } + + /// Construct [`Weight`] with weight components, namely reference time and storage size weights. 
+ pub const fn from_components(ref_time: u64, proof_size: u64) -> Self { + Self { ref_time, proof_size } } /// Saturating [`Weight`] addition. Computes `self + rhs`, saturating at the numeric bounds of /// all fields instead of overflowing. pub const fn saturating_add(self, rhs: Self) -> Self { - Self { ref_time: self.ref_time.saturating_add(rhs.ref_time) } + Self { + ref_time: self.ref_time.saturating_add(rhs.ref_time), + proof_size: self.proof_size.saturating_add(rhs.proof_size), + } } /// Saturating [`Weight`] subtraction. Computes `self - rhs`, saturating at the numeric bounds /// of all fields instead of overflowing. pub const fn saturating_sub(self, rhs: Self) -> Self { - Self { ref_time: self.ref_time.saturating_sub(rhs.ref_time) } + Self { + ref_time: self.ref_time.saturating_sub(rhs.ref_time), + proof_size: self.proof_size.saturating_sub(rhs.proof_size), + } } /// Saturating [`Weight`] scalar multiplication. Computes `self.field * scalar` for all fields, /// saturating at the numeric bounds of all fields instead of overflowing. pub const fn saturating_mul(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time.saturating_mul(scalar) } + Self { + ref_time: self.ref_time.saturating_mul(scalar), + proof_size: self.proof_size.saturating_mul(scalar), + } } /// Saturating [`Weight`] scalar division. Computes `self.field / scalar` for all fields, /// saturating at the numeric bounds of all fields instead of overflowing. pub const fn saturating_div(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time.saturating_div(scalar) } + Self { + ref_time: self.ref_time.saturating_div(scalar), + proof_size: self.proof_size.saturating_div(scalar), + } } /// Saturating [`Weight`] scalar exponentiation. Computes `self.field.pow(exp)` for all fields, /// saturating at the numeric bounds of all fields instead of overflowing. 
pub const fn saturating_pow(self, exp: u32) -> Self { - Self { ref_time: self.ref_time.saturating_pow(exp) } + Self { + ref_time: self.ref_time.saturating_pow(exp), + proof_size: self.proof_size.saturating_pow(exp), + } } /// Increment [`Weight`] by `amount` via saturating addition. @@ -122,124 +169,144 @@ impl Weight { /// Checked [`Weight`] addition. Computes `self + rhs`, returning `None` if overflow occurred. pub const fn checked_add(&self, rhs: &Self) -> Option { - match self.ref_time.checked_add(rhs.ref_time) { - Some(ref_time) => Some(Self { ref_time }), - None => None, - } + let ref_time = match self.ref_time.checked_add(rhs.ref_time) { + Some(t) => t, + None => return None, + }; + let proof_size = match self.proof_size.checked_add(rhs.proof_size) { + Some(s) => s, + None => return None, + }; + Some(Self { ref_time, proof_size }) } /// Checked [`Weight`] subtraction. Computes `self - rhs`, returning `None` if overflow /// occurred. pub const fn checked_sub(&self, rhs: &Self) -> Option { - match self.ref_time.checked_sub(rhs.ref_time) { - Some(ref_time) => Some(Self { ref_time }), - None => None, - } + let ref_time = match self.ref_time.checked_sub(rhs.ref_time) { + Some(t) => t, + None => return None, + }; + let proof_size = match self.proof_size.checked_sub(rhs.proof_size) { + Some(s) => s, + None => return None, + }; + Some(Self { ref_time, proof_size }) } /// Checked [`Weight`] scalar multiplication. Computes `self.field * scalar` for each field, /// returning `None` if overflow occurred. pub const fn checked_mul(self, scalar: u64) -> Option { - match self.ref_time.checked_mul(scalar) { - Some(ref_time) => Some(Self { ref_time }), - None => None, - } + let ref_time = match self.ref_time.checked_mul(scalar) { + Some(t) => t, + None => return None, + }; + let proof_size = match self.proof_size.checked_mul(scalar) { + Some(s) => s, + None => return None, + }; + Some(Self { ref_time, proof_size }) } /// Checked [`Weight`] scalar division. 
Computes `self.field / scalar` for each field, returning /// `None` if overflow occurred. pub const fn checked_div(self, scalar: u64) -> Option { - match self.ref_time.checked_div(scalar) { - Some(ref_time) => Some(Self { ref_time }), - None => None, - } + let ref_time = match self.ref_time.checked_div(scalar) { + Some(t) => t, + None => return None, + }; + let proof_size = match self.proof_size.checked_div(scalar) { + Some(s) => s, + None => return None, + }; + Some(Self { ref_time, proof_size }) } /// Return a [`Weight`] where all fields are zero. pub const fn zero() -> Self { - Self { ref_time: 0 } + Self { ref_time: 0, proof_size: 0 } } /// Constant version of Add with u64. /// /// Is only overflow safe when evaluated at compile-time. pub const fn add(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time + scalar } + Self { ref_time: self.ref_time + scalar, proof_size: self.proof_size + scalar } } /// Constant version of Sub with u64. /// /// Is only overflow safe when evaluated at compile-time. pub const fn sub(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time - scalar } + Self { ref_time: self.ref_time - scalar, proof_size: self.proof_size - scalar } } /// Constant version of Div with u64. /// /// Is only overflow safe when evaluated at compile-time. pub const fn div(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time / scalar } + Self { ref_time: self.ref_time / scalar, proof_size: self.proof_size / scalar } } /// Constant version of Mul with u64. /// /// Is only overflow safe when evaluated at compile-time. pub const fn mul(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time * scalar } + Self { ref_time: self.ref_time * scalar, proof_size: self.proof_size * scalar } } /// Returns true if any of `self`'s constituent weights is strictly greater than that of the /// `other`'s, otherwise returns false. 
pub const fn any_gt(self, other: Self) -> bool { - self.ref_time > other.ref_time + self.ref_time > other.ref_time || self.proof_size > other.proof_size } /// Returns true if all of `self`'s constituent weights is strictly greater than that of the /// `other`'s, otherwise returns false. pub const fn all_gt(self, other: Self) -> bool { - self.ref_time > other.ref_time + self.ref_time > other.ref_time && self.proof_size > other.proof_size } /// Returns true if any of `self`'s constituent weights is strictly less than that of the /// `other`'s, otherwise returns false. pub const fn any_lt(self, other: Self) -> bool { - self.ref_time < other.ref_time + self.ref_time < other.ref_time || self.proof_size < other.proof_size } /// Returns true if all of `self`'s constituent weights is strictly less than that of the /// `other`'s, otherwise returns false. pub const fn all_lt(self, other: Self) -> bool { - self.ref_time < other.ref_time + self.ref_time < other.ref_time && self.proof_size < other.proof_size } /// Returns true if any of `self`'s constituent weights is greater than or equal to that of the /// `other`'s, otherwise returns false. pub const fn any_gte(self, other: Self) -> bool { - self.ref_time >= other.ref_time + self.ref_time >= other.ref_time || self.proof_size >= other.proof_size } /// Returns true if all of `self`'s constituent weights is greater than or equal to that of the /// `other`'s, otherwise returns false. pub const fn all_gte(self, other: Self) -> bool { - self.ref_time >= other.ref_time + self.ref_time >= other.ref_time && self.proof_size >= other.proof_size } /// Returns true if any of `self`'s constituent weights is less than or equal to that of the /// `other`'s, otherwise returns false. 
pub const fn any_lte(self, other: Self) -> bool { - self.ref_time <= other.ref_time + self.ref_time <= other.ref_time || self.proof_size <= other.proof_size } /// Returns true if all of `self`'s constituent weights is less than or equal to that of the /// `other`'s, otherwise returns false. pub const fn all_lte(self, other: Self) -> bool { - self.ref_time <= other.ref_time + self.ref_time <= other.ref_time && self.proof_size <= other.proof_size } /// Returns true if any of `self`'s constituent weights is equal to that of the `other`'s, /// otherwise returns false. pub const fn any_eq(self, other: Self) -> bool { - self.ref_time == other.ref_time + self.ref_time == other.ref_time || self.proof_size == other.proof_size } // NOTE: `all_eq` does not exist, as it's simply the `eq` method from the `PartialEq` trait. @@ -258,14 +325,20 @@ impl Zero for Weight { impl Add for Weight { type Output = Self; fn add(self, rhs: Self) -> Self { - Self { ref_time: self.ref_time + rhs.ref_time } + Self { + ref_time: self.ref_time + rhs.ref_time, + proof_size: self.proof_size + rhs.proof_size, + } } } impl Sub for Weight { type Output = Self; fn sub(self, rhs: Self) -> Self { - Self { ref_time: self.ref_time - rhs.ref_time } + Self { + ref_time: self.ref_time - rhs.ref_time, + proof_size: self.proof_size - rhs.proof_size, + } } } @@ -275,7 +348,7 @@ where { type Output = Self; fn mul(self, b: T) -> Self { - Self { ref_time: b * self.ref_time } + Self { ref_time: b * self.ref_time, proof_size: b * self.proof_size } } } @@ -285,7 +358,10 @@ macro_rules! weight_mul_per_impl { impl Mul for $t { type Output = Weight; fn mul(self, b: Weight) -> Weight { - Weight { ref_time: self * b.ref_time } + Weight { + ref_time: self * b.ref_time, + proof_size: self * b.proof_size, + } } } )* @@ -305,7 +381,10 @@ macro_rules! 
weight_mul_primitive_impl { impl Mul for $t { type Output = Weight; fn mul(self, b: Weight) -> Weight { - Weight { ref_time: u64::from(self) * b.ref_time } + Weight { + ref_time: u64::from(self) * b.ref_time, + proof_size: u64::from(self) * b.proof_size, + } } } )* @@ -320,7 +399,7 @@ where { type Output = Self; fn div(self, b: T) -> Self { - Self { ref_time: self.ref_time / b } + Self { ref_time: self.ref_time / b, proof_size: self.proof_size / b } } } @@ -338,7 +417,7 @@ impl CheckedSub for Weight { impl core::fmt::Display for Weight { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "Weight(ref_time: {})", self.ref_time) + write!(f, "Weight(ref_time: {}, proof_size: {})", self.ref_time, self.proof_size) } } @@ -353,12 +432,18 @@ impl Bounded for Weight { impl AddAssign for Weight { fn add_assign(&mut self, other: Self) { - *self = Self { ref_time: self.ref_time + other.ref_time }; + *self = Self { + ref_time: self.ref_time + other.ref_time, + proof_size: self.proof_size + other.proof_size, + }; } } impl SubAssign for Weight { fn sub_assign(&mut self, other: Self) { - *self = Self { ref_time: self.ref_time - other.ref_time }; + *self = Self { + ref_time: self.ref_time - other.ref_time, + proof_size: self.proof_size - other.proof_size, + }; } } From 01a905e304f2b6b2c1caf4c12b622edb12b265fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 28 Sep 2022 14:37:03 +0200 Subject: [PATCH 19/42] pallet-utility: Only disallow the `None` origin (#12351) --- frame/utility/src/lib.rs | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 819314f3d8454..9ae89097a9bc3 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -63,7 +63,7 @@ use frame_support::{ }; use sp_core::TypeId; use sp_io::hashing::blake2_256; -use sp_runtime::traits::{Dispatchable, TrailingZeroInput}; +use sp_runtime::traits::{BadOrigin, 
Dispatchable, TrailingZeroInput}; use sp_std::prelude::*; pub use weights::WeightInfo; @@ -203,7 +203,12 @@ pub mod pallet { origin: OriginFor, calls: Vec<::RuntimeCall>, ) -> DispatchResultWithPostInfo { - let is_root = ensure_signed_or_root(origin.clone())?.is_none(); + // Do not allow the `None` origin. + if ensure_none(origin.clone()).is_ok() { + return Err(BadOrigin.into()) + } + + let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); @@ -319,7 +324,12 @@ pub mod pallet { origin: OriginFor, calls: Vec<::RuntimeCall>, ) -> DispatchResultWithPostInfo { - let is_root = ensure_signed_or_root(origin.clone())?.is_none(); + // Do not allow the `None` origin. + if ensure_none(origin.clone()).is_ok() { + return Err(BadOrigin.into()) + } + + let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); @@ -426,7 +436,12 @@ pub mod pallet { origin: OriginFor, calls: Vec<::RuntimeCall>, ) -> DispatchResultWithPostInfo { - let is_root = ensure_signed_or_root(origin.clone())?.is_none(); + // Do not allow the `None` origin. + if ensure_none(origin.clone()).is_ok() { + return Err(BadOrigin.into()) + } + + let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); From 1b1a5e12c0e391c7ed4e3ffa332eb2fe928d257f Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 28 Sep 2022 14:43:04 +0200 Subject: [PATCH 20/42] Fix staking migration (#12373) Causing issues on Kusama... 
Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- frame/staking/src/migrations.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs index 8f37ae30dd056..f2ccb4f8b096f 100644 --- a/frame/staking/src/migrations.rs +++ b/frame/staking/src/migrations.rs @@ -40,10 +40,14 @@ pub mod v12 { "Expected v11 before upgrading to v12" ); - frame_support::ensure!( - T::HistoryDepth::get() == HistoryDepth::::get(), - "Provided value of HistoryDepth should be same as the existing storage value" - ); + if HistoryDepth::::exists() { + frame_support::ensure!( + T::HistoryDepth::get() == HistoryDepth::::get(), + "Provided value of HistoryDepth should be same as the existing storage value" + ); + } else { + log::info!("No HistoryDepth in storage; nothing to remove"); + } Ok(Default::default()) } From 0ec4373d9c1252b60f0a3512fd910b1d48af385a Mon Sep 17 00:00:00 2001 From: Koute Date: Thu, 29 Sep 2022 04:38:12 +0900 Subject: [PATCH 21/42] Support running the pallet benchmarks analysis without running the benchmarks (#12361) * Support running the pallet benchmarks analysis without running the benchmarks * Rename `override-results` to `json-input` and update the help comment * ".git/.scripts/fmt.sh" 1 Co-authored-by: command-bot <> --- frame/benchmarking/src/utils.rs | 20 ++++-- .../benchmarking-cli/src/pallet/command.rs | 67 +++++++++++++++++-- .../frame/benchmarking-cli/src/pallet/mod.rs | 10 ++- 3 files changed, 82 insertions(+), 15 deletions(-) diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index b483208e3ef69..753e8c1c684ee 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -23,14 +23,14 @@ use frame_support::{ traits::StorageInfo, }; #[cfg(feature = "std")] -use serde::Serialize; +use serde::{Deserialize, Serialize}; use sp_io::hashing::blake2_256; use sp_runtime::traits::TrailingZeroInput; use 
sp_std::{prelude::Box, vec::Vec}; use sp_storage::TrackedStorageKey; /// An alphabet of possible parameters to use for benchmarking. -#[cfg_attr(feature = "std", derive(Serialize))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Encode, Decode, Clone, Copy, PartialEq, Debug)] #[allow(missing_docs)] #[allow(non_camel_case_types)] @@ -71,7 +71,7 @@ impl std::fmt::Display for BenchmarkParameter { } /// The results of a single of benchmark. -#[cfg_attr(feature = "std", derive(Serialize))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Encode, Decode, Clone, PartialEq, Debug)] pub struct BenchmarkBatch { /// The pallet containing this benchmark. @@ -89,7 +89,7 @@ pub struct BenchmarkBatch { // TODO: could probably make API cleaner here. /// The results of a single of benchmark, where time and db results are separated. -#[cfg_attr(feature = "std", derive(Serialize))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Encode, Decode, Clone, PartialEq, Debug)] pub struct BenchmarkBatchSplitResults { /// The pallet containing this benchmark. @@ -110,7 +110,7 @@ pub struct BenchmarkBatchSplitResults { /// Result from running benchmarks on a FRAME pallet. /// Contains duration of the function call in nanoseconds along with the benchmark parameters /// used for that benchmark result. 
-#[cfg_attr(feature = "std", derive(Serialize))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Encode, Decode, Default, Clone, PartialEq, Debug)] pub struct BenchmarkResult { pub components: Vec<(BenchmarkParameter, u32)>, @@ -121,7 +121,7 @@ pub struct BenchmarkResult { pub writes: u32, pub repeat_writes: u32, pub proof_size: u32, - #[cfg_attr(feature = "std", serde(skip_serializing))] + #[cfg_attr(feature = "std", serde(skip))] pub keys: Vec<(Vec, u32, u32, bool)>, } @@ -141,6 +141,14 @@ mod serde_as_str { let s = std::str::from_utf8(value).map_err(serde::ser::Error::custom)?; serializer.collect_str(s) } + + pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> + where + D: serde::de::Deserializer<'de>, + { + let s: &str = serde::de::Deserialize::deserialize(deserializer)?; + Ok(s.into()) + } } /// Possible errors returned from the benchmarking pipeline. diff --git a/utils/frame/benchmarking-cli/src/pallet/command.rs b/utils/frame/benchmarking-cli/src/pallet/command.rs index 6870ec386d23d..72592617c52ac 100644 --- a/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -134,6 +134,20 @@ impl PalletCmd { }; } + if let Some(json_input) = &self.json_input { + let raw_data = match std::fs::read(json_input) { + Ok(raw_data) => raw_data, + Err(error) => + return Err(format!("Failed to read {:?}: {}", json_input, error).into()), + }; + let batches: Vec = match serde_json::from_slice(&raw_data) { + Ok(batches) => batches, + Err(error) => + return Err(format!("Failed to deserialize {:?}: {}", json_input, error).into()), + }; + return self.output_from_results(&batches) + } + let spec = config.chain_spec; let strategy = self.execution.unwrap_or(ExecutionStrategy::Native); let pallet = self.pallet.clone().unwrap_or_default(); @@ -396,8 +410,16 @@ impl PalletCmd { // Combine all of the benchmark results, so that benchmarks of the same pallet/function // are together. 
- let batches: Vec = combine_batches(batches, batches_db); + let batches = combine_batches(batches, batches_db); + self.output(&batches, &storage_info, &component_ranges) + } + fn output( + &self, + batches: &[BenchmarkBatchSplitResults], + storage_info: &[StorageInfo], + component_ranges: &HashMap<(Vec, Vec), Vec>, + ) -> Result<()> { // Jsonify the result and write it to a file or stdout if desired. if !self.jsonify(&batches)? { // Print the summary only if `jsonify` did not write to stdout. @@ -412,10 +434,45 @@ impl PalletCmd { Ok(()) } + fn output_from_results(&self, batches: &[BenchmarkBatchSplitResults]) -> Result<()> { + let mut component_ranges = + HashMap::<(Vec, Vec), HashMap>::new(); + for batch in batches { + let range = component_ranges + .entry((batch.pallet.clone(), batch.benchmark.clone())) + .or_default(); + for result in &batch.time_results { + for (param, value) in &result.components { + let name = param.to_string(); + let (ref mut min, ref mut max) = range.entry(name).or_insert((*value, *value)); + if *value < *min { + *min = *value; + } + if *value > *max { + *max = *value; + } + } + } + } + + let component_ranges: HashMap<_, _> = component_ranges + .into_iter() + .map(|(key, ranges)| { + let ranges = ranges + .into_iter() + .map(|(name, (min, max))| ComponentRange { name, min, max }) + .collect(); + (key, ranges) + }) + .collect(); + + self.output(batches, &[], &component_ranges) + } + /// Jsonifies the passed batches and writes them to stdout or into a file. /// Can be configured via `--json` and `--json-file`. /// Returns whether it wrote to stdout. - fn jsonify(&self, batches: &Vec) -> Result { + fn jsonify(&self, batches: &[BenchmarkBatchSplitResults]) -> Result { if self.json_output || self.json_file.is_some() { let json = serde_json::to_string_pretty(&batches) .map_err(|e| format!("Serializing into JSON: {:?}", e))?; @@ -432,11 +489,7 @@ impl PalletCmd { } /// Prints the results as human-readable summary without raw timing data. 
- fn print_summary( - &self, - batches: &Vec, - storage_info: &Vec, - ) { + fn print_summary(&self, batches: &[BenchmarkBatchSplitResults], storage_info: &[StorageInfo]) { for batch in batches.iter() { // Print benchmark metadata println!( diff --git a/utils/frame/benchmarking-cli/src/pallet/mod.rs b/utils/frame/benchmarking-cli/src/pallet/mod.rs index b8c1f7b905c0c..0e698c4e73910 100644 --- a/utils/frame/benchmarking-cli/src/pallet/mod.rs +++ b/utils/frame/benchmarking-cli/src/pallet/mod.rs @@ -35,11 +35,11 @@ fn parse_pallet_name(pallet: &str) -> String { #[derive(Debug, clap::Parser)] pub struct PalletCmd { /// Select a FRAME Pallet to benchmark, or `*` for all (in which case `extrinsic` must be `*`). - #[clap(short, long, parse(from_str = parse_pallet_name), required_unless_present = "list")] + #[clap(short, long, parse(from_str = parse_pallet_name), required_unless_present_any = ["list", "json-input"])] pub pallet: Option, /// Select an extrinsic inside the pallet to benchmark, or `*` for all. - #[clap(short, long, required_unless_present = "list")] + #[clap(short, long, required_unless_present_any = ["list", "json-input"])] pub extrinsic: Option, /// Select how many samples we should take across the variable components. @@ -166,4 +166,10 @@ pub struct PalletCmd { /// template for that purpose. #[clap(long)] pub no_storage_info: bool, + + /// A path to a `.json` file with existing benchmark results generated with `--json` or + /// `--json-file`. When specified the benchmarks are not actually executed, and the data for + /// the analysis is read from this file. 
+ #[clap(long)] + pub json_input: Option, } From d66adfabd7911bf01ab01ec96ec4228307a03e07 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Wed, 28 Sep 2022 22:00:33 +0200 Subject: [PATCH 22/42] fix: typo in AllPalletsWithSystem deprecated msg (#12379) --- frame/support/procedural/src/construct_runtime/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index e20cb61b7aec1..73d0d54343eb9 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -422,7 +422,7 @@ fn decl_all_pallets<'a>( /// All pallets included in the runtime as a nested tuple of types in reversed order. /// Excludes the System pallet. #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ - `AllPalletWithSystem or AllPalletsWithoutSystem`")] + `AllPalletsWithSystem or AllPalletsWithoutSystem`")] pub type AllPalletsWithoutSystemReversed = ( #(#names,)* ); } }); @@ -433,7 +433,7 @@ fn decl_all_pallets<'a>( #attr /// All pallets included in the runtime as a nested tuple of types in reversed order. #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ - `AllPalletWithSystem or AllPalletsWithoutSystem`")] + `AllPalletsWithSystem or AllPalletsWithoutSystem`")] pub type AllPalletsWithSystemReversed = ( #(#names,)* ); } }); @@ -447,7 +447,7 @@ fn decl_all_pallets<'a>( /// All pallets included in the runtime as a nested tuple of types in reversed order. /// With the system pallet first. #[deprecated(note = "Using reverse pallet orders is deprecated. 
use only \ - `AllPalletWithSystem or AllPalletsWithoutSystem`")] + `AllPalletsWithSystem or AllPalletsWithoutSystem`")] pub type AllPalletsReversedWithSystemFirst = ( #(#names,)* ); } }); From 96de768061b182934b2d824b2fe76effb5b4db85 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Wed, 28 Sep 2022 22:04:14 +0200 Subject: [PATCH 23/42] New Pallet: Root offences (#11943) * root-offences pallet * fix errors * cleaned up a bit * remove unwrap() * new pallet is getting compiled * remove unnecessary type annotations * remove more unnecessary type annotations * additional cleaning * commit * cleaned up * fix in logic * add event * removed Clone trait from AccountId * test module * remove unused imports * fmt * fix * separate into functions, still messy * test * first test * fmt * cleaned up a bit * separate into mock.rs and tests.rs * basic docs for now * pallet_staking GenesisConfig * fix * added start_session * passing tests * impl GenesisConfig for pallet_session * updated event * Update frame/root-offences/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/root-offences/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * remove * Update frame/root-offences/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * docs * Update frame/root-offences/README.md Co-authored-by: Andronik * Update frame/root-offences/Cargo.toml Co-authored-by: Andronik * license header Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Andronik --- Cargo.lock | 22 ++ Cargo.toml | 1 + frame/nomination-pools/src/lib.rs | 2 +- frame/root-offences/Cargo.toml | 51 +++++ frame/root-offences/README.md | 5 + frame/root-offences/src/lib.rs | 131 +++++++++++ frame/root-offences/src/mock.rs | 356 ++++++++++++++++++++++++++++++ frame/root-offences/src/tests.rs | 94 ++++++++
insertions(+), 1 deletion(-) create mode 100644 frame/root-offences/Cargo.toml create mode 100644 frame/root-offences/README.md create mode 100644 frame/root-offences/src/lib.rs create mode 100644 frame/root-offences/src/mock.rs create mode 100644 frame/root-offences/src/tests.rs diff --git a/Cargo.lock b/Cargo.lock index a9a0eef551179..de50d4ec27105 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6197,6 +6197,28 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-root-offences" +version = "1.0.0" +dependencies = [ + "frame-election-provider-support", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-offences", + "pallet-session", + "pallet-staking", + "pallet-staking-reward-curve", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-std", +] + [[package]] name = "pallet-scheduler" version = "4.0.0-dev" diff --git a/Cargo.toml b/Cargo.toml index 018355df6c9fd..25f12a2c9fd3f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -135,6 +135,7 @@ members = [ "frame/staking/reward-fn", "frame/state-trie-migration", "frame/sudo", + "frame/root-offences", "frame/support", "frame/support/procedural", "frame/support/procedural/tools", diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 28d10ce573401..9e77adaeee677 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -2523,7 +2523,7 @@ impl Pallet { impl OnStakerSlash> for Pallet { fn on_slash( pool_account: &T::AccountId, - // Bonded balance is always read directly from staking, therefore we need not update + // Bonded balance is always read directly from staking, therefore we don't need to update // anything here. 
slashed_bonded: BalanceOf, slashed_unlocking: &BTreeMap>, diff --git a/frame/root-offences/Cargo.toml b/frame/root-offences/Cargo.toml new file mode 100644 index 0000000000000..ea6a6527848aa --- /dev/null +++ b/frame/root-offences/Cargo.toml @@ -0,0 +1,51 @@ +[package] +name = "pallet-root-offences" +version = "1.0.0" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME root offences pallet" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } + +pallet-session = { version = "4.0.0-dev", features = [ "historical" ], path = "../../frame/session", default-features = false } +pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../frame/staking" } +pallet-offences = { version = "4.0.0-dev", default-features = false, path = "../../frame/offences" } + +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } +sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } + +[dev-dependencies] +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +pallet-timestamp = { version = "4.0.0-dev", path = "../timestamp" } +pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../staking/reward-curve" } + +sp-core = { version = "6.0.0", path = "../../primitives/core" } +sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } +sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } + 
+frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support" } + +[features] +runtime-benchmarks = [] +try-runtime = ["frame-support/try-runtime"] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "pallet-session/std", + "pallet-staking/std", + "pallet-offences/std", + "scale-info/std", + "sp-runtime/std", +] diff --git a/frame/root-offences/README.md b/frame/root-offences/README.md new file mode 100644 index 0000000000000..a2c5261b6985a --- /dev/null +++ b/frame/root-offences/README.md @@ -0,0 +1,5 @@ +# Sudo Offences Pallet + +Pallet that allows the root to create an offence. + +NOTE: This pallet should only be used for testing purposes. \ No newline at end of file diff --git a/frame/root-offences/src/lib.rs b/frame/root-offences/src/lib.rs new file mode 100644 index 0000000000000..b4b549627f3fa --- /dev/null +++ b/frame/root-offences/src/lib.rs @@ -0,0 +1,131 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Sudo Offences Pallet +//! Pallet that allows the root to create an offence. +//! +//! NOTE: This pallet should be used for testing purposes. 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +use pallet_session::historical::IdentificationTuple; +use pallet_staking::{BalanceOf, Exposure, ExposureOf, Pallet as Staking}; +use sp_runtime::Perbill; +use sp_staking::offence::{DisableStrategy, OnOffenceHandler}; + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: + frame_system::Config + + pallet_staking::Config + + pallet_session::Config::AccountId> + + pallet_session::historical::Config< + FullIdentification = Exposure< + ::AccountId, + BalanceOf, + >, + FullIdentificationOf = ExposureOf, + > + { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + } + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// An offence was created by root. + OffenceCreated { offenders: Vec<(T::AccountId, Perbill)> }, + } + + #[pallet::error] + pub enum Error { + /// Failed to get the active era from the staking pallet. + FailedToGetActiveEra, + } + + type OffenceDetails = sp_staking::offence::OffenceDetails< + ::AccountId, + IdentificationTuple, + >; + + #[pallet::call] + impl Pallet { + /// Allows the `root`, for example sudo to create an offence. 
+ #[pallet::weight(T::DbWeight::get().reads(2))] + pub fn create_offence( + origin: OriginFor, + offenders: Vec<(T::AccountId, Perbill)>, + ) -> DispatchResult { + ensure_root(origin)?; + + let slash_fraction = + offenders.clone().into_iter().map(|(_, fraction)| fraction).collect::>(); + let offence_details = Self::get_offence_details(offenders.clone())?; + + Self::submit_offence(&offence_details, &slash_fraction); + Self::deposit_event(Event::OffenceCreated { offenders }); + Ok(()) + } + } + + impl Pallet { + /// Returns a vector of offenders that are going to be slashed. + fn get_offence_details( + offenders: Vec<(T::AccountId, Perbill)>, + ) -> Result>, DispatchError> { + let now = Staking::::active_era() + .map(|e| e.index) + .ok_or(Error::::FailedToGetActiveEra)?; + + Ok(offenders + .clone() + .into_iter() + .map(|(o, _)| OffenceDetails:: { + offender: (o.clone(), Staking::::eras_stakers(now, o)), + reporters: vec![], + }) + .collect()) + } + + /// Submits the offence by calling the `on_offence` function. + fn submit_offence(offenders: &[OffenceDetails], slash_fraction: &[Perbill]) { + let session_index = as frame_support::traits::ValidatorSet>::session_index(); + + as OnOffenceHandler< + T::AccountId, + IdentificationTuple, + Weight, + >>::on_offence(&offenders, &slash_fraction, session_index, DisableStrategy::WhenSlashed); + } + } +} diff --git a/frame/root-offences/src/mock.rs b/frame/root-offences/src/mock.rs new file mode 100644 index 0000000000000..3f0a26afc1358 --- /dev/null +++ b/frame/root-offences/src/mock.rs @@ -0,0 +1,356 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; +use crate as root_offences; + +use frame_election_provider_support::{onchain, SequentialPhragmen}; +use frame_support::{ + parameter_types, + traits::{ConstU32, ConstU64, GenesisBuild, Hooks, OneSessionHandler}, +}; +use pallet_staking::StakerStatus; +use sp_core::H256; +use sp_runtime::{ + curve::PiecewiseLinear, + testing::{Header, UintAuthorityId}, + traits::{BlakeTwo256, IdentityLookup, Zero}, +}; +use sp_staking::{EraIndex, SessionIndex}; +use sp_std::collections::btree_map::BTreeMap; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; +type AccountId = u64; +type Balance = u64; +type BlockNumber = u64; + +pub const INIT_TIMESTAMP: u64 = 30_000; +pub const BLOCK_TIME: u64 = 1000; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + RootOffences: root_offences::{Pallet, Call, Storage, Event}, + Historical: pallet_session::historical::{Pallet, Storage}, + } +); + +/// Another session handler struct to test on_disabled. 
+pub struct OtherSessionHandler; +impl OneSessionHandler for OtherSessionHandler { + type Key = UintAuthorityId; + + fn on_genesis_session<'a, I: 'a>(_: I) + where + I: Iterator, + AccountId: 'a, + { + } + + fn on_new_session<'a, I: 'a>(_: bool, _: I, _: I) + where + I: Iterator, + AccountId: 'a, + { + } + + fn on_disabled(_validator_index: u32) {} +} + +impl sp_runtime::BoundToRuntimeAppPublic for OtherSessionHandler { + type Public = UintAuthorityId; +} + +parameter_types! { + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); +} + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type RuntimeCall = RuntimeCall; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +impl pallet_balances::Config for Test { + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); +} + +pallet_staking_reward_curve::build! 
{ + const REWARD_CURVE: PiecewiseLinear<'static> = curve!( + min_inflation: 0_025_000u64, + max_inflation: 0_100_000, + ideal_stake: 0_500_000, + falloff: 0_050_000, + max_piece_count: 40, + test_precision: 0_005_000, + ); +} + +pub struct OnChainSeqPhragmen; +impl onchain::Config for OnChainSeqPhragmen { + type System = Test; + type Solver = SequentialPhragmen; + type DataProvider = Staking; + type WeightInfo = (); +} + +pub struct OnStakerSlashMock(core::marker::PhantomData); +impl sp_staking::OnStakerSlash for OnStakerSlashMock { + fn on_slash( + _pool_account: &AccountId, + slashed_bonded: Balance, + slashed_chunks: &BTreeMap, + ) { + LedgerSlashPerEra::set((slashed_bonded, slashed_chunks.clone())); + } +} + +parameter_types! { + pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; + pub static Offset: BlockNumber = 0; + pub const Period: BlockNumber = 1; + pub static SessionsPerEra: SessionIndex = 3; + pub static SlashDeferDuration: EraIndex = 0; + pub const BondingDuration: EraIndex = 3; + pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); + pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(75); +} + +impl pallet_staking::Config for Test { + type MaxNominations = ConstU32<16>; + type Currency = Balances; + type CurrencyBalance = ::Balance; + type UnixTime = Timestamp; + type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; + type RewardRemainder = (); + type RuntimeEvent = RuntimeEvent; + type Slash = (); + type Reward = (); + type SessionsPerEra = SessionsPerEra; + type SlashDeferDuration = SlashDeferDuration; + type SlashCancelOrigin = frame_system::EnsureRoot; + type BondingDuration = BondingDuration; + type SessionInterface = Self; + type EraPayout = pallet_staking::ConvertCurve; + type NextNewSession = Session; + type MaxNominatorRewardedPerValidator = ConstU32<64>; + type OffendingValidatorsThreshold = OffendingValidatorsThreshold; + type ElectionProvider = 
onchain::UnboundedExecution; + type GenesisElectionProvider = Self::ElectionProvider; + type TargetList = pallet_staking::UseValidatorsMap; + type MaxUnlockingChunks = ConstU32<32>; + type HistoryDepth = ConstU32<84>; + type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; + type OnStakerSlash = OnStakerSlashMock; + type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; + type WeightInfo = (); +} + +impl pallet_session::historical::Config for Test { + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; +} + +sp_runtime::impl_opaque_keys! { + pub struct SessionKeys { + pub other: OtherSessionHandler, + } +} + +impl pallet_session::Config for Test { + type SessionManager = pallet_session::historical::NoteHistoricalRoot; + type Keys = SessionKeys; + type ShouldEndSession = pallet_session::PeriodicSessions; + type SessionHandler = (OtherSessionHandler,); + type RuntimeEvent = RuntimeEvent; + type ValidatorId = AccountId; + type ValidatorIdOf = pallet_staking::StashOf; + type NextSessionRotation = pallet_session::PeriodicSessions; + type WeightInfo = (); +} + +impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = ConstU64<5>; + type WeightInfo = (); +} + +impl Config for Test { + type RuntimeEvent = RuntimeEvent; +} + +pub struct ExtBuilder { + validator_count: u32, + minimum_validator_count: u32, + invulnerables: Vec, + balance_factor: Balance, +} + +impl Default for ExtBuilder { + fn default() -> Self { + Self { + validator_count: 2, + minimum_validator_count: 0, + invulnerables: vec![], + balance_factor: 1, + } + } +} + +impl ExtBuilder { + fn build(self) -> sp_io::TestExternalities { + let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + pallet_balances::GenesisConfig:: { + balances: vec![ + //controllers + (10, self.balance_factor * 50), + (20, self.balance_factor * 50), + (30, 
self.balance_factor * 50), + (40, self.balance_factor * 50), + // stashes + (11, self.balance_factor * 1000), + (21, self.balance_factor * 1000), + (31, self.balance_factor * 500), + (41, self.balance_factor * 1000), + ], + } + .assimilate_storage(&mut storage) + .unwrap(); + + let stakers = vec![ + // (stash, ctrl, stake, status) + // these two will be elected in the default test where we elect 2. + (11, 10, 1000, StakerStatus::::Validator), + (21, 20, 1000, StakerStatus::::Validator), + // a loser validator + (31, 30, 500, StakerStatus::::Validator), + // an idle validator + (41, 40, 1000, StakerStatus::::Idle), + ]; + + let _ = pallet_staking::GenesisConfig:: { + stakers: stakers.clone(), + ..Default::default() + }; + + let _ = pallet_staking::GenesisConfig:: { + stakers: stakers.clone(), + validator_count: self.validator_count, + minimum_validator_count: self.minimum_validator_count, + invulnerables: self.invulnerables, + slash_reward_fraction: Perbill::from_percent(10), + ..Default::default() + } + .assimilate_storage(&mut storage); + + let _ = pallet_session::GenesisConfig:: { + keys: stakers + .into_iter() + .map(|(id, ..)| (id, id, SessionKeys { other: id.into() })) + .collect(), + } + .assimilate_storage(&mut storage); + + storage.into() + } + + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + let mut ext = self.build(); + ext.execute_with(test); + } +} + +/// Progresses from the current block number (whatever that may be) to the `P * session_index + 1`. +pub(crate) fn start_session(session_index: SessionIndex) { + let end: u64 = if Offset::get().is_zero() { + (session_index as u64) * Period::get() + } else { + Offset::get() + (session_index.saturating_sub(1) as u64) * Period::get() + }; + run_to_block(end); + // session must have progressed properly. 
+ assert_eq!( + Session::current_index(), + session_index, + "current session index = {}, expected = {}", + Session::current_index(), + session_index, + ); +} + +/// Progress to the given block, triggering session and era changes as we progress. +/// +/// This will finalize the previous block, initialize up to the given block, essentially simulating +/// a block import/propose process where we first initialize the block, then execute some stuff (not +/// in the function), and then finalize the block. +pub(crate) fn run_to_block(n: BlockNumber) { + Staking::on_finalize(System::block_number()); + for b in (System::block_number() + 1)..=n { + System::set_block_number(b); + Session::on_initialize(b); + >::on_initialize(b); + Timestamp::set_timestamp(System::block_number() * BLOCK_TIME + INIT_TIMESTAMP); + if b != n { + Staking::on_finalize(System::block_number()); + } + } +} + +pub(crate) fn active_era() -> EraIndex { + Staking::active_era().unwrap().index +} diff --git a/frame/root-offences/src/tests.rs b/frame/root-offences/src/tests.rs new file mode 100644 index 0000000000000..a8b7d0a6d6aca --- /dev/null +++ b/frame/root-offences/src/tests.rs @@ -0,0 +1,94 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::*; +use frame_support::{assert_err, assert_ok}; +use mock::{active_era, start_session, Balances, ExtBuilder, RootOffences, RuntimeOrigin, System}; + +#[test] +fn create_offence_fails_given_signed_origin() { + use sp_runtime::traits::BadOrigin; + ExtBuilder::default().build_and_execute(|| { + let offenders = (&[]).to_vec(); + assert_err!(RootOffences::create_offence(RuntimeOrigin::signed(1), offenders), BadOrigin); + }) +} + +#[test] +fn create_offence_works_given_root_origin() { + ExtBuilder::default().build_and_execute(|| { + start_session(1); + + assert_eq!(active_era(), 0); + + assert_eq!(Balances::free_balance(11), 1000); + + let offenders = [(11, Perbill::from_percent(50))].to_vec(); + assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone())); + + System::assert_last_event(Event::OffenceCreated { offenders }.into()); + // the slash should be applied right away. + assert_eq!(Balances::free_balance(11), 500); + + // the other validator should keep his balance, because we only created + // an offence for the first validator. + assert_eq!(Balances::free_balance(21), 1000); + }) +} + +#[test] +fn create_offence_wont_slash_non_active_validators() { + ExtBuilder::default().build_and_execute(|| { + start_session(1); + + assert_eq!(active_era(), 0); + + // 31 is not an active validator. + assert_eq!(Balances::free_balance(31), 500); + + let offenders = [(31, Perbill::from_percent(20)), (11, Perbill::from_percent(20))].to_vec(); + assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone())); + + System::assert_last_event(Event::OffenceCreated { offenders }.into()); + + // so 31 didn't get slashed. + assert_eq!(Balances::free_balance(31), 500); + + // but 11 is an active validator so he got slashed. 
+ assert_eq!(Balances::free_balance(11), 800); + }) +} + +#[test] +fn create_offence_wont_slash_idle() { + ExtBuilder::default().build_and_execute(|| { + start_session(1); + + assert_eq!(active_era(), 0); + + // 41 is idle. + assert_eq!(Balances::free_balance(41), 1000); + + let offenders = [(41, Perbill::from_percent(50))].to_vec(); + assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone())); + + System::assert_last_event(Event::OffenceCreated { offenders }.into()); + + // 41 didn't get slashed. + assert_eq!(Balances::free_balance(41), 1000); + }) +} From e7f994d1e797420f252dd24714b029071ccbc46c Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Wed, 28 Sep 2022 22:52:16 +0200 Subject: [PATCH 24/42] bounding staking: `BoundedElectionProvider` trait (#12362) * add a bounded election provider trait * extract common trait election provider base * fmt * only bound the outer support vector * fix tests * docs * fix rust docs * fmt * fix rustdocs * docs * improve docs * small doc change --- .../election-provider-multi-phase/src/lib.rs | 16 ++-- .../election-provider-multi-phase/src/mock.rs | 5 +- frame/election-provider-support/src/lib.rs | 83 ++++++++++++------- .../election-provider-support/src/onchain.rs | 27 +++--- frame/fast-unstake/src/lib.rs | 5 +- frame/fast-unstake/src/mock.rs | 4 +- primitives/npos-elections/src/lib.rs | 14 ++-- 7 files changed, 99 insertions(+), 55 deletions(-) diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index bba8139f38f44..649aec30c58b3 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -231,7 +231,8 @@ use codec::{Decode, Encode}; use frame_election_provider_support::{ - ElectionDataProvider, ElectionProvider, InstantElectionProvider, NposSolution, + ElectionDataProvider, ElectionProvider, ElectionProviderBase, InstantElectionProvider, + NposSolution, }; 
use frame_support::{ dispatch::DispatchClass, @@ -289,7 +290,7 @@ pub type SolutionTargetIndexOf = as NposSolution>::TargetIndex pub type SolutionAccuracyOf = ::MinerConfig> as NposSolution>::Accuracy; /// The fallback election type. -pub type FallbackErrorOf = <::Fallback as ElectionProvider>::Error; +pub type FallbackErrorOf = <::Fallback as ElectionProviderBase>::Error; /// Configuration for the benchmarks of the pallet. pub trait BenchmarkingConfig { @@ -312,7 +313,7 @@ pub trait BenchmarkingConfig { /// A fallback implementation that transitions the pallet to the emergency phase. pub struct NoFallback(sp_std::marker::PhantomData); -impl ElectionProvider for NoFallback { +impl ElectionProviderBase for NoFallback { type AccountId = T::AccountId; type BlockNumber = T::BlockNumber; type DataProvider = T::DataProvider; @@ -321,7 +322,9 @@ impl ElectionProvider for NoFallback { fn ongoing() -> bool { false } +} +impl ElectionProvider for NoFallback { fn elect() -> Result, Self::Error> { // Do nothing, this will enable the emergency phase. Err("NoFallback.") @@ -1563,7 +1566,7 @@ impl Pallet { >::take() .ok_or(ElectionError::::NothingQueued) .or_else(|_| { - T::Fallback::elect() + ::elect() .map(|supports| ReadySolution { supports, score: Default::default(), @@ -1598,7 +1601,7 @@ impl Pallet { } } -impl ElectionProvider for Pallet { +impl ElectionProviderBase for Pallet { type AccountId = T::AccountId; type BlockNumber = T::BlockNumber; type Error = ElectionError; @@ -1610,7 +1613,9 @@ impl ElectionProvider for Pallet { _ => true, } } +} +impl ElectionProvider for Pallet { fn elect() -> Result, Self::Error> { match Self::do_elect() { Ok(supports) => { @@ -1627,7 +1632,6 @@ impl ElectionProvider for Pallet { } } } - /// convert a DispatchError to a custom InvalidTransaction with the inner code being the error /// number. 
pub fn dispatch_error_to_invalid(error: DispatchError) -> InvalidTransaction { diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 34aa2e1bbfc58..c1c53a3980676 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -297,7 +297,7 @@ impl onchain::Config for OnChainSeqPhragmen { } pub struct MockFallback; -impl ElectionProvider for MockFallback { +impl ElectionProviderBase for MockFallback { type AccountId = AccountId; type BlockNumber = u64; type Error = &'static str; @@ -306,7 +306,8 @@ impl ElectionProvider for MockFallback { fn ongoing() -> bool { false } - +} +impl ElectionProvider for MockFallback { fn elect() -> Result, Self::Error> { Self::elect_with_bounds(Bounded::max_value(), Bounded::max_value()) } diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs index 0bf62bd8c35cd..5ee65e102bd06 100644 --- a/frame/election-provider-support/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -20,10 +20,11 @@ //! This crate provides two traits that could interact to enable extensible election functionality //! within FRAME pallets. //! -//! Something that will provide the functionality of election will implement [`ElectionProvider`], -//! whilst needing an associated [`ElectionProvider::DataProvider`], which needs to be fulfilled by -//! an entity implementing [`ElectionDataProvider`]. Most often, *the data provider is* the receiver -//! of the election, resulting in a diagram as below: +//! Something that will provide the functionality of election will implement +//! [`ElectionProvider`] and its parent-trait [`ElectionProviderBase`], whilst needing an +//! associated [`ElectionProviderBase::DataProvider`], which needs to be +//! fulfilled by an entity implementing [`ElectionDataProvider`]. Most often, *the data provider is* +//! 
the receiver of the election, resulting in a diagram as below: //! //! ```ignore //! ElectionDataProvider @@ -131,12 +132,16 @@ //! type DataProvider: ElectionDataProvider; //! } //! -//! impl ElectionProvider for GenericElectionProvider { +//! impl ElectionProviderBase for GenericElectionProvider { //! type AccountId = AccountId; //! type BlockNumber = BlockNumber; //! type Error = &'static str; //! type DataProvider = T::DataProvider; //! fn ongoing() -> bool { false } +//! +//! } +//! +//! impl ElectionProvider for GenericElectionProvider { //! fn elect() -> Result, Self::Error> { //! Self::DataProvider::electable_targets(None) //! .map_err(|_| "failed to elect") @@ -177,8 +182,8 @@ pub use frame_support::{traits::Get, weights::Weight, BoundedVec, RuntimeDebug}; /// Re-export some type as they are used in the interface. pub use sp_arithmetic::PerThing; pub use sp_npos_elections::{ - Assignment, BalancingConfig, ElectionResult, Error, ExtendedBalance, IdentifierT, PerThing128, - Support, Supports, VoteWeight, + Assignment, BalancingConfig, BoundedSupports, ElectionResult, Error, ExtendedBalance, + IdentifierT, PerThing128, Support, Supports, VoteWeight, }; pub use traits::NposSolution; @@ -349,12 +354,12 @@ pub trait ElectionDataProvider { fn clear() {} } -/// Something that can compute the result of an election and pass it back to the caller. +/// Base trait for [`ElectionProvider`] and [`BoundedElectionProvider`]. It is +/// meant to be used only with an extension trait that adds an election +/// functionality. /// -/// This trait only provides an interface to _request_ an election, i.e. -/// [`ElectionProvider::elect`]. That data required for the election need to be passed to the -/// implemented of this trait through [`ElectionProvider::DataProvider`]. -pub trait ElectionProvider { +/// Data can be bounded or unbounded and is fetched from [`Self::DataProvider`]. +pub trait ElectionProviderBase { /// The account identifier type. 
type AccountId; @@ -372,24 +377,39 @@ pub trait ElectionProvider { /// Indicate if this election provider is currently ongoing an asynchronous election or not. fn ongoing() -> bool; +} - - /// Elect a new set of winners, without specifying any bounds on the amount of data fetched from - /// [`Self::DataProvider`]. An implementation could nonetheless impose its own custom limits. - /// - /// The result is returned in a target major format, namely as *vector of supports*. - /// - /// This should be implemented as a self-weighing function. The implementor should register its - /// appropriate weight at the end of execution with the system pallet directly. +/// Elect a new set of winners, bounded by `MaxWinners`. +/// +/// Returns a result in bounded, target major format, namely as +/// *BoundedVec<(AccountId, Vec), MaxWinners>*. +pub trait BoundedElectionProvider: ElectionProviderBase { + /// The upper bound on election winners. + type MaxWinners: Get; + /// Performs the election. This should be implemented as a self-weighing function. The + /// implementor should register its appropriate weight at the end of execution with the + /// system pallet directly. + fn elect() -> Result, Self::Error>; +} + +/// Same as [`BoundedElectionProvider`], but no bounds are imposed on the number +/// of winners. +/// +/// The result is returned in a target major format, namely as +/// *Vec<(AccountId, Vec)>*. +pub trait ElectionProvider: ElectionProviderBase { + /// Performs the election. This should be implemented as a self-weighing + /// function, similar to [`BoundedElectionProvider::elect()`]. fn elect() -> Result, Self::Error> ; } -/// A sub-trait of the [`ElectionProvider`] for cases where we need to be sure an election needs to -/// happen instantly, not asynchronously. +/// A sub-trait of the [`ElectionProvider`] for cases where we need to be sure +/// an election needs to happen instantly, not asynchronously. /// /// The same `DataProvider` is assumed to be used. 
/// -/// Consequently, allows for control over the amount of data that is being fetched from the -/// [`ElectionProvider::DataProvider`]. +/// Consequently, allows for control over the amount of data that is being +/// fetched from the [`ElectionProviderBase::DataProvider`]. pub trait InstantElectionProvider: ElectionProvider { /// Elect a new set of winners, but unlike [`ElectionProvider::elect`] which cannot enforce /// bounds, this trait method can enforce bounds on the amount of data provided by the @@ -410,7 +430,7 @@ pub trait InstantElectionProvider: ElectionProvider { pub struct NoElection(sp_std::marker::PhantomData); #[cfg(feature = "std")] -impl ElectionProvider +impl ElectionProviderBase for NoElection<(AccountId, BlockNumber, DataProvider)> where DataProvider: ElectionDataProvider, @@ -420,15 +440,22 @@ where type Error = &'static str; type DataProvider = DataProvider; - fn elect() -> Result, Self::Error> { - Err(" cannot do anything.") - } - fn ongoing() -> bool { false } } +#[cfg(feature = "std")] +impl ElectionProvider + for NoElection<(AccountId, BlockNumber, DataProvider)> +where + DataProvider: ElectionDataProvider, +{ + fn elect() -> Result, Self::Error> { + Err(" cannot do anything.") + } +} + /// A utility trait for something to implement `ElectionDataProvider` in a sensible way. /// /// This is generic over `AccountId` and it can represent a validator, a nominator, or any other diff --git a/frame/election-provider-support/src/onchain.rs b/frame/election-provider-support/src/onchain.rs index 10c3519d03df6..88aa6ca7267a0 100644 --- a/frame/election-provider-support/src/onchain.rs +++ b/frame/election-provider-support/src/onchain.rs @@ -20,7 +20,8 @@ //! careful when using it onchain. 
use crate::{ - Debug, ElectionDataProvider, ElectionProvider, InstantElectionProvider, NposSolver, WeightInfo, + Debug, ElectionDataProvider, ElectionProvider, ElectionProviderBase, InstantElectionProvider, + NposSolver, WeightInfo, }; use frame_support::{dispatch::DispatchClass, traits::Get}; use sp_npos_elections::*; @@ -133,15 +134,6 @@ fn elect_with( } impl ElectionProvider for UnboundedExecution { - type AccountId = ::AccountId; - type BlockNumber = ::BlockNumber; - type Error = Error; - type DataProvider = T::DataProvider; - - fn ongoing() -> bool { - false - } - fn elect() -> Result, Self::Error> { // This should not be called if not in `std` mode (and therefore neither in genesis nor in // testing) @@ -156,6 +148,17 @@ impl ElectionProvider for UnboundedExecution { } } +impl ElectionProviderBase for UnboundedExecution { + type AccountId = ::AccountId; + type BlockNumber = ::BlockNumber; + type Error = Error; + type DataProvider = T::DataProvider; + + fn ongoing() -> bool { + false + } +} + impl InstantElectionProvider for UnboundedExecution { fn elect_with_bounds( max_voters: usize, @@ -165,7 +168,7 @@ impl InstantElectionProvider for UnboundedExecution { } } -impl ElectionProvider for BoundedExecution { +impl ElectionProviderBase for BoundedExecution { type AccountId = ::AccountId; type BlockNumber = ::BlockNumber; type Error = Error; @@ -174,7 +177,9 @@ impl ElectionProvider for BoundedExecution { fn ongoing() -> bool { false } +} +impl ElectionProvider for BoundedExecution { fn elect() -> Result, Self::Error> { elect_with::(Some(T::VotersBound::get() as usize), Some(T::TargetsBound::get() as usize)) } diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index ed26d6b436e1d..8fdb7a79dd537 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -80,7 +80,7 @@ macro_rules! 
log { pub mod pallet { use super::*; use crate::types::*; - use frame_election_provider_support::ElectionProvider; + use frame_election_provider_support::ElectionProviderBase; use frame_support::{ pallet_prelude::*, traits::{Defensive, ReservableCurrency}, @@ -330,7 +330,8 @@ pub mod pallet { } } - if ::ElectionProvider::ongoing() { + if <::ElectionProvider as ElectionProviderBase>::ongoing() + { // NOTE: we assume `ongoing` does not consume any weight. // there is an ongoing election -- we better not do anything. Imagine someone is not // exposed anywhere in the last era, and the snapshot for the election is already diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index 4c4c5f9ff26fd..dc2c694d52956 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -104,7 +104,7 @@ parameter_types! { } pub struct MockElection; -impl frame_election_provider_support::ElectionProvider for MockElection { +impl frame_election_provider_support::ElectionProviderBase for MockElection { type AccountId = AccountId; type BlockNumber = BlockNumber; type DataProvider = Staking; @@ -113,7 +113,9 @@ impl frame_election_provider_support::ElectionProvider for MockElection { fn ongoing() -> bool { Ongoing::get() } +} +impl frame_election_provider_support::ElectionProvider for MockElection { fn elect() -> Result, Self::Error> { Err(()) } diff --git a/primitives/npos-elections/src/lib.rs b/primitives/npos-elections/src/lib.rs index dd2a9bf198f8d..514ded67ad38b 100644 --- a/primitives/npos-elections/src/lib.rs +++ b/primitives/npos-elections/src/lib.rs @@ -74,17 +74,16 @@ #![cfg_attr(not(feature = "std"), no_std)] +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; use sp_arithmetic::{traits::Zero, Normalizable, PerThing, Rational128, ThresholdOrd}; -use sp_core::RuntimeDebug; +use sp_core::{bounded::BoundedVec, RuntimeDebug}; use sp_std::{ cell::RefCell, 
cmp::Ordering, collections::btree_map::BTreeMap, prelude::*, rc::Rc, vec, }; -use codec::{Decode, Encode, MaxEncodedLen}; -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; - #[cfg(test)] mod mock; #[cfg(test)] @@ -451,6 +450,11 @@ impl Default for Support { /// The main advantage of this is that it is encodable. pub type Supports = Vec<(A, Support)>; +/// Same as `Supports` bounded by `MaxWinners`. +/// +/// To note, the inner `Support` is still unbounded. +pub type BoundedSupports = BoundedVec<(A, Support), MaxWinners>; + /// Linkage from a winner to their [`Support`]. /// /// This is more helpful than a normal [`Supports`] as it allows faster error checking. From 427fd09bcb193c1e79dec85b1e207c718b686c35 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Thu, 29 Sep 2022 09:28:22 +0300 Subject: [PATCH 25/42] BEEFY: impl TypeInfo for SignedCommitment (#12382) --- primitives/beefy/src/commitment.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/primitives/beefy/src/commitment.rs b/primitives/beefy/src/commitment.rs index 4880d4b69ab01..0e22c8d56d937 100644 --- a/primitives/beefy/src/commitment.rs +++ b/primitives/beefy/src/commitment.rs @@ -16,6 +16,7 @@ // limitations under the License. use codec::{Decode, Encode, Error, Input}; +use scale_info::TypeInfo; use sp_std::{cmp, prelude::*}; use crate::ValidatorSetId; @@ -39,7 +40,7 @@ pub mod known_payload_ids { /// Identifiers MUST be sorted by the [`BeefyPayloadId`] to allow efficient lookup of expected /// value. Duplicated identifiers are disallowed. It's okay for different implementations to only /// support a subset of possible values. -#[derive(Decode, Encode, Debug, PartialEq, Eq, Clone, Ord, PartialOrd, Hash)] +#[derive(Decode, Encode, Debug, PartialEq, Eq, Clone, Ord, PartialOrd, Hash, TypeInfo)] pub struct Payload(Vec<(BeefyPayloadId, Vec)>); impl Payload { @@ -80,7 +81,7 @@ impl Payload { /// height [block_number](Commitment::block_number). 
/// GRANDPA validators collect signatures on commitments and a stream of such signed commitments /// (see [SignedCommitment]) forms the BEEFY protocol. -#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode)] +#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, TypeInfo)] pub struct Commitment { /// A collection of payloads to be signed, see [`Payload`] for details. /// @@ -138,7 +139,7 @@ where /// Note that SCALE-encoding of the structure is optimized for size efficiency over the wire, /// please take a look at custom [`Encode`] and [`Decode`] implementations and /// `CompactSignedCommitment` struct. -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, TypeInfo)] pub struct SignedCommitment { /// The commitment signatures are collected for. pub commitment: Commitment, From 61b9a4d1a8a9bf39c1d89a8dd02f82785c10860c Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Thu, 29 Sep 2022 23:48:10 +0800 Subject: [PATCH 26/42] Properly set the max proof size weight on defaults and tests (#12383) * Properly set the max proof size weight on defaults and tests * cargo fmt * Set proper max proof size for contracts pallet tests * Properly set max proof size for node * Properly set max proof size for frame system mock * Update test expectations * Update test expectations * Properly set max proof size for balances mock * Update test expectations * Update test expectations * Properly set max proof size for democracy mock * Properly set max proof size for scheduler mock * Properly set max proof size for fast unstake mock * Properly set max proof size for tx payment mock * Properly set max proof size for elections phragmen mock * Properly set max proof size for node template --- bin/node-template/runtime/src/lib.rs | 7 +- bin/node/runtime/src/impls.rs | 2 +- bin/node/runtime/src/lib.rs | 4 +- frame/balances/src/tests_composite.rs | 4 +- frame/balances/src/tests_local.rs | 4 +- frame/balances/src/tests_reentrancy.rs | 4 +- frame/contracts/src/tests.rs | 14 +- 
frame/democracy/src/tests.rs | 4 +- .../election-provider-multi-phase/src/lib.rs | 9 +- .../election-provider-multi-phase/src/mock.rs | 7 +- .../src/signed.rs | 9 +- .../src/unsigned.rs | 292 +++++++++++++++--- frame/elections-phragmen/src/lib.rs | 4 +- frame/executive/src/lib.rs | 5 +- frame/fast-unstake/src/mock.rs | 4 +- frame/grandpa/src/tests.rs | 3 +- frame/scheduler/src/mock.rs | 4 +- frame/system/src/extensions/check_weight.rs | 20 +- frame/system/src/limits.rs | 24 +- frame/system/src/mock.rs | 2 +- .../asset-tx-payment/src/tests.rs | 2 +- frame/transaction-payment/src/lib.rs | 2 +- 22 files changed, 323 insertions(+), 107 deletions(-) diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index f801068b10fda..1d0e18d31bf80 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -139,8 +139,11 @@ parameter_types! { pub const BlockHashCount: BlockNumber = 2400; pub const Version: RuntimeVersion = VERSION; /// We allow for 2 seconds of compute with a 6 second average block time. - pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights - ::with_sensible_defaults(2u64 * WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::with_sensible_defaults( + (2u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX), + NORMAL_DISPATCH_RATIO, + ); pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength ::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); pub const SS58Prefix: u8 = 42; diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index fb2f3cec65290..0f9ed6e275196 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -224,7 +224,7 @@ mod multiplier_tests { fn multiplier_can_grow_from_zero() { // if the min is too small, then this will not change, and we are doomed forever. 
// the weight is 1/100th bigger than target. - run_with_system_weight(target() * 101 / 100, || { + run_with_system_weight(target().set_ref_time(target().ref_time() * 101 / 100), || { let next = runtime_multiplier_update(min_multiplier()); assert!(next > min_multiplier(), "{:?} !>= {:?}", next, min_multiplier()); }) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index aa1a525bf095c..5e4fdb4748d15 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -170,8 +170,8 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used /// by Operational extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -/// We allow for 2 seconds of compute with a 6 second average block time. -const MAXIMUM_BLOCK_WEIGHT: Weight = WEIGHT_PER_SECOND.saturating_mul(2); +/// We allow for 2 seconds of compute with a 6 second average block time, with maximum proof size. +const MAXIMUM_BLOCK_WEIGHT: Weight = WEIGHT_PER_SECOND.saturating_mul(2).set_proof_size(u64::MAX); parameter_types! { pub const BlockHashCount: BlockNumber = 2400; diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index 1e38d611773d4..f8a8fdd1851d4 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -47,7 +47,9 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max( + frame_support::weights::Weight::from_ref_time(1024).set_proof_size(u64::MAX), + ); pub static ExistentialDeposit: u64 = 0; } impl frame_system::Config for Test { diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index e080eafb66067..152a5da37410f 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -48,7 +48,9 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max( + frame_support::weights::Weight::from_ref_time(1024).set_proof_size(u64::MAX), + ); pub static ExistentialDeposit: u64 = 0; } impl frame_system::Config for Test { diff --git a/frame/balances/src/tests_reentrancy.rs b/frame/balances/src/tests_reentrancy.rs index fa2eb0e488e7d..90363140000e8 100644 --- a/frame/balances/src/tests_reentrancy.rs +++ b/frame/balances/src/tests_reentrancy.rs @@ -51,7 +51,9 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max( + frame_support::weights::Weight::from_ref_time(1024).set_proof_size(u64::MAX), + ); pub static ExistentialDeposit: u64 = 0; } impl frame_system::Config for Test { diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index a56e4f5564845..e5893c3dbd112 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -279,7 +279,9 @@ impl RegisteredChainExtension for TempStorageExtension { parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(2u64 * WEIGHT_PER_SECOND); + frame_system::limits::BlockWeights::simple_max( + (2u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX), + ); pub static ExistentialDeposit: u64 = 1; } impl frame_system::Config for Test { @@ -413,7 +415,7 @@ pub const BOB: AccountId32 = AccountId32::new([2u8; 32]); pub const CHARLIE: AccountId32 = AccountId32::new([3u8; 32]); pub const DJANGO: AccountId32 = AccountId32::new([4u8; 32]); -pub const GAS_LIMIT: Weight = Weight::from_ref_time(100_000_000_000); +pub const GAS_LIMIT: Weight = Weight::from_ref_time(100_000_000_000).set_proof_size(u64::MAX); pub struct ExtBuilder { existential_deposit: u64, @@ -628,7 +630,7 @@ fn deposit_event_max_value_limit() { RuntimeOrigin::signed(ALICE), addr.clone(), 0, - GAS_LIMIT * 2, // we are copying a huge buffer, + GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2), // we are copying a huge buffer, None, ::Schedule::get().limits.payload_len.encode(), )); @@ -769,7 +771,7 @@ fn storage_max_value_limit() { RuntimeOrigin::signed(ALICE), addr.clone(), 0, - GAS_LIMIT * 2, // we are copying a huge buffer + GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2), // we are copying a huge buffer None, ::Schedule::get().limits.payload_len.encode(), )); @@ -2543,7 +2545,7 @@ fn gas_estimation_nested_call_fixed_limit() { ALICE, addr_caller, 0, - Weight::from_ref_time(result.gas_required), + Weight::from_ref_time(result.gas_required).set_proof_size(u64::MAX), Some(result.storage_deposit.charge_or_zero()), input, false, @@ -2613,7 +2615,7 @@ fn gas_estimation_call_runtime() { ALICE, addr_caller, 0, - Weight::from_ref_time(result.gas_required), + Weight::from_ref_time(result.gas_required).set_proof_size(u64::MAX), None, call.encode(), false, diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index 17b35ee3c38cd..03d7216fd5aaa 100644 --- a/frame/democracy/src/tests.rs +++ 
b/frame/democracy/src/tests.rs @@ -78,7 +78,9 @@ impl Contains for BaseFilter { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(1_000_000)); + frame_system::limits::BlockWeights::simple_max( + Weight::from_ref_time(1_000_000).set_proof_size(u64::MAX), + ); } impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 649aec30c58b3..fb17bd25ea541 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -1011,10 +1011,8 @@ pub mod pallet { // unlikely to ever return an error: if phase is signed, snapshot will exist. let size = Self::snapshot_metadata().ok_or(Error::::MissingSnapshotMetadata)?; - // TODO: account for proof size weight ensure!( - Self::solution_weight_of(&raw_solution, size).ref_time() < - T::SignedMaxWeight::get().ref_time(), + Self::solution_weight_of(&raw_solution, size).all_lt(T::SignedMaxWeight::get()), Error::::SignedTooMuchWeight, ); @@ -2342,9 +2340,8 @@ mod tests { }; let mut active = 1; - // TODO: account for proof size weight - while weight_with(active).ref_time() <= - ::BlockWeights::get().max_block.ref_time() || + while weight_with(active) + .all_lte(::BlockWeights::get().max_block) || active == all_voters { active += 1; diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index c1c53a3980676..d3082be0cf750 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -26,7 +26,7 @@ pub use frame_support::{assert_noop, assert_ok, pallet_prelude::GetDefault}; use frame_support::{ bounded_vec, parameter_types, traits::{ConstU32, Hooks}, - weights::Weight, + weights::{constants, Weight}, BoundedVec, }; use multi_phase::unsigned::{IndexAssignmentOf, 
VoterOf}; @@ -227,7 +227,10 @@ const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); parameter_types! { pub const ExistentialDeposit: u64 = 1; pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights - ::with_sensible_defaults(2u64 * frame_support::weights::constants::WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); + ::with_sensible_defaults( + Weight::from_components(2u64 * constants::WEIGHT_PER_SECOND.ref_time(), u64::MAX), + NORMAL_DISPATCH_RATIO, + ); } impl pallet_balances::Config for Runtime { diff --git a/frame/election-provider-multi-phase/src/signed.rs b/frame/election-provider-multi-phase/src/signed.rs index 1cf071e6796f1..2e01d99be0a42 100644 --- a/frame/election-provider-multi-phase/src/signed.rs +++ b/frame/election-provider-multi-phase/src/signed.rs @@ -957,7 +957,7 @@ mod tests { #[test] fn cannot_consume_too_much_future_weight() { ExtBuilder::default() - .signed_weight(Weight::from_ref_time(40)) + .signed_weight(Weight::from_ref_time(40).set_proof_size(u64::MAX)) .mock_weight_info(MockedWeightInfo::Basic) .build_and_execute(|| { roll_to(15); @@ -973,11 +973,14 @@ mod tests { // default solution will have 5 edges (5 * 5 + 10) assert_eq!(solution_weight, Weight::from_ref_time(35)); assert_eq!(raw.solution.voter_count(), 5); - assert_eq!(::SignedMaxWeight::get(), Weight::from_ref_time(40)); + assert_eq!( + ::SignedMaxWeight::get(), + Weight::from_ref_time(40).set_proof_size(u64::MAX) + ); assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(raw.clone()))); - ::set(Weight::from_ref_time(30)); + ::set(Weight::from_ref_time(30).set_proof_size(u64::MAX)); // note: resubmitting the same solution is technically okay as long as the queue has // space. 
diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index 281ac37421174..025ff832bb08a 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -638,8 +638,7 @@ impl Miner { }; let next_voters = |current_weight: Weight, voters: u32, step: u32| -> Result { - // TODO: account for proof size weight - if current_weight.ref_time() < max_weight.ref_time() { + if current_weight.all_lt(max_weight) { let next_voters = voters.checked_add(step); match next_voters { Some(voters) if voters < max_voters => Ok(voters), @@ -674,8 +673,7 @@ impl Miner { // Time to finish. We might have reduced less than expected due to rounding error. Increase // one last time if we have any room left, the reduce until we are sure we are below limit. - // TODO: account for proof size weight - while voters < max_voters && weight_with(voters + 1).ref_time() < max_weight.ref_time() { + while voters < max_voters && weight_with(voters + 1).all_lt(max_weight) { voters += 1; } while voters.checked_sub(1).is_some() && weight_with(voters).any_gt(max_weight) { @@ -683,9 +681,8 @@ impl Miner { } let final_decision = voters.min(size.voters); - // TODO: account for proof size weight debug_assert!( - weight_with(final_decision).ref_time() <= max_weight.ref_time(), + weight_with(final_decision).all_lte(max_weight), "weight_with({}) <= {}", final_decision, max_weight, @@ -703,151 +700,346 @@ mod max_weight { fn find_max_voter_binary_search_works() { let w = SolutionOrSnapshotSize { voters: 10, targets: 0 }; MockWeightInfo::set(crate::mock::MockedWeightInfo::Complex); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::zero()), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1)), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(999)), 0); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, 
Weight::from_ref_time(1000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::zero().set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(999).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1000).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1001)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1001).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1990)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1990).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1999)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1999).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2000).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2001)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2001).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2010)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2010).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2990)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2990).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2999)), + 
Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2999).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(3000).set_proof_size(u64::MAX) + ), 3 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3333)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(3333).set_proof_size(u64::MAX) + ), 3 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(5500)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(5500).set_proof_size(u64::MAX) + ), 5 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(7777)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(7777).set_proof_size(u64::MAX) + ), 7 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(9999)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(9999).set_proof_size(u64::MAX) + ), 9 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(10_000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(10_000).set_proof_size(u64::MAX) + ), 10 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(10_999)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(10_999).set_proof_size(u64::MAX) + ), 10 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(11_000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(11_000).set_proof_size(u64::MAX) + ), 10 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(22_000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(22_000).set_proof_size(u64::MAX) + ), 10 ); let w = SolutionOrSnapshotSize { voters: 1, targets: 0 }; - 
assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(0)), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1)), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(999)), 0); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(0).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(999).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1000).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1001)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1001).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1990)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1990).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1999)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1999).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2000).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2001)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2001).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2010)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2010).set_proof_size(u64::MAX) + 
), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3333)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(3333).set_proof_size(u64::MAX) + ), 1 ); let w = SolutionOrSnapshotSize { voters: 2, targets: 0 }; - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(0)), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1)), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(999)), 0); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(0).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(999).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1000).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1001)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1001).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1999)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1999).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2000).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2001)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2001).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2010)), + Miner::::maximum_voter_for_weight( 
+ 0, + w, + Weight::from_ref_time(2010).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3333)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(3333).set_proof_size(u64::MAX) + ), 2 ); } @@ -1131,7 +1323,7 @@ mod tests { #[test] fn miner_trims_weight() { ExtBuilder::default() - .miner_weight(Weight::from_ref_time(100)) + .miner_weight(Weight::from_ref_time(100).set_proof_size(u64::MAX)) .mock_weight_info(crate::mock::MockedWeightInfo::Basic) .build_and_execute(|| { roll_to(25); @@ -1149,7 +1341,7 @@ mod tests { assert_eq!(raw.solution.voter_count(), 5); // now reduce the max weight - ::set(Weight::from_ref_time(25)); + ::set(Weight::from_ref_time(25).set_proof_size(u64::MAX)); let (raw, witness) = MultiPhase::mine_solution().unwrap(); let solution_weight = ::solution_weight( diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 0616087d975e8..165a8fcab429b 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -1174,7 +1174,9 @@ mod tests { parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max( + frame_support::weights::Weight::from_ref_time(1024).set_proof_size(u64::MAX), + ); } impl frame_system::Config for Test { diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 014c7a2bc02a6..b7884efccf685 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -459,8 +459,7 @@ where let max_weight = >::get().max_block; let remaining_weight = max_weight.saturating_sub(weight.total()); - // TODO: account for proof size weight - if remaining_weight.ref_time() > 0 { + if remaining_weight.all_gt(Weight::zero()) { let used_weight = >::on_idle( block_number, remaining_weight, @@ -768,7 +767,7 @@ mod tests { frame_system::limits::BlockWeights::builder() .base_block(Weight::from_ref_time(10)) .for_class(DispatchClass::all(), |weights| weights.base_extrinsic = Weight::from_ref_time(5)) - .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = Weight::from_ref_time(1024).into()) + .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = Weight::from_ref_time(1024).set_proof_size(u64::MAX).into()) .build_or_panic(); pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 10, diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index dc2c694d52956..71fc2d4ba905a 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -32,7 +32,9 @@ pub type T = Runtime; parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(2u64 * WEIGHT_PER_SECOND); + frame_system::limits::BlockWeights::simple_max( + (2u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX), + ); } impl frame_system::Config for Runtime { diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index 5d2ebdf29cb6b..626decd12821e 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -856,8 +856,7 @@ fn valid_equivocation_reports_dont_pay_fees() { .get_dispatch_info(); // it should have non-zero weight and the fee has to be paid. - // TODO: account for proof size weight - assert!(info.weight.ref_time() > 0); + assert!(info.weight.any_gt(Weight::zero())); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. diff --git a/frame/scheduler/src/mock.rs b/frame/scheduler/src/mock.rs index 6f6667590a6c3..6aaad13e48183 100644 --- a/frame/scheduler/src/mock.rs +++ b/frame/scheduler/src/mock.rs @@ -118,7 +118,9 @@ impl Contains for BaseFilter { parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(2_000_000_000_000)); + frame_system::limits::BlockWeights::simple_max( + Weight::from_ref_time(2_000_000_000_000).set_proof_size(u64::MAX), + ); } impl system::Config for Test { type BaseCallFilter = BaseFilter; diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index 15a88913cd337..5c3b80f59bfa8 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -310,7 +310,7 @@ mod tests { check(|max, len| { assert_ok!(CheckWeight::::do_pre_dispatch(max, len)); assert_eq!(System::block_weight().total(), Weight::MAX); - assert!(System::block_weight().total().all_gt(block_weight_limit())); + assert!(System::block_weight().total().ref_time() > block_weight_limit().ref_time()); }); check(|max, len| { assert_ok!(CheckWeight::::do_validate(max, len)); @@ -367,7 +367,7 @@ mod tests { new_test_ext().execute_with(|| { System::register_extra_weight_unchecked(Weight::MAX, DispatchClass::Normal); assert_eq!(System::block_weight().total(), Weight::MAX); - assert!(System::block_weight().total().all_gt(block_weight_limit())); + assert!(System::block_weight().total().ref_time() > block_weight_limit().ref_time()); }); } @@ -392,8 +392,8 @@ mod tests { assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); assert_eq!(System::block_weight().total(), Weight::from_ref_time(768)); assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); - assert_eq!(block_weight_limit(), Weight::from_ref_time(1024)); - assert_eq!(System::block_weight().total(), block_weight_limit()); + assert_eq!(block_weight_limit(), Weight::from_ref_time(1024).set_proof_size(u64::MAX)); + assert_eq!(System::block_weight().total(), block_weight_limit().set_proof_size(0)); // Checking single extrinsic should not take current block weight into account. 
assert_eq!(CheckWeight::::check_extrinsic_weight(&rest_operational), Ok(())); }); @@ -417,8 +417,8 @@ mod tests { // Extra 20 here from block execution + base extrinsic weight assert_eq!(System::block_weight().total(), Weight::from_ref_time(266)); assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); - assert_eq!(block_weight_limit(), Weight::from_ref_time(1024)); - assert_eq!(System::block_weight().total(), block_weight_limit()); + assert_eq!(block_weight_limit(), Weight::from_ref_time(1024).set_proof_size(u64::MAX)); + assert_eq!(System::block_weight().total(), block_weight_limit().set_proof_size(0)); }); } @@ -669,7 +669,7 @@ mod tests { assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); assert_eq!(System::block_weight().total(), Weight::from_ref_time(768)); assert_ok!(CheckWeight::::do_pre_dispatch(&mandatory, len)); - assert_eq!(block_weight_limit(), Weight::from_ref_time(1024)); + assert_eq!(block_weight_limit(), Weight::from_ref_time(1024).set_proof_size(u64::MAX)); assert_eq!(System::block_weight().total(), Weight::from_ref_time(1024 + 768)); assert_eq!(CheckWeight::::check_extrinsic_weight(&mandatory), Ok(())); }); @@ -682,11 +682,11 @@ mod tests { .base_block(Weight::zero()) .for_class(DispatchClass::non_mandatory(), |w| { w.base_extrinsic = Weight::zero(); - w.max_total = Some(Weight::from_ref_time(20)); + w.max_total = Some(Weight::from_ref_time(20).set_proof_size(u64::MAX)); }) .for_class(DispatchClass::Mandatory, |w| { w.base_extrinsic = Weight::zero(); - w.reserved = Some(Weight::from_ref_time(5)); + w.reserved = Some(Weight::from_ref_time(5).set_proof_size(u64::MAX)); w.max_total = None; }) .build_or_panic(); @@ -695,7 +695,7 @@ mod tests { DispatchClass::Operational => Weight::from_ref_time(10), DispatchClass::Mandatory => Weight::zero(), }); - assert_eq!(maximum_weight.max_block, all_weight.total()); + assert_eq!(maximum_weight.max_block, all_weight.total().set_proof_size(u64::MAX)); // fits into reserved let mandatory1 = 
DispatchInfo { diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs index cfc1d261baa01..07ad240afe159 100644 --- a/frame/system/src/limits.rs +++ b/frame/system/src/limits.rs @@ -207,7 +207,10 @@ pub struct BlockWeights { impl Default for BlockWeights { fn default() -> Self { - Self::with_sensible_defaults(1u64 * constants::WEIGHT_PER_SECOND, DEFAULT_NORMAL_RATIO) + Self::with_sensible_defaults( + Weight::from_components(constants::WEIGHT_PER_SECOND.ref_time(), u64::MAX), + DEFAULT_NORMAL_RATIO, + ) } } @@ -224,7 +227,6 @@ impl BlockWeights { } let mut error = ValidationErrors::default(); - // TODO: account for proof size weight in the assertions below for class in DispatchClass::all() { let weights = self.per_class.get(*class); let max_for_class = or_max(weights.max_total); @@ -233,16 +235,18 @@ impl BlockWeights { // Make sure that if total is set it's greater than base_block && // base_for_class error_assert!( - (max_for_class.ref_time() > self.base_block.ref_time() && max_for_class.ref_time() > base_for_class.ref_time()) - || max_for_class.ref_time() == 0, + (max_for_class.all_gt(self.base_block) && max_for_class.all_gt(base_for_class)) + || max_for_class == Weight::zero(), &mut error, "[{:?}] {:?} (total) has to be greater than {:?} (base block) & {:?} (base extrinsic)", class, max_for_class, self.base_block, base_for_class, ); // Max extrinsic can't be greater than max_for_class. 
error_assert!( - weights.max_extrinsic.unwrap_or(Weight::zero()).ref_time() <= - max_for_class.saturating_sub(base_for_class).ref_time(), + weights + .max_extrinsic + .unwrap_or(Weight::zero()) + .all_lte(max_for_class.saturating_sub(base_for_class)), &mut error, "[{:?}] {:?} (max_extrinsic) can't be greater than {:?} (max for class)", class, @@ -251,14 +255,14 @@ impl BlockWeights { ); // Max extrinsic should not be 0 error_assert!( - weights.max_extrinsic.unwrap_or_else(Weight::max_value).ref_time() > 0, + weights.max_extrinsic.unwrap_or_else(Weight::max_value).all_gt(Weight::zero()), &mut error, "[{:?}] {:?} (max_extrinsic) must not be 0. Check base cost and average initialization cost.", class, weights.max_extrinsic, ); // Make sure that if reserved is set it's greater than base_for_class. error_assert!( - reserved.ref_time() > base_for_class.ref_time() || reserved.ref_time() == 0, + reserved.all_gt(base_for_class) || reserved == Weight::zero(), &mut error, "[{:?}] {:?} (reserved) has to be greater than {:?} (base extrinsic) if set", class, @@ -267,7 +271,7 @@ impl BlockWeights { ); // Make sure max block is greater than max_total if it's set. error_assert!( - self.max_block.ref_time() >= weights.max_total.unwrap_or(Weight::zero()).ref_time(), + self.max_block.all_gte(weights.max_total.unwrap_or(Weight::zero())), &mut error, "[{:?}] {:?} (max block) has to be greater than {:?} (max for class)", class, @@ -276,7 +280,7 @@ impl BlockWeights { ); // Make sure we can fit at least one extrinsic. 
error_assert!( - self.max_block.ref_time() > (base_for_class + self.base_block).ref_time(), + self.max_block.all_gt(base_for_class + self.base_block), &mut error, "[{:?}] {:?} (max block) must fit at least one extrinsic {:?} (base weight)", class, diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index b6fc121612050..d31a1b08667e5 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -41,7 +41,7 @@ frame_support::construct_runtime!( ); const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -const MAX_BLOCK_WEIGHT: Weight = Weight::from_ref_time(1024); +const MAX_BLOCK_WEIGHT: Weight = Weight::from_ref_time(1024).set_proof_size(u64::MAX); parameter_types! { pub Version: RuntimeVersion = RuntimeVersion { diff --git a/frame/transaction-payment/asset-tx-payment/src/tests.rs b/frame/transaction-payment/asset-tx-payment/src/tests.rs index cdf7d17898145..e775f3aa92990 100644 --- a/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -71,7 +71,7 @@ impl Get for BlockWeights { weights.base_extrinsic = ExtrinsicBaseWeight::get().into(); }) .for_class(DispatchClass::non_mandatory(), |weights| { - weights.max_total = Weight::from_ref_time(1024).into(); + weights.max_total = Weight::from_ref_time(1024).set_proof_size(u64::MAX).into(); }) .build_or_panic() } diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 1ad6a2b3b3b6f..80297d1a0d362 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -889,7 +889,7 @@ mod tests { weights.base_extrinsic = ExtrinsicBaseWeight::get().into(); }) .for_class(DispatchClass::non_mandatory(), |weights| { - weights.max_total = Weight::from_ref_time(1024).into(); + weights.max_total = Weight::from_ref_time(1024).set_proof_size(u64::MAX).into(); }) .build_or_panic() } From c2026ca6e9b2a24d8ae1a05c5b3784ffa0748946 Mon Sep 17 00:00:00 2001 From: Keith Yeung 
Date: Fri, 30 Sep 2022 11:14:13 +0800 Subject: [PATCH 27/42] Carry over where clauses defined in Config to Call and Hook (#12388) --- frame/support/procedural/src/pallet/expand/call.rs | 2 +- frame/support/procedural/src/pallet/expand/hooks.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs index 39d16109aa8fa..6b166e6726d38 100644 --- a/frame/support/procedural/src/pallet/expand/call.rs +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -32,7 +32,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { (span, where_clause, methods, docs) }, - None => (def.item.span(), None, Vec::new(), Vec::new()), + None => (def.item.span(), def.config.where_clause.clone(), Vec::new(), Vec::new()), }; let frame_support = &def.frame_support; let frame_system = &def.frame_system; diff --git a/frame/support/procedural/src/pallet/expand/hooks.rs b/frame/support/procedural/src/pallet/expand/hooks.rs index 48d4aec436d40..d8d009cf3c940 100644 --- a/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/frame/support/procedural/src/pallet/expand/hooks.rs @@ -26,7 +26,7 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { let has_runtime_upgrade = hooks.has_runtime_upgrade; (where_clause, span, has_runtime_upgrade) }, - None => (None, def.pallet_struct.attr_span, false), + None => (def.config.where_clause.clone(), def.pallet_struct.attr_span, false), }; let frame_support = &def.frame_support; From dbb72f3fd98253b72c0090375b738b9d00995090 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 30 Sep 2022 12:06:46 +0200 Subject: [PATCH 28/42] unsafe_pruning flag removed (#12385) --- client/cli/src/config.rs | 11 ----------- client/cli/src/params/import_params.rs | 12 ------------ 2 files changed, 23 deletions(-) diff --git a/client/cli/src/config.rs 
b/client/cli/src/config.rs index fad2ec7bc4a93..77689708a231f 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -659,17 +659,6 @@ pub trait CliConfiguration: Sized { } } - if self.import_params().map_or(false, |p| { - #[allow(deprecated)] - p.unsafe_pruning - }) { - // according to https://github.com/substrate/issues/8103; - warn!( - "WARNING: \"--unsafe-pruning\" CLI-flag is deprecated and has no effect. \ - In future builds it will be removed, and providing this flag will lead to an error." - ); - } - Ok(()) } } diff --git a/client/cli/src/params/import_params.rs b/client/cli/src/params/import_params.rs index c851050838965..3cd9fd83bd31b 100644 --- a/client/cli/src/params/import_params.rs +++ b/client/cli/src/params/import_params.rs @@ -41,18 +41,6 @@ pub struct ImportParams { #[clap(flatten)] pub database_params: DatabaseParams, - /// THIS IS A DEPRECATED CLI-ARGUMENT. - /// - /// It has been preserved in order to not break the compatibility with the existing scripts. - /// Enabling this option will lead to a runtime warning. - /// In future this option will be removed completely, thus specifying it will lead to a start - /// up error. - /// - /// Details: - #[clap(long)] - #[deprecated = "According to https://github.com/paritytech/substrate/issues/8103"] - pub unsafe_pruning: bool, - /// Method for executing Wasm runtime code. 
#[clap( long = "wasm-execution", From 952030cfa6f11be6aef938e5359064c4cf6b30a9 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Fri, 30 Sep 2022 13:46:48 +0300 Subject: [PATCH 29/42] pallet-mmr: generate historical proofs (#12324) * BEEFY: generate historical proofs Signed-off-by: Serban Iorga * Update frame/merkle-mountain-range/rpc/src/lib.rs Co-authored-by: Adrian Catangiu * Update primitives/merkle-mountain-range/src/lib.rs Co-authored-by: Adrian Catangiu * Update frame/merkle-mountain-range/src/lib.rs Co-authored-by: Adrian Catangiu * cargo fmt * fix off-by-one in leaves powerset generation * test all possible mmr sizes for historical proofs * remove now redundant simple_historical_proof * cargo fmt Signed-off-by: Serban Iorga Co-authored-by: Adrian Catangiu Co-authored-by: Robert Hambrock --- bin/node/runtime/src/lib.rs | 39 ++- client/beefy/src/tests.rs | 7 + frame/merkle-mountain-range/rpc/src/lib.rs | 49 ++++ frame/merkle-mountain-range/src/lib.rs | 22 +- frame/merkle-mountain-range/src/tests.rs | 276 +++++++++++++++++++- primitives/merkle-mountain-range/src/lib.rs | 11 +- 6 files changed, 379 insertions(+), 25 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 5e4fdb4748d15..4fa4049e22682 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -2011,10 +2011,7 @@ impl_runtime_apis! { } } - impl pallet_mmr::primitives::MmrApi< - Block, - mmr::Hash, - > for Runtime { + impl pallet_mmr::primitives::MmrApi for Runtime { fn generate_proof(leaf_index: pallet_mmr::primitives::LeafIndex) -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> { @@ -2049,11 +2046,35 @@ impl_runtime_apis! 
{ Ok(Mmr::mmr_root()) } - fn generate_batch_proof(leaf_indices: Vec) - -> Result<(Vec, mmr::BatchProof), mmr::Error> - { - Mmr::generate_batch_proof(leaf_indices) - .map(|(leaves, proof)| (leaves.into_iter().map(|leaf| mmr::EncodableOpaqueLeaf::from_leaf(&leaf)).collect(), proof)) + fn generate_batch_proof( + leaf_indices: Vec, + ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { + Mmr::generate_batch_proof(leaf_indices).map(|(leaves, proof)| { + ( + leaves + .into_iter() + .map(|leaf| mmr::EncodableOpaqueLeaf::from_leaf(&leaf)) + .collect(), + proof, + ) + }) + } + + fn generate_historical_batch_proof( + leaf_indices: Vec, + leaves_count: pallet_mmr::primitives::LeafIndex, + ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { + Mmr::generate_historical_batch_proof(leaf_indices, leaves_count).map( + |(leaves, proof)| { + ( + leaves + .into_iter() + .map(|leaf| mmr::EncodableOpaqueLeaf::from_leaf(&leaf)) + .collect(), + proof, + ) + }, + ) } fn verify_batch_proof(leaves: Vec, proof: mmr::BatchProof) diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 26c85592ecb85..3e49f4e05cc91 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -277,6 +277,13 @@ macro_rules! create_test_api { unimplemented!() } + fn generate_historical_batch_proof( + _leaf_indices: Vec, + _leaves_count: LeafIndex + ) -> Result<(Vec, BatchProof), MmrError> { + unimplemented!() + } + fn verify_batch_proof(_leaves: Vec, _proof: BatchProof) -> Result<(), MmrError> { unimplemented!() } diff --git a/frame/merkle-mountain-range/rpc/src/lib.rs b/frame/merkle-mountain-range/rpc/src/lib.rs index 75032d40f492a..e939ff8ae7cd0 100644 --- a/frame/merkle-mountain-range/rpc/src/lib.rs +++ b/frame/merkle-mountain-range/rpc/src/lib.rs @@ -128,6 +128,31 @@ pub trait MmrApi { leaf_indices: Vec, at: Option, ) -> RpcResult>; + + /// Generate a MMR proof for the given `leaf_indices` of the MMR that had `leaves_count` leaves. 
+ /// + /// This method calls into a runtime with MMR pallet included and attempts to generate + /// a MMR proof for the set of leaves at the given `leaf_indices` with MMR fixed to the state + /// with exactly `leaves_count` leaves. `leaves_count` must be larger than all `leaf_indices` + /// for the function to succeed. + /// + /// Optionally, a block hash at which the runtime should be queried can be specified. + /// Note that specifying the block hash isn't super-useful here, unless you're generating + /// proof using non-finalized blocks where there are several competing forks. That's because + /// MMR state will be fixed to the state with `leaves_count`, which already points to some + /// historical block. + /// + /// Returns the leaves and a proof for these leaves (compact encoding, i.e. hash of + /// the leaves). Both parameters are SCALE-encoded. + /// The order of entries in the `leaves` field of the returned struct + /// is the same as the order of the entries in `leaf_indices` supplied + #[method(name = "mmr_generateHistoricalBatchProof")] + fn generate_historical_batch_proof( + &self, + leaf_indices: Vec, + leaves_count: LeafIndex, + at: Option, + ) -> RpcResult>; } /// MMR RPC methods. @@ -192,6 +217,30 @@ where Ok(LeafBatchProof::new(block_hash, leaves, proof)) } + + fn generate_historical_batch_proof( + &self, + leaf_indices: Vec, + leaves_count: LeafIndex, + at: Option<::Hash>, + ) -> RpcResult::Hash>> { + let api = self.client.runtime_api(); + let block_hash = at.unwrap_or_else(|| + // If the block hash is not supplied assume the best block. + self.client.info().best_hash); + + let (leaves, proof) = api + .generate_historical_batch_proof_with_context( + &BlockId::hash(block_hash), + sp_core::ExecutionContext::OffchainCall(None), + leaf_indices, + leaves_count, + ) + .map_err(runtime_error_into_rpc_error)? 
+ .map_err(mmr_error_into_rpc_error)?; + + Ok(LeafBatchProof::new(block_hash, leaves, proof)) + } } /// Converts a mmr-specific error into a [`CallError`]. diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index 9f989847af0f9..8b4f2b60bc198 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -330,7 +330,27 @@ impl, I: 'static> Pallet { (Vec>, primitives::BatchProof<>::Hash>), primitives::Error, > { - let mmr: ModuleMmr = mmr::Mmr::new(Self::mmr_leaves()); + Self::generate_historical_batch_proof(leaf_indices, Self::mmr_leaves()) + } + + /// Generate a MMR proof for the given `leaf_indices` for the MMR of `leaves_count` size. + /// + /// Note this method can only be used from an off-chain context + /// (Offchain Worker or Runtime API call), since it requires + /// all the leaves to be present. + /// It may return an error or panic if used incorrectly. + pub fn generate_historical_batch_proof( + leaf_indices: Vec, + leaves_count: LeafIndex, + ) -> Result< + (Vec>, primitives::BatchProof<>::Hash>), + primitives::Error, + > { + if leaves_count > Self::mmr_leaves() { + return Err(Error::InvalidLeavesCount) + } + + let mmr: ModuleMmr = mmr::Mmr::new(leaves_count); mmr.generate_batch_proof(leaf_indices) } diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index d6886f90a5da7..bcb775ba02819 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -227,7 +227,8 @@ fn should_generate_proofs_correctly() { let _ = env_logger::try_init(); let mut ext = new_test_ext(); // given - ext.execute_with(|| add_blocks(7)); + let num_blocks: u64 = 7; + ext.execute_with(|| add_blocks(num_blocks as usize)); ext.persist_offchain_overlay(); // Try to generate proofs now. 
This requires the offchain extensions to be present @@ -241,6 +242,23 @@ fn should_generate_proofs_correctly() { crate::Pallet::::generate_batch_proof(vec![leaf_index]).unwrap() }) .collect::>(); + // when generate historical proofs for all leaves + let historical_proofs = (0_u64..crate::NumberOfLeaves::::get()) + .into_iter() + .map(|leaf_index| { + let mut proofs = vec![]; + for leaves_count in leaf_index + 1..=num_blocks { + proofs.push( + crate::Pallet::::generate_historical_batch_proof( + vec![leaf_index], + leaves_count, + ) + .unwrap(), + ) + } + proofs + }) + .collect::>(); // then assert_eq!( @@ -258,6 +276,79 @@ fn should_generate_proofs_correctly() { } ) ); + assert_eq!( + historical_proofs[0][0], + ( + vec![Compact::new(((0, H256::repeat_byte(1)).into(), LeafData::new(1).into(),))], + BatchProof { leaf_indices: vec![0], leaf_count: 1, items: vec![] } + ) + ); + + // D + // / \ + // / \ + // A B C + // / \ / \ / \ + // 1 2 3 4 5 6 7 + // + // we're proving 3 => we need { 4, A, C++7 } + assert_eq!( + proofs[2], + ( + vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], + BatchProof { + leaf_indices: vec![2], + leaf_count: 7, + items: vec![ + hex("1b14c1dc7d3e4def11acdf31be0584f4b85c3673f1ff72a3af467b69a3b0d9d0"), + hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854"), + hex("dca421199bdcc55bb773c6b6967e8d16675de69062b52285ca63685241fdf626"), + ], + } + ) + ); + // A + // / \ + // 1 2 3 + // + // we're proving 3 => we need { A } + assert_eq!( + historical_proofs[2][0], + ( + vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], + BatchProof { + leaf_indices: vec![2], + leaf_count: 3, + items: vec![hex( + "672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854" + ),], + } + ) + ); + // D + // / \ + // / \ + // A B + // / \ / \ + // 1 2 3 4 5 + // we're proving 3 => we need { 4, A, 5 } + assert_eq!( + historical_proofs[2][2], + ( + vec![Compact::new(((2, 
H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], + BatchProof { + leaf_indices: vec![2], + leaf_count: 5, + items: vec![ + hex("1b14c1dc7d3e4def11acdf31be0584f4b85c3673f1ff72a3af467b69a3b0d9d0"), + hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854"), + hex("3b031d22e24f1126c8f7d2f394b663f9b960ed7abbedb7152e17ce16112656d0") + ], + } + ) + ); + assert_eq!(historical_proofs[2][4], proofs[2]); + assert_eq!( proofs[4], ( @@ -273,6 +364,21 @@ fn should_generate_proofs_correctly() { } ) ); + assert_eq!( + historical_proofs[4][0], + ( + vec![Compact::new(((4, H256::repeat_byte(5)).into(), LeafData::new(5).into(),))], + BatchProof { + leaf_indices: vec![4], + leaf_count: 5, + items: vec![hex( + "ae88a0825da50e953e7a359c55fe13c8015e48d03d301b8bdfc9193874da9252" + ),], + } + ) + ); + assert_eq!(historical_proofs[4][2], proofs[4]); + assert_eq!( proofs[6], ( @@ -287,6 +393,7 @@ fn should_generate_proofs_correctly() { } ) ); + assert_eq!(historical_proofs[6][0], proofs[6]); }); } @@ -302,9 +409,8 @@ fn should_generate_batch_proof_correctly() { // to retrieve full leaf data. 
register_offchain_ext(&mut ext); ext.execute_with(|| { - // when generate proofs for all leaves + // when generate proofs for a batch of leaves let (.., proof) = crate::Pallet::::generate_batch_proof(vec![0, 4, 5]).unwrap(); - // then assert_eq!( proof, @@ -318,6 +424,28 @@ fn should_generate_batch_proof_correctly() { ], } ); + + // when generate historical proofs for a batch of leaves + let (.., historical_proof) = + crate::Pallet::::generate_historical_batch_proof(vec![0, 4, 5], 6).unwrap(); + // then + assert_eq!( + historical_proof, + BatchProof { + leaf_indices: vec![0, 4, 5], + leaf_count: 6, + items: vec![ + hex("ad4cbc033833612ccd4626d5f023b9dfc50a35e838514dd1f3c86f8506728705"), + hex("cb24f4614ad5b2a5430344c99545b421d9af83c46fd632d70a332200884b4d46"), + ], + } + ); + + // when generate historical proofs for a batch of leaves + let (.., historical_proof) = + crate::Pallet::::generate_historical_batch_proof(vec![0, 4, 5], 7).unwrap(); + // then + assert_eq!(historical_proof, proof); }); } @@ -338,11 +466,33 @@ fn should_verify() { // when crate::Pallet::::generate_batch_proof(vec![5]).unwrap() }); + let (simple_historical_leaves, simple_historical_proof5) = ext.execute_with(|| { + // when + crate::Pallet::::generate_historical_batch_proof(vec![5], 6).unwrap() + }); + let (advanced_historical_leaves, advanced_historical_proof5) = ext.execute_with(|| { + // when + crate::Pallet::::generate_historical_batch_proof(vec![5], 7).unwrap() + }); ext.execute_with(|| { add_blocks(7); // then assert_eq!(crate::Pallet::::verify_leaves(leaves, proof5), Ok(())); + assert_eq!( + crate::Pallet::::verify_leaves( + simple_historical_leaves, + simple_historical_proof5 + ), + Ok(()) + ); + assert_eq!( + crate::Pallet::::verify_leaves( + advanced_historical_leaves, + advanced_historical_proof5 + ), + Ok(()) + ); }); } @@ -350,16 +500,40 @@ fn should_verify() { fn should_verify_batch_proofs() { fn generate_and_verify_batch_proof( ext: &mut sp_io::TestExternalities, - leaves: &Vec, 
+ leaf_indices: &Vec, blocks_to_add: usize, ) { - let (leaves, proof) = ext - .execute_with(|| crate::Pallet::::generate_batch_proof(leaves.to_vec()).unwrap()); + let (leaves, proof) = ext.execute_with(|| { + crate::Pallet::::generate_batch_proof(leaf_indices.to_vec()).unwrap() + }); + + let mmr_size = ext.execute_with(|| crate::Pallet::::mmr_leaves()); + let min_mmr_size = leaf_indices.iter().max().unwrap() + 1; + + // generate historical proofs for all possible mmr sizes, + // lower bound being index of highest leaf to be proven + let historical_proofs = (min_mmr_size..=mmr_size) + .map(|mmr_size| { + ext.execute_with(|| { + crate::Pallet::::generate_historical_batch_proof( + leaf_indices.to_vec(), + mmr_size, + ) + .unwrap() + }) + }) + .collect::>(); ext.execute_with(|| { add_blocks(blocks_to_add); // then assert_eq!(crate::Pallet::::verify_leaves(leaves, proof), Ok(())); + historical_proofs.iter().for_each(|(leaves, proof)| { + assert_eq!( + crate::Pallet::::verify_leaves(leaves.clone(), proof.clone()), + Ok(()) + ); + }); }) } @@ -378,7 +552,7 @@ fn should_verify_batch_proofs() { ext.persist_offchain_overlay(); // generate powerset (skipping empty set) of all possible leaf combinations for mmr size n - let leaves_set: Vec> = (0..n).into_iter().powerset().skip(1).collect(); + let leaves_set: Vec> = (0..=n).into_iter().powerset().skip(1).collect(); leaves_set.iter().for_each(|leaves_subset| { generate_and_verify_batch_proof(&mut ext, leaves_subset, 0); @@ -393,7 +567,7 @@ fn should_verify_batch_proofs() { ext.persist_offchain_overlay(); // generate all possible 2-leaf combinations for mmr size n - let leaves_set: Vec> = (0..n).into_iter().combinations(2).collect(); + let leaves_set: Vec> = (0..=n).into_iter().combinations(2).collect(); leaves_set.iter().for_each(|leaves_subset| { generate_and_verify_batch_proof(&mut ext, leaves_subset, 0); @@ -414,7 +588,13 @@ fn verification_should_be_stateless() { // Start off with chain initialisation and storing indexing 
data off-chain // (MMR Leafs) let mut ext = new_test_ext(); - ext.execute_with(|| add_blocks(7)); + let (root_6, root_7) = ext.execute_with(|| { + add_blocks(6); + let root_6 = crate::Pallet::::mmr_root_hash(); + add_blocks(1); + let root_7 = crate::Pallet::::mmr_root_hash(); + (root_6, root_7) + }); ext.persist_offchain_overlay(); // Try to generate proof now. This requires the offchain extensions to be present @@ -424,12 +604,27 @@ fn verification_should_be_stateless() { // when crate::Pallet::::generate_batch_proof(vec![5]).unwrap() }); - let root = ext.execute_with(|| crate::Pallet::::mmr_root_hash()); + let (_, historical_proof5) = ext.execute_with(|| { + // when + crate::Pallet::::generate_historical_batch_proof(vec![5], 6).unwrap() + }); // Verify proof without relying on any on-chain data. let leaf = crate::primitives::DataOrHash::Data(leaves[0].clone()); assert_eq!( - crate::verify_leaves_proof::<::Hashing, _>(root, vec![leaf], proof5), + crate::verify_leaves_proof::<::Hashing, _>( + root_7, + vec![leaf.clone()], + proof5 + ), + Ok(()) + ); + assert_eq!( + crate::verify_leaves_proof::<::Hashing, _>( + root_6, + vec![leaf], + historical_proof5 + ), Ok(()) ); } @@ -441,7 +636,13 @@ fn should_verify_batch_proof_statelessly() { // Start off with chain initialisation and storing indexing data off-chain // (MMR Leafs) let mut ext = new_test_ext(); - ext.execute_with(|| add_blocks(7)); + let (root_6, root_7) = ext.execute_with(|| { + add_blocks(6); + let root_6 = crate::Pallet::::mmr_root_hash(); + add_blocks(1); + let root_7 = crate::Pallet::::mmr_root_hash(); + (root_6, root_7) + }); ext.persist_offchain_overlay(); // Try to generate proof now. 
This requires the offchain extensions to be present @@ -451,12 +652,15 @@ fn should_verify_batch_proof_statelessly() { // when crate::Pallet::::generate_batch_proof(vec![0, 4, 5]).unwrap() }); - let root = ext.execute_with(|| crate::Pallet::::mmr_root_hash()); + let (historical_leaves, historical_proof) = ext.execute_with(|| { + // when + crate::Pallet::::generate_historical_batch_proof(vec![0, 4, 5], 6).unwrap() + }); // Verify proof without relying on any on-chain data. assert_eq!( crate::verify_leaves_proof::<::Hashing, _>( - root, + root_7, leaves .into_iter() .map(|leaf| crate::primitives::DataOrHash::Data(leaf)) @@ -465,6 +669,17 @@ fn should_verify_batch_proof_statelessly() { ), Ok(()) ); + assert_eq!( + crate::verify_leaves_proof::<::Hashing, _>( + root_6, + historical_leaves + .into_iter() + .map(|leaf| crate::primitives::DataOrHash::Data(leaf)) + .collect(), + historical_proof + ), + Ok(()) + ); } #[test] @@ -721,3 +936,36 @@ fn should_verify_canonicalized() { assert_eq!(crate::Pallet::::verify_leaves(leaves, proofs), Ok(())); }); } + +#[test] +fn does_not_panic_when_generating_historical_proofs() { + let _ = env_logger::try_init(); + let mut ext = new_test_ext(); + + // given 7 blocks (7 MMR leaves) + ext.execute_with(|| add_blocks(7)); + ext.persist_offchain_overlay(); + + // Try to generate historical proof with invalid arguments. This requires the offchain + // extensions to be present to retrieve full leaf data. 
+ register_offchain_ext(&mut ext); + ext.execute_with(|| { + // when leaf index is invalid + assert_eq!( + crate::Pallet::::generate_historical_batch_proof(vec![10], 7), + Err(Error::LeafNotFound), + ); + + // when leaves count is invalid + assert_eq!( + crate::Pallet::::generate_historical_batch_proof(vec![3], 100), + Err(Error::InvalidLeavesCount), + ); + + // when both leaf index and leaves count are invalid + assert_eq!( + crate::Pallet::::generate_historical_batch_proof(vec![10], 100), + Err(Error::InvalidLeavesCount), + ); + }); +} diff --git a/primitives/merkle-mountain-range/src/lib.rs b/primitives/merkle-mountain-range/src/lib.rs index 29a7e3d1a6fb6..c40a594739ec1 100644 --- a/primitives/merkle-mountain-range/src/lib.rs +++ b/primitives/merkle-mountain-range/src/lib.rs @@ -402,6 +402,8 @@ pub enum Error { PalletNotIncluded, /// Cannot find the requested leaf index InvalidLeafIndex, + /// The provided leaves count is larger than the actual leaves count. + InvalidLeavesCount, } impl Error { @@ -455,7 +457,14 @@ sp_api::decl_runtime_apis! { fn mmr_root() -> Result; /// Generate MMR proof for a series of leaves under given indices. - fn generate_batch_proof(leaf_indices: Vec) -> Result<(Vec, BatchProof), Error>; + fn generate_batch_proof(leaf_indices: Vec) + -> Result<(Vec, BatchProof), Error>; + + /// Generate MMR proof for a series of leaves under given indices, using MMR at given `leaves_count` size. + fn generate_historical_batch_proof( + leaf_indices: Vec, + leaves_count: LeafIndex + ) -> Result<(Vec, BatchProof), Error>; /// Verify MMR proof against on-chain MMR for a batch of leaves. 
/// From 37664fe5b3513eb996225f016eceaf74963b8133 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Sun, 2 Oct 2022 17:16:45 +0200 Subject: [PATCH 30/42] Remove contracts RPCs (#12358) * Remove contracts RPCs * Remove serde as RPC serialization is no longer needed * Rename folder to match crate name * Compile fix * Remove Byte wrapper --- Cargo.lock | 26 +- Cargo.toml | 4 +- bin/node/rpc/Cargo.toml | 1 - bin/node/rpc/src/lib.rs | 3 - bin/node/runtime/Cargo.toml | 6 +- bin/node/runtime/src/lib.rs | 2 +- frame/contracts/Cargo.toml | 2 +- .../{common => primitives}/Cargo.toml | 8 - .../{common => primitives}/README.md | 0 .../{common => primitives}/src/lib.rs | 91 +-- frame/contracts/rpc/Cargo.toml | 30 - frame/contracts/rpc/README.md | 3 - frame/contracts/rpc/runtime-api/README.md | 7 - frame/contracts/rpc/src/lib.rs | 524 ------------------ .../{rpc => }/runtime-api/Cargo.toml | 12 +- frame/contracts/runtime-api/README.md | 7 + .../{rpc => }/runtime-api/src/lib.rs | 6 +- frame/contracts/src/exec.rs | 27 +- frame/contracts/src/lib.rs | 8 +- frame/contracts/src/tests.rs | 15 +- frame/contracts/src/wasm/mod.rs | 103 ++-- frame/contracts/src/wasm/runtime.rs | 10 +- 22 files changed, 103 insertions(+), 792 deletions(-) rename frame/contracts/{common => primitives}/Cargo.toml (70%) rename frame/contracts/{common => primitives}/README.md (100%) rename frame/contracts/{common => primitives}/src/lib.rs (74%) delete mode 100644 frame/contracts/rpc/Cargo.toml delete mode 100644 frame/contracts/rpc/README.md delete mode 100644 frame/contracts/rpc/runtime-api/README.md delete mode 100644 frame/contracts/rpc/src/lib.rs rename frame/contracts/{rpc => }/runtime-api/Cargo.toml (78%) create mode 100644 frame/contracts/runtime-api/README.md rename frame/contracts/{rpc => }/runtime-api/src/lib.rs (94%) diff --git a/Cargo.lock b/Cargo.lock index de50d4ec27105..723a09ee9a39f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3371,7 +3371,7 @@ dependencies = [ 
"pallet-collective", "pallet-contracts", "pallet-contracts-primitives", - "pallet-contracts-rpc-runtime-api", + "pallet-contracts-runtime-api", "pallet-conviction-voting", "pallet-democracy", "pallet-election-provider-multi-phase", @@ -4825,7 +4825,6 @@ version = "3.0.0-dev" dependencies = [ "jsonrpsee", "node-primitives", - "pallet-contracts-rpc", "pallet-mmr-rpc", "pallet-transaction-payment-rpc", "sc-chain-spec", @@ -5530,10 +5529,6 @@ version = "6.0.0" dependencies = [ "bitflags", "parity-scale-codec", - "scale-info", - "serde", - "sp-core", - "sp-rpc", "sp-runtime", "sp-std", ] @@ -5548,24 +5543,7 @@ dependencies = [ ] [[package]] -name = "pallet-contracts-rpc" -version = "4.0.0-dev" -dependencies = [ - "jsonrpsee", - "pallet-contracts-primitives", - "pallet-contracts-rpc-runtime-api", - "parity-scale-codec", - "serde", - "serde_json", - "sp-api", - "sp-blockchain", - "sp-core", - "sp-rpc", - "sp-runtime", -] - -[[package]] -name = "pallet-contracts-rpc-runtime-api" +name = "pallet-contracts-runtime-api" version = "4.0.0-dev" dependencies = [ "pallet-contracts-primitives", diff --git a/Cargo.toml b/Cargo.toml index 25f12a2c9fd3f..02bc6aede8669 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -86,8 +86,8 @@ members = [ "frame/child-bounties", "frame/collective", "frame/contracts", - "frame/contracts/rpc", - "frame/contracts/rpc/runtime-api", + "frame/contracts/primitives", + "frame/contracts/runtime-api", "frame/conviction-voting", "frame/democracy", "frame/fast-unstake", diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index 0b69ae27010fa..1f93feabf2f1e 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -14,7 +14,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.15.1", features = ["server"] } node-primitives = { version = "2.0.0", path = "../primitives" } -pallet-contracts-rpc = { version = "4.0.0-dev", path = "../../../frame/contracts/rpc/" } pallet-mmr-rpc = { version = "3.0.0", path = 
"../../../frame/merkle-mountain-range/rpc/" } pallet-transaction-payment-rpc = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/rpc/" } sc-chain-spec = { version = "4.0.0-dev", path = "../../../client/chain-spec" } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index 0e6b04087fa63..94e01619c6e63 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -108,7 +108,6 @@ where + Send + 'static, C::Api: substrate_frame_rpc_system::AccountNonceApi, - C::Api: pallet_contracts_rpc::ContractsRuntimeApi, C::Api: pallet_mmr_rpc::MmrRuntimeApi::Hash>, C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: BabeApi, @@ -118,7 +117,6 @@ where B: sc_client_api::Backend + Send + Sync + 'static, B::State: sc_client_api::backend::StateBackend>, { - use pallet_contracts_rpc::{Contracts, ContractsApiServer}; use pallet_mmr_rpc::{Mmr, MmrApiServer}; use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; use sc_consensus_babe_rpc::{Babe, BabeApiServer}; @@ -150,7 +148,6 @@ where // Making synchronous calls in light client freezes the browser currently, // more context: https://github.com/paritytech/substrate/pull/3480 // These RPCs should use an asynchronous caller instead. 
- io.merge(Contracts::new(client.clone()).into_rpc())?; io.merge(Mmr::new(client.clone()).into_rpc())?; io.merge(TransactionPayment::new(client.clone()).into_rpc())?; io.merge( diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index e722024231651..ac3afc19da50f 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -61,8 +61,8 @@ pallet-bounties = { version = "4.0.0-dev", default-features = false, path = "../ pallet-child-bounties = { version = "4.0.0-dev", default-features = false, path = "../../../frame/child-bounties" } pallet-collective = { version = "4.0.0-dev", default-features = false, path = "../../../frame/collective" } pallet-contracts = { version = "4.0.0-dev", default-features = false, path = "../../../frame/contracts" } -pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "../../../frame/contracts/common/" } -pallet-contracts-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } +pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "../../../frame/contracts/primitives/" } +pallet-contracts-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/contracts/runtime-api/" } pallet-conviction-voting = { version = "4.0.0-dev", default-features = false, path = "../../../frame/conviction-voting" } pallet-democracy = { version = "4.0.0-dev", default-features = false, path = "../../../frame/democracy" } pallet-election-provider-multi-phase = { version = "4.0.0-dev", default-features = false, path = "../../../frame/election-provider-multi-phase" } @@ -139,7 +139,7 @@ std = [ "pallet-collective/std", "pallet-contracts/std", "pallet-contracts-primitives/std", - "pallet-contracts-rpc-runtime-api/std", + "pallet-contracts-runtime-api/std", "pallet-conviction-voting/std", "pallet-democracy/std", "pallet-elections-phragmen/std", diff --git 
a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 4fa4049e22682..f0c68b5b225cd 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1942,7 +1942,7 @@ impl_runtime_apis! { } } - impl pallet_contracts_rpc_runtime_api::ContractsApi< + impl pallet_contracts_runtime_api::ContractsApi< Block, AccountId, Balance, BlockNumber, Hash, > for Runtime diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 30fbad680ebe5..7c3b677e06436 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -36,7 +36,7 @@ rand_pcg = { version = "0.3", optional = true } frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "common" } +pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "primitives" } pallet-contracts-proc-macro = { version = "4.0.0-dev", path = "proc-macro" } sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } diff --git a/frame/contracts/common/Cargo.toml b/frame/contracts/primitives/Cargo.toml similarity index 70% rename from frame/contracts/common/Cargo.toml rename to frame/contracts/primitives/Cargo.toml index 49d7973ab155f..64e332007350b 100644 --- a/frame/contracts/common/Cargo.toml +++ b/frame/contracts/primitives/Cargo.toml @@ -15,23 +15,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bitflags = "1.0" codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } -serde = { 
version = "1", features = ["derive"], optional = true } # Substrate Dependencies (This crate should not rely on frame) -sp-core = { version = "6.0.0", path = "../../../primitives/core", default-features = false } sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } -sp-rpc = { version = "6.0.0", path = "../../../primitives/rpc", optional = true } sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } [features] default = ["std"] std = [ "codec/std", - "scale-info/std", - "sp-core/std", "sp-runtime/std", "sp-std/std", - "sp-rpc", - "serde", ] diff --git a/frame/contracts/common/README.md b/frame/contracts/primitives/README.md similarity index 100% rename from frame/contracts/common/README.md rename to frame/contracts/primitives/README.md diff --git a/frame/contracts/common/src/lib.rs b/frame/contracts/primitives/src/lib.rs similarity index 74% rename from frame/contracts/common/src/lib.rs rename to frame/contracts/primitives/src/lib.rs index f810725afcd36..5daf875ac2651 100644 --- a/frame/contracts/common/src/lib.rs +++ b/frame/contracts/primitives/src/lib.rs @@ -21,32 +21,16 @@ use bitflags::bitflags; use codec::{Decode, Encode}; -use sp_core::Bytes; use sp_runtime::{ traits::{Saturating, Zero}, DispatchError, RuntimeDebug, }; use sp_std::prelude::*; -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; - -#[cfg(feature = "std")] -use sp_rpc::number::NumberOrHex; - /// Result type of a `bare_call` or `bare_instantiate` call. /// /// It contains the execution result together with some auxiliary information. 
#[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[cfg_attr( - feature = "std", - serde( - rename_all = "camelCase", - bound(serialize = "R: Serialize, Balance: Copy + Into"), - bound(deserialize = "R: Deserialize<'de>, Balance: TryFrom") - ) -)] pub struct ContractResult { /// How much gas was consumed during execution. pub gas_consumed: u64, @@ -80,7 +64,6 @@ pub struct ContractResult { /// /// The debug message is never generated during on-chain execution. It is reserved for /// RPC calls. - #[cfg_attr(feature = "std", serde(with = "as_string"))] pub debug_message: Vec, /// The execution result of the wasm code. pub result: R, @@ -113,8 +96,6 @@ pub enum ContractAccessError { bitflags! { /// Flags used by a contract to customize exit behaviour. #[derive(Encode, Decode)] - #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] - #[cfg_attr(feature = "std", serde(rename_all = "camelCase", transparent))] pub struct ReturnFlags: u32 { /// If this bit is set all changes made by the contract execution are rolled back. const REVERT = 0x0000_0001; @@ -123,13 +104,11 @@ bitflags! { /// Output of a contract call or instantiation which ran to completion. #[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] pub struct ExecReturnValue { /// Flags passed along by `seal_return`. Empty when `seal_return` was never called. pub flags: ReturnFlags, /// Buffer passed along by `seal_return`. Empty when `seal_return` was never called. - pub data: Bytes, + pub data: Vec, } impl ExecReturnValue { @@ -141,8 +120,6 @@ impl ExecReturnValue { /// The result of a successful contract instantiation. 
#[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] pub struct InstantiateReturnValue { /// The output of the called constructor. pub result: ExecReturnValue, @@ -152,63 +129,40 @@ pub struct InstantiateReturnValue { /// The result of succesfully uploading a contract. #[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[cfg_attr( - feature = "std", - serde( - rename_all = "camelCase", - bound(serialize = "CodeHash: Serialize, Balance: Copy + Into"), - bound(deserialize = "CodeHash: Deserialize<'de>, Balance: TryFrom") - ) -)] pub struct CodeUploadReturnValue { /// The key under which the new code is stored. pub code_hash: CodeHash, /// The deposit that was reserved at the caller. Is zero when the code already existed. - #[cfg_attr(feature = "std", serde(with = "as_hex"))] pub deposit: Balance, } /// Reference to an existing code hash or a new wasm module. #[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] pub enum Code { /// A wasm module as raw bytes. - Upload(Bytes), + Upload(Vec), /// The code hash of an on-chain wasm blob. Existing(Hash), } impl>, Hash> From for Code { fn from(from: T) -> Self { - Code::Upload(Bytes(from.into())) + Code::Upload(from.into()) } } /// The amount of balance that was either charged or refunded in order to pay for storage. #[derive(Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug, Clone)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[cfg_attr( - feature = "std", - serde( - rename_all = "camelCase", - bound(serialize = "Balance: Copy + Into"), - bound(deserialize = "Balance: TryFrom") - ) -)] pub enum StorageDeposit { /// The transaction reduced storage consumption. 
/// /// This means that the specified amount of balance was transferred from the involved /// contracts to the call origin. - #[cfg_attr(feature = "std", serde(with = "as_hex"))] Refund(Balance), /// The transaction increased overall storage usage. /// /// This means that the specified amount of balance was transferred from the call origin /// to the contracts involved. - #[cfg_attr(feature = "std", serde(with = "as_hex"))] Charge(Balance), } @@ -295,42 +249,3 @@ where } } } - -#[cfg(feature = "std")] -mod as_string { - use super::*; - use serde::{ser::Error, Deserializer, Serializer}; - - pub fn serialize(bytes: &Vec, serializer: S) -> Result { - std::str::from_utf8(bytes) - .map_err(|e| S::Error::custom(format!("Debug buffer contains invalid UTF8: {}", e)))? - .serialize(serializer) - } - - pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result, D::Error> { - Ok(String::deserialize(deserializer)?.into_bytes()) - } -} - -#[cfg(feature = "std")] -mod as_hex { - use super::*; - use serde::{de::Error as _, Deserializer, Serializer}; - - pub fn serialize(balance: &Balance, serializer: S) -> Result - where - S: Serializer, - Balance: Copy + Into, - { - Into::::into(*balance).serialize(serializer) - } - - pub fn deserialize<'de, D, Balance>(deserializer: D) -> Result - where - D: Deserializer<'de>, - Balance: TryFrom, - { - Balance::try_from(NumberOrHex::deserialize(deserializer)?) 
- .map_err(|_| D::Error::custom("Cannot decode NumberOrHex to Balance")) - } -} diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml deleted file mode 100644 index 7876c7cba40d0..0000000000000 --- a/frame/contracts/rpc/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "pallet-contracts-rpc" -version = "4.0.0-dev" -authors = ["Parity Technologies "] -edition = "2021" -license = "Apache-2.0" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -description = "Node-specific RPC methods for interaction with contracts." -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0" } -jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } -serde = { version = "1", features = ["derive"] } - -# Substrate Dependencies -pallet-contracts-primitives = { version = "6.0.0", path = "../common" } -pallet-contracts-rpc-runtime-api = { version = "4.0.0-dev", path = "./runtime-api" } -sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } -sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } -sp-core = { version = "6.0.0", path = "../../../primitives/core" } -sp-rpc = { version = "6.0.0", path = "../../../primitives/rpc" } -sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } - -[dev-dependencies] -serde_json = "1" diff --git a/frame/contracts/rpc/README.md b/frame/contracts/rpc/README.md deleted file mode 100644 index be6df237bf60d..0000000000000 --- a/frame/contracts/rpc/README.md +++ /dev/null @@ -1,3 +0,0 @@ -Node-specific RPC methods for interaction with contracts. 
- -License: Apache-2.0 \ No newline at end of file diff --git a/frame/contracts/rpc/runtime-api/README.md b/frame/contracts/rpc/runtime-api/README.md deleted file mode 100644 index d57f29a93bd1d..0000000000000 --- a/frame/contracts/rpc/runtime-api/README.md +++ /dev/null @@ -1,7 +0,0 @@ -Runtime API definition required by Contracts RPC extensions. - -This API should be imported and implemented by the runtime, -of a node that wants to use the custom RPC extension -adding Contracts access methods. - -License: Apache-2.0 \ No newline at end of file diff --git a/frame/contracts/rpc/src/lib.rs b/frame/contracts/rpc/src/lib.rs deleted file mode 100644 index 1df7a5753f77e..0000000000000 --- a/frame/contracts/rpc/src/lib.rs +++ /dev/null @@ -1,524 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Node-specific RPC methods for interaction with contracts. 
- -#![warn(unused_crate_dependencies)] - -use std::{marker::PhantomData, sync::Arc}; - -use codec::Codec; -use jsonrpsee::{ - core::{async_trait, Error as JsonRpseeError, RpcResult}, - proc_macros::rpc, - types::error::{CallError, ErrorCode, ErrorObject}, -}; -use pallet_contracts_primitives::{ - Code, CodeUploadResult, ContractExecResult, ContractInstantiateResult, -}; -use serde::{Deserialize, Serialize}; -use sp_api::ProvideRuntimeApi; -use sp_blockchain::HeaderBackend; -use sp_core::Bytes; -use sp_rpc::number::NumberOrHex; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT}, -}; - -pub use pallet_contracts_rpc_runtime_api::ContractsApi as ContractsRuntimeApi; - -const RUNTIME_ERROR: i32 = 1; -const CONTRACT_DOESNT_EXIST: i32 = 2; -const KEY_DECODING_FAILED: i32 = 3; - -pub type Weight = u64; - -/// A rough estimate of how much gas a decent hardware consumes per second, -/// using native execution. -/// This value is used to set the upper bound for maximal contract calls to -/// prevent blocking the RPC for too long. -/// -/// As 1 gas is equal to 1 weight we base this on the conducted benchmarks which -/// determined runtime weights: -/// -const GAS_PER_SECOND: Weight = 1_000_000_000_000; - -/// The maximum amount of weight that the call and instantiate rpcs are allowed to consume. -/// This puts a ceiling on the weight limit that is supplied to the rpc as an argument. -const GAS_LIMIT: Weight = 5 * GAS_PER_SECOND; - -/// A private newtype for converting `ContractAccessError` into an RPC error. 
-struct ContractAccessError(pallet_contracts_primitives::ContractAccessError); - -impl From for JsonRpseeError { - fn from(e: ContractAccessError) -> Self { - use pallet_contracts_primitives::ContractAccessError::*; - match e.0 { - DoesntExist => CallError::Custom(ErrorObject::owned( - CONTRACT_DOESNT_EXIST, - "The specified contract doesn't exist.", - None::<()>, - )) - .into(), - KeyDecodingFailed => CallError::Custom(ErrorObject::owned( - KEY_DECODING_FAILED, - "Failed to decode the specified storage key.", - None::<()>, - )) - .into(), - } - } -} - -/// A struct that encodes RPC parameters required for a call to a smart-contract. -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(deny_unknown_fields)] -pub struct CallRequest { - origin: AccountId, - dest: AccountId, - value: NumberOrHex, - gas_limit: NumberOrHex, - storage_deposit_limit: Option, - input_data: Bytes, -} - -/// A struct that encodes RPC parameters required to instantiate a new smart-contract. -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(deny_unknown_fields)] -pub struct InstantiateRequest { - origin: AccountId, - value: NumberOrHex, - gas_limit: NumberOrHex, - storage_deposit_limit: Option, - code: Code, - data: Bytes, - salt: Bytes, -} - -/// A struct that encodes RPC parameters required for a call to upload a new code. -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(deny_unknown_fields)] -pub struct CodeUploadRequest { - origin: AccountId, - code: Bytes, - storage_deposit_limit: Option, -} - -/// Contracts RPC methods. -#[rpc(client, server)] -pub trait ContractsApi -where - Balance: Copy + TryFrom + Into, -{ - /// Executes a call to a contract. - /// - /// This call is performed locally without submitting any transactions. Thus executing this - /// won't change any state. Nonetheless, the calling state-changing contracts is still possible. 
- /// - /// This method is useful for calling getter-like methods on contracts or to dry-run a - /// a contract call in order to determine the `gas_limit`. - #[method(name = "contracts_call")] - fn call( - &self, - call_request: CallRequest, - at: Option, - ) -> RpcResult>; - - /// Instantiate a new contract. - /// - /// This instantiate is performed locally without submitting any transactions. Thus the contract - /// is not actually created. - /// - /// This method is useful for UIs to dry-run contract instantiations. - #[method(name = "contracts_instantiate")] - fn instantiate( - &self, - instantiate_request: InstantiateRequest, - at: Option, - ) -> RpcResult>; - - /// Upload new code without instantiating a contract from it. - /// - /// This upload is performed locally without submitting any transactions. Thus executing this - /// won't change any state. - /// - /// This method is useful for UIs to dry-run code upload. - #[method(name = "contracts_upload_code")] - fn upload_code( - &self, - upload_request: CodeUploadRequest, - at: Option, - ) -> RpcResult>; - - /// Returns the value under a specified storage `key` in a contract given by `address` param, - /// or `None` if it is not set. - #[method(name = "contracts_getStorage")] - fn get_storage( - &self, - address: AccountId, - key: Bytes, - at: Option, - ) -> RpcResult>; -} - -/// Contracts RPC methods. -pub struct Contracts { - client: Arc, - _marker: PhantomData, -} - -impl Contracts { - /// Create new `Contracts` with the given reference to the client. 
- pub fn new(client: Arc) -> Self { - Self { client, _marker: Default::default() } - } -} - -#[async_trait] -impl - ContractsApiServer< - ::Hash, - <::Header as HeaderT>::Number, - AccountId, - Balance, - Hash, - > for Contracts -where - Block: BlockT, - Client: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, - Client::Api: ContractsRuntimeApi< - Block, - AccountId, - Balance, - <::Header as HeaderT>::Number, - Hash, - >, - AccountId: Codec, - Balance: Codec + Copy + TryFrom + Into, - Hash: Codec, -{ - fn call( - &self, - call_request: CallRequest, - at: Option<::Hash>, - ) -> RpcResult> { - let api = self.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| - // If the block hash is not supplied assume the best block. - self.client.info().best_hash)); - - let CallRequest { origin, dest, value, gas_limit, storage_deposit_limit, input_data } = - call_request; - - let value: Balance = decode_hex(value, "balance")?; - let gas_limit: u64 = decode_hex(gas_limit, "weight")?; - let storage_deposit_limit: Option = - storage_deposit_limit.map(|l| decode_hex(l, "balance")).transpose()?; - limit_gas(gas_limit)?; - - api.call(&at, origin, dest, value, gas_limit, storage_deposit_limit, input_data.to_vec()) - .map_err(runtime_error_into_rpc_err) - } - - fn instantiate( - &self, - instantiate_request: InstantiateRequest, - at: Option<::Hash>, - ) -> RpcResult> { - let api = self.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| - // If the block hash is not supplied assume the best block. 
- self.client.info().best_hash)); - - let InstantiateRequest { - origin, - value, - gas_limit, - storage_deposit_limit, - code, - data, - salt, - } = instantiate_request; - - let value: Balance = decode_hex(value, "balance")?; - let gas_limit: u64 = decode_hex(gas_limit, "weight")?; - let storage_deposit_limit: Option = - storage_deposit_limit.map(|l| decode_hex(l, "balance")).transpose()?; - limit_gas(gas_limit)?; - - api.instantiate( - &at, - origin, - value, - gas_limit, - storage_deposit_limit, - code, - data.to_vec(), - salt.to_vec(), - ) - .map_err(runtime_error_into_rpc_err) - } - - fn upload_code( - &self, - upload_request: CodeUploadRequest, - at: Option<::Hash>, - ) -> RpcResult> { - let api = self.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| - // If the block hash is not supplied assume the best block. - self.client.info().best_hash)); - - let CodeUploadRequest { origin, code, storage_deposit_limit } = upload_request; - - let storage_deposit_limit: Option = - storage_deposit_limit.map(|l| decode_hex(l, "balance")).transpose()?; - - api.upload_code(&at, origin, code.to_vec(), storage_deposit_limit) - .map_err(runtime_error_into_rpc_err) - } - - fn get_storage( - &self, - address: AccountId, - key: Bytes, - at: Option<::Hash>, - ) -> RpcResult> { - let api = self.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash)); - let result = api - .get_storage(&at, address, key.to_vec()) - .map_err(runtime_error_into_rpc_err)? - .map_err(ContractAccessError)? - .map(Bytes); - - Ok(result) - } -} - -/// Converts a runtime trap into an RPC error. 
-fn runtime_error_into_rpc_err(err: impl std::fmt::Debug) -> JsonRpseeError { - CallError::Custom(ErrorObject::owned( - RUNTIME_ERROR, - "Runtime error", - Some(format!("{:?}", err)), - )) - .into() -} - -fn decode_hex>(from: H, name: &str) -> RpcResult { - from.try_into().map_err(|_| { - JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( - ErrorCode::InvalidParams.code(), - format!("{:?} does not fit into the {} type", from, name), - None::<()>, - ))) - }) -} - -fn limit_gas(gas_limit: Weight) -> RpcResult<()> { - if gas_limit > GAS_LIMIT { - Err(JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( - ErrorCode::InvalidParams.code(), - format!( - "Requested gas limit is greater than maximum allowed: {} > {}", - gas_limit, GAS_LIMIT - ), - None::<()>, - )))) - } else { - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use pallet_contracts_primitives::{ContractExecResult, ContractInstantiateResult}; - use sp_core::U256; - - fn trim(json: &str) -> String { - json.chars().filter(|c| !c.is_whitespace()).collect() - } - - #[test] - fn call_request_should_serialize_deserialize_properly() { - type Req = CallRequest; - let req: Req = serde_json::from_str( - r#" - { - "origin": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", - "dest": "5DRakbLVnjVrW6niwLfHGW24EeCEvDAFGEXrtaYS5M4ynoom", - "value": "0x112210f4B16c1cb1", - "gasLimit": 1000000000000, - "storageDepositLimit": 5000, - "inputData": "0x8c97db39" - } - "#, - ) - .unwrap(); - assert_eq!(req.gas_limit.into_u256(), U256::from(0xe8d4a51000u64)); - assert_eq!(req.storage_deposit_limit.map(|l| l.into_u256()), Some(5000.into())); - assert_eq!(req.value.into_u256(), U256::from(1234567890987654321u128)); - } - - #[test] - fn instantiate_request_should_serialize_deserialize_properly() { - type Req = InstantiateRequest; - let req: Req = serde_json::from_str( - r#" - { - "origin": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", - "value": "0x88", - "gasLimit": 42, - "code": { "existing": 
"0x1122" }, - "data": "0x4299", - "salt": "0x9988" - } - "#, - ) - .unwrap(); - - assert_eq!(req.origin, "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL"); - assert_eq!(req.value.into_u256(), 0x88.into()); - assert_eq!(req.gas_limit.into_u256(), 42.into()); - assert_eq!(req.storage_deposit_limit, None); - assert_eq!(&*req.data, [0x42, 0x99].as_ref()); - assert_eq!(&*req.salt, [0x99, 0x88].as_ref()); - let code = match req.code { - Code::Existing(hash) => hash, - _ => panic!("json encoded an existing hash"), - }; - assert_eq!(&code, "0x1122"); - } - - #[test] - fn code_upload_request_should_serialize_deserialize_properly() { - type Req = CodeUploadRequest; - let req: Req = serde_json::from_str( - r#" - { - "origin": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", - "code": "0x8c97db39", - "storageDepositLimit": 5000 - } - "#, - ) - .unwrap(); - assert_eq!(req.origin, "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL"); - assert_eq!(&*req.code, [0x8c, 0x97, 0xdb, 0x39].as_ref()); - assert_eq!(req.storage_deposit_limit.map(|l| l.into_u256()), Some(5000.into())); - } - - #[test] - fn call_result_should_serialize_deserialize_properly() { - fn test(expected: &str) { - let res: ContractExecResult = serde_json::from_str(expected).unwrap(); - let actual = serde_json::to_string(&res).unwrap(); - assert_eq!(actual, trim(expected).as_str()); - } - test( - r#"{ - "gasConsumed": 5000, - "gasRequired": 8000, - "storageDeposit": {"charge": 42000}, - "debugMessage": "HelloWorld", - "result": { - "Ok": { - "flags": 5, - "data": "0x1234" - } - } - }"#, - ); - test( - r#"{ - "gasConsumed": 3400, - "gasRequired": 5200, - "storageDeposit": {"refund": 12000}, - "debugMessage": "HelloWorld", - "result": { - "Err": "BadOrigin" - } - }"#, - ); - } - - #[test] - fn instantiate_result_should_serialize_deserialize_properly() { - fn test(expected: &str) { - let res: ContractInstantiateResult = - serde_json::from_str(expected).unwrap(); - let actual = serde_json::to_string(&res).unwrap(); 
- assert_eq!(actual, trim(expected).as_str()); - } - test( - r#"{ - "gasConsumed": 5000, - "gasRequired": 8000, - "storageDeposit": {"refund": 12000}, - "debugMessage": "HelloWorld", - "result": { - "Ok": { - "result": { - "flags": 5, - "data": "0x1234" - }, - "accountId": "5CiPP" - } - } - }"#, - ); - test( - r#"{ - "gasConsumed": 3400, - "gasRequired": 5200, - "storageDeposit": {"charge": 0}, - "debugMessage": "HelloWorld", - "result": { - "Err": "BadOrigin" - } - }"#, - ); - } - - #[test] - fn code_upload_result_should_serialize_deserialize_properly() { - fn test(expected: &str) { - let res: CodeUploadResult = serde_json::from_str(expected).unwrap(); - let actual = serde_json::to_string(&res).unwrap(); - assert_eq!(actual, trim(expected).as_str()); - } - test( - r#"{ - "Ok": { - "codeHash": 4711, - "deposit": 99 - } - }"#, - ); - test( - r#"{ - "Err": "BadOrigin" - }"#, - ); - } -} diff --git a/frame/contracts/rpc/runtime-api/Cargo.toml b/frame/contracts/runtime-api/Cargo.toml similarity index 78% rename from frame/contracts/rpc/runtime-api/Cargo.toml rename to frame/contracts/runtime-api/Cargo.toml index bd07d577ec272..05b0e05d4c568 100644 --- a/frame/contracts/rpc/runtime-api/Cargo.toml +++ b/frame/contracts/runtime-api/Cargo.toml @@ -1,12 +1,12 @@ [package] -name = "pallet-contracts-rpc-runtime-api" +name = "pallet-contracts-runtime-api" version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" -description = "Runtime API definition required by Contracts RPC extensions." 
+description = "Runtime API definition used to provide dry-run capabilities" readme = "README.md" [package.metadata.docs.rs] @@ -17,10 +17,10 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } # Substrate Dependencies -pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "../../common" } -sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../../primitives/api" } -sp-runtime = { version = "6.0.0", default-features = false, path = "../../../../primitives/runtime" } -sp-std = { version = "4.0.0", default-features = false, path = "../../../../primitives/std" } +pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "../primitives" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } +sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } [features] default = ["std"] diff --git a/frame/contracts/runtime-api/README.md b/frame/contracts/runtime-api/README.md new file mode 100644 index 0000000000000..fed285b23b2ac --- /dev/null +++ b/frame/contracts/runtime-api/README.md @@ -0,0 +1,7 @@ +Runtime API definition used to provide dry-run capabilities + +This API should be imported and implemented by the runtime, +of a node that wants to provide clients with dry-run +capabilities. 
+ +License: Apache-2.0 \ No newline at end of file diff --git a/frame/contracts/rpc/runtime-api/src/lib.rs b/frame/contracts/runtime-api/src/lib.rs similarity index 94% rename from frame/contracts/rpc/runtime-api/src/lib.rs rename to frame/contracts/runtime-api/src/lib.rs index 9765b37057c7b..79fd20c8c0163 100644 --- a/frame/contracts/rpc/runtime-api/src/lib.rs +++ b/frame/contracts/runtime-api/src/lib.rs @@ -15,11 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Runtime API definition required by Contracts RPC extensions. +//! Runtime API definition used to provide dry-run capabilities. //! //! This API should be imported and implemented by the runtime, -//! of a node that wants to use the custom RPC extension -//! adding Contracts access methods. +//! of a node that wants to provide clients with dry-run +//! capabilities. #![cfg_attr(not(feature = "std"), no_std)] diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 6260dd41de707..bf35410d0bd4b 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -1384,7 +1384,6 @@ mod tests { use frame_system::{EventRecord, Phase}; use pallet_contracts_primitives::ReturnFlags; use pretty_assertions::assert_eq; - use sp_core::Bytes; use sp_runtime::{traits::Hash, DispatchError}; use std::{ cell::RefCell, @@ -1517,7 +1516,7 @@ mod tests { } fn exec_success() -> ExecResult { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) } fn exec_trapped() -> ExecResult { @@ -1586,7 +1585,7 @@ mod tests { let success_ch = MockLoader::insert(Call, move |ctx, _| { assert_eq!(ctx.ext.value_transferred(), value); - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) }); ExtBuilder::default().build().execute_with(|| { @@ -1621,13 
+1620,13 @@ mod tests { let success_ch = MockLoader::insert(Call, move |ctx, _| { assert_eq!(ctx.ext.value_transferred(), value); - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) }); let delegate_ch = MockLoader::insert(Call, move |ctx, _| { assert_eq!(ctx.ext.value_transferred(), value); let _ = ctx.ext.delegate_call(success_ch, Vec::new())?; - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) }); ExtBuilder::default().build().execute_with(|| { @@ -1662,7 +1661,7 @@ mod tests { let dest = BOB; let return_ch = MockLoader::insert(Call, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(Vec::new()) }) + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Vec::new() }) }); ExtBuilder::default().build().execute_with(|| { @@ -1715,7 +1714,7 @@ mod tests { let origin = ALICE; let dest = BOB; let return_ch = MockLoader::insert(Call, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![1, 2, 3, 4]) }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![1, 2, 3, 4] }) }); ExtBuilder::default().build().execute_with(|| { @@ -1736,7 +1735,7 @@ mod tests { let output = result.unwrap(); assert!(!output.did_revert()); - assert_eq!(output.data, Bytes(vec![1, 2, 3, 4])); + assert_eq!(output.data, vec![1, 2, 3, 4]); }); } @@ -1747,7 +1746,7 @@ mod tests { let origin = ALICE; let dest = BOB; let return_ch = MockLoader::insert(Call, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(vec![1, 2, 3, 4]) }) + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![1, 2, 3, 4] }) }); ExtBuilder::default().build().execute_with(|| { @@ -1768,7 +1767,7 @@ mod tests { let output = result.unwrap(); assert!(output.did_revert()); - assert_eq!(output.data, Bytes(vec![1, 2, 3, 4])); + assert_eq!(output.data, vec![1, 
2, 3, 4]); }); } @@ -2115,7 +2114,7 @@ mod tests { #[test] fn instantiation_work_with_success_output() { let dummy_ch = MockLoader::insert(Constructor, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![80, 65, 83, 83]) }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![80, 65, 83, 83] }) }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { @@ -2140,7 +2139,7 @@ mod tests { &[], None, ), - Ok((address, ref output)) if output.data == Bytes(vec![80, 65, 83, 83]) => address + Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address ); // Check that the newly created account has the expected code hash and @@ -2159,7 +2158,7 @@ mod tests { #[test] fn instantiation_fails_with_failing_output() { let dummy_ch = MockLoader::insert(Constructor, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(vec![70, 65, 73, 76]) }) + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70, 65, 73, 76] }) }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { @@ -2184,7 +2183,7 @@ mod tests { &[], None, ), - Ok((address, ref output)) if output.data == Bytes(vec![70, 65, 73, 76]) => address + Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address ); // Check that the account has not been created. 
diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index f9a1c8decf042..3aeb8742705c2 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -123,7 +123,7 @@ use pallet_contracts_primitives::{ StorageDeposit, }; use scale_info::TypeInfo; -use sp_core::{crypto::UncheckedFrom, Bytes}; +use sp_core::crypto::UncheckedFrom; use sp_runtime::traits::{Convert, Hash, Saturating, StaticLookup}; use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; @@ -512,7 +512,7 @@ pub mod pallet { value, gas_limit, storage_deposit_limit.map(Into::into), - Code::Upload(Bytes(code)), + Code::Upload(code), data, salt, None, @@ -743,7 +743,7 @@ pub mod pallet { value, gas_limit, storage_deposit_limit.map(Into::into), - Code::Upload(Bytes(code)), + Code::Upload(code), data, salt, None, @@ -1234,7 +1234,7 @@ where let try_exec = || { let schedule = T::Schedule::get(); let (extra_deposit, executable) = match code { - Code::Upload(Bytes(binary)) => { + Code::Upload(binary) => { let executable = PrefabWasmModule::from_code(binary, &schedule, origin.clone()) .map_err(|(err, msg)| { debug_message.as_mut().map(|buffer| buffer.extend(msg.as_bytes())); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index e5893c3dbd112..b4a8f8f4c834f 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -44,7 +44,6 @@ use frame_support::{ }; use frame_system::{self as system, EventRecord, Phase}; use pretty_assertions::{assert_eq, assert_ne}; -use sp_core::Bytes; use sp_io::hashing::blake2_256; use sp_keystore::{testing::KeyStore, KeystoreExt}; use sp_runtime::{ @@ -1722,7 +1721,7 @@ fn chain_extension_works() { let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, input.clone(), false); assert_eq!(TestExtension::last_seen_buffer(), input); - assert_eq!(result.result.unwrap().data, Bytes(input)); + assert_eq!(result.result.unwrap().data, input); // 1 = treat inputs as integer primitives and store the 
supplied integers Contracts::bare_call( @@ -1787,7 +1786,7 @@ fn chain_extension_works() { .result .unwrap(); assert_eq!(result.flags, ReturnFlags::REVERT); - assert_eq!(result.data, Bytes(vec![42, 99])); + assert_eq!(result.data, vec![42, 99]); // diverging to second chain extension that sets flags to 0x1 and returns a fixed buffer // We set the MSB part to 1 (instead of 0) which routes the request into the second @@ -1804,7 +1803,7 @@ fn chain_extension_works() { .result .unwrap(); assert_eq!(result.flags, ReturnFlags::REVERT); - assert_eq!(result.data, Bytes(vec![0x4B, 0x1D])); + assert_eq!(result.data, vec![0x4B, 0x1D]); // Diverging to third chain extension that is disabled // We set the MSB part to 2 (instead of 0) which routes the request into the third extension @@ -2672,7 +2671,7 @@ fn ecdsa_recover() { .result .unwrap(); assert!(!result.did_revert()); - assert_eq!(result.data.as_ref(), &EXPECTED_COMPRESSED_PUBLIC_KEY); + assert_eq!(result.data, EXPECTED_COMPRESSED_PUBLIC_KEY); }) } @@ -3503,7 +3502,7 @@ fn contract_reverted() { .result .unwrap(); assert_eq!(result.result.flags, flags); - assert_eq!(result.result.data.0, buffer); + assert_eq!(result.result.data, buffer); assert!(!>::contains_key(result.account_id)); // Pass empty flags and therefore successfully instantiate the contract for later use. 
@@ -3539,7 +3538,7 @@ fn contract_reverted() { .result .unwrap(); assert_eq!(result.flags, flags); - assert_eq!(result.data.0, buffer); + assert_eq!(result.data, buffer); }); } @@ -3559,7 +3558,7 @@ fn code_rejected_error_works() { 0, GAS_LIMIT, None, - Code::Upload(Bytes(wasm)), + Code::Upload(wasm), vec![], vec![], true, diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index d8b4cd245356e..b341ae3bd155d 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -281,7 +281,7 @@ mod tests { }; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use pretty_assertions::assert_eq; - use sp_core::{Bytes, H256}; + use sp_core::H256; use sp_runtime::DispatchError; use std::{ borrow::BorrowMut, @@ -341,8 +341,8 @@ mod tests { } /// The call is mocked and just returns this hardcoded value. - fn call_return_data() -> Bytes { - Bytes(vec![0xDE, 0xAD, 0xBE, 0xEF]) + fn call_return_data() -> Vec { + vec![0xDE, 0xAD, 0xBE, 0xEF] } impl Default for MockExt { @@ -404,7 +404,7 @@ mod tests { }); Ok(( Contracts::::contract_address(&ALICE, &code_hash, salt), - ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }, + ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }, )) } fn set_code_hash(&mut self, hash: CodeHash) -> Result<(), DispatchError> { @@ -804,7 +804,7 @@ mod tests { let mut mock_ext = MockExt::default(); let input = vec![0xff, 0x2a, 0x99, 0x88]; let result = execute(CODE, input.clone(), &mut mock_ext).unwrap(); - assert_eq!(result.data.0, input); + assert_eq!(result.data, input); assert_eq!( &mock_ext.calls, &[CallEntry { to: ALICE, value: 0x2a, data: input, allows_reentry: true }] @@ -907,15 +907,15 @@ mod tests { // value does not exist -> sentinel value returned let result = execute(CODE, [3u8; 32].encode(), &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + 
assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); // value did exist -> success let result = execute(CODE, [1u8; 32].encode(), &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 1,); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 1,); // value did exist -> success (zero sized type) let result = execute(CODE, [2u8; 32].encode(), &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0,); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0,); } #[test] @@ -977,13 +977,13 @@ mod tests { let input = (63, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // sentinel returned - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); // value exists let input = (64, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // true as u32 returned - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 1); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 1); // getter does not remove the value from storage assert_eq!(ext.storage.get(&[1u8; 64].to_vec()).unwrap(), &[42u8]); @@ -991,7 +991,7 @@ mod tests { let input = (19, [2u8; 19]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // true as u32 returned - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); // getter does not remove the value from storage assert_eq!(ext.storage.get(&[2u8; 19].to_vec()).unwrap(), &([] as [u8; 0])); } @@ -1234,7 +1234,7 @@ mod tests { let output = execute(CODE_ECDSA_TO_ETH_ADDRESS, vec![], MockExt::default()).unwrap(); assert_eq!( output, - ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes([0x02; 20].to_vec()) } + ExecReturnValue { flags: ReturnFlags::empty(), data: 
[0x02; 20].to_vec() } ); } @@ -1311,7 +1311,7 @@ mod tests { assert_eq!( output, - ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes([0x22; 32].to_vec()) } + ExecReturnValue { flags: ReturnFlags::empty(), data: [0x22; 32].to_vec() } ); } @@ -1630,10 +1630,7 @@ mod tests { fn return_from_start_fn() { let output = execute(CODE_RETURN_FROM_START_FN, vec![], MockExt::default()).unwrap(); - assert_eq!( - output, - ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![1, 2, 3, 4]) } - ); + assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: vec![1, 2, 3, 4] }); } const CODE_TIMESTAMP_NOW: &str = r#" @@ -1902,15 +1899,13 @@ mod tests { output, ExecReturnValue { flags: ReturnFlags::empty(), - data: Bytes( - ( - array_bytes::hex2array_unchecked::<32>( - "000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F" - ), - 42u64, - ) - .encode() - ), + data: ( + array_bytes::hex2array_unchecked::<32>( + "000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F" + ), + 42u64, + ) + .encode() }, ); } @@ -2124,7 +2119,7 @@ mod tests { output, ExecReturnValue { flags: ReturnFlags::empty(), - data: Bytes(array_bytes::hex2bytes_unchecked("445566778899")), + data: array_bytes::hex2bytes_unchecked("445566778899"), } ); assert!(!output.did_revert()); @@ -2143,7 +2138,7 @@ mod tests { output, ExecReturnValue { flags: ReturnFlags::REVERT, - data: Bytes(array_bytes::hex2bytes_unchecked("5566778899")), + data: array_bytes::hex2bytes_unchecked("5566778899"), } ); assert!(output.did_revert()); @@ -2306,7 +2301,7 @@ mod tests { let result = execute(CODE_CALL_RUNTIME, call.encode(), &mut ext).unwrap(); assert_eq!(*ext.runtime_calls.borrow(), vec![call]); // 0 = ReturnCode::Success - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); } #[test] @@ -2371,19 +2366,19 @@ mod tests { // value did not exist before -> sentinel returned let input = ([1u8; 32], 
[42u8, 48]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[42u8, 48]); // value do exist -> length of old value returned let input = ([1u8; 32], [0u8; 0]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 2); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 2); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[0u8; 0]); // value do exist -> length of old value returned (test for zero sized val) let input = ([1u8; 32], [99u8]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[99u8]); } @@ -2442,19 +2437,19 @@ mod tests { // value did not exist before -> sentinel returned let input = (32, [1u8; 32], [42u8, 48]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[42u8, 48]); // value do exist -> length of old value returned let input = (32, [1u8; 32], [0u8; 0]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 2); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 2); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[0u8; 0]); // value do exist -> length of old value returned (test for zero sized val) let input = (32, [1u8; 32], [99u8]).encode(); let result = 
execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[99u8]); } @@ -2527,7 +2522,7 @@ mod tests { let input = (63, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), ReturnCode::KeyNotFound as u32 ); @@ -2535,21 +2530,21 @@ mod tests { let input = (64, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), ReturnCode::Success as u32 ); assert_eq!(ext.storage.get(&[1u8; 64].to_vec()).unwrap(), &[42u8]); - assert_eq!(&result.data.0[4..], &[42u8]); + assert_eq!(&result.data[4..], &[42u8]); // value exists (test for 0 sized) let input = (19, [2u8; 19]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), ReturnCode::Success as u32 ); assert_eq!(ext.storage.get(&[2u8; 19].to_vec()), Some(&vec![])); - assert_eq!(&result.data.0[4..], &([] as [u8; 0])); + assert_eq!(&result.data[4..], &([] as [u8; 0])); } #[test] @@ -2611,14 +2606,14 @@ mod tests { let input = (32, [3u8; 32]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // sentinel returned - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); assert_eq!(ext.storage.get(&[3u8; 32].to_vec()), None); // value did exist let input = (64, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // length returned - 
assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 1); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 1); // value cleared assert_eq!(ext.storage.get(&[1u8; 64].to_vec()), None); @@ -2626,14 +2621,14 @@ mod tests { let input = (63, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // sentinel returned - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); assert_eq!(ext.storage.get(&[1u8; 64].to_vec()), None); // value exists let input = (19, [2u8; 19]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // length returned (test for 0 sized) - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); // value cleared assert_eq!(ext.storage.get(&[2u8; 19].to_vec()), None); } @@ -2710,7 +2705,7 @@ mod tests { let input = (63, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), ReturnCode::KeyNotFound as u32 ); @@ -2718,21 +2713,21 @@ mod tests { let input = (64, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), ReturnCode::Success as u32 ); assert_eq!(ext.storage.get(&[1u8; 64].to_vec()), None); - assert_eq!(&result.data.0[4..], &[42u8]); + assert_eq!(&result.data[4..], &[42u8]); // value did exist -> length returned (test for 0 sized) let input = (19, [2u8; 19]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), ReturnCode::Success as u32 ); 
assert_eq!(ext.storage.get(&[2u8; 19].to_vec()), None); - assert_eq!(&result.data.0[4..], &[0u8; 0]); + assert_eq!(&result.data[4..], &[0u8; 0]); } #[test] @@ -2769,10 +2764,7 @@ mod tests { let output = execute(CODE_IS_CONTRACT, vec![], MockExt::default()).unwrap(); // The mock ext just always returns 1u32 (`true`). - assert_eq!( - output, - ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(1u32.encode()) }, - ); + assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: 1u32.encode() },); } #[test] @@ -2906,10 +2898,7 @@ mod tests { let output = execute(CODE_CALLER_IS_ORIGIN, vec![], MockExt::default()).unwrap(); // The mock ext just always returns 0u32 (`false`) - assert_eq!( - output, - ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(0u32.encode()) }, - ); + assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: 0u32.encode() },); } #[test] diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index edd413aa45bf0..3296492994071 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -30,7 +30,7 @@ use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; use frame_support::{dispatch::DispatchError, ensure, traits::Get, weights::Weight}; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use pallet_contracts_proc_macro::define_env; -use sp_core::{crypto::UncheckedFrom, Bytes}; +use sp_core::crypto::UncheckedFrom; use sp_io::hashing::{blake2_128, blake2_256, keccak_256, sha2_256}; use sp_runtime::traits::{Bounded, Zero}; use sp_sandbox::SandboxMemory; @@ -483,10 +483,10 @@ where TrapReason::Return(ReturnData { flags, data }) => { let flags = ReturnFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?; - Ok(ExecReturnValue { flags, data: Bytes(data) }) + Ok(ExecReturnValue { flags, data }) }, TrapReason::Termination => - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }), + Ok(ExecReturnValue { flags: 
ReturnFlags::empty(), data: Vec::new() }), TrapReason::SupervisorError(error) => return Err(error.into()), } } @@ -494,7 +494,7 @@ where // Check the exact type of the error. match sandbox_result { // No traps were generated. Proceed normally. - Ok(_) => Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }), + Ok(_) => Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }), // `Error::Module` is returned only if instantiation or linking failed (i.e. // wasm binary tried to import a function that is not provided by the host). // This shouldn't happen because validation process ought to reject such binaries. @@ -879,7 +879,7 @@ where if let Ok(return_value) = call_outcome { return Err(TrapReason::Return(ReturnData { flags: return_value.flags.bits(), - data: return_value.data.0, + data: return_value.data, })) } } From 9472af8e2af41b47a471c084035bf8aecf61d8da Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Mon, 3 Oct 2022 16:00:57 +0300 Subject: [PATCH 31/42] Beefy on-demand justifications as a custom RequestResponse protocol (#12124) * client/beefy: create communication module and move gossip there * client/beefy: move beefy_protocol_name module to communication * client/beefy: move notification module under communication * client/beefy: add incoming request_response protocol handler * client/beefy: keep track of connected peers and their progress * client/beefy: add logic for generating Justif requests * client/beefy: cancel outdated on-demand justification requests * try Andre's suggestion for JustificationEngine * justif engine add justifs validation * client/beefy: impl OnDemandJustificationsEngine async next() * move beefy proto name test * client/beefy: initialize OnDemandJustificationsEngine * client/tests: allow for custom req-resp protocols * client/beefy: on-demand-justif: implement simple peer selection strategy * client/beefy: fix voter initialization Fix corner case where voter gets a single burst of finality 
notifications just when it starts. The notification stream was consumed by "wait_for_pallet" logic, then main loop would subscribe to finality notifications, but by that time some notifications might've been lost. Fix this by subscribing the main loop to notifications before waiting for pallet to become available. Share the same stream with the main loop so that notifications for blocks before pallet available are ignored, while _all_ notifications after pallet available are processed. Add regression test for this. Signed-off-by: acatangiu * client/beefy: make sure justif requests are always out for mandatory blocks * client/beefy: add test for on-demand justifications sync * client/beefy: tweak main loop event processing order * client/beefy: run on-demand-justif-handler under same async task as voter * client/beefy: add test for known-peers * client/beefy: reorg request-response module * client/beefy: add issue references for future work todos * client/beefy: consolidate on-demand-justifications engine state machine Signed-off-by: acatangiu * client/beefy: fix for polkadot companion * client/beefy: implement review suggestions * cargo fmt and clippy * fix merge damage * fix rust-doc * fix merge damage * fix merge damage * client/beefy: add test for justif proto name Signed-off-by: acatangiu --- client/beefy/rpc/src/lib.rs | 6 +- .../beefy/src/{ => communication}/gossip.rs | 20 +- client/beefy/src/communication/mod.rs | 118 +++++++ .../src/{ => communication}/notification.rs | 0 client/beefy/src/communication/peers.rs | 131 ++++++++ .../incoming_requests_handler.rs | 193 ++++++++++++ .../src/communication/request_response/mod.rs | 101 ++++++ .../outgoing_requests_engine.rs | 245 +++++++++++++++ client/beefy/src/import.rs | 2 +- client/beefy/src/lib.rs | 131 ++++---- client/beefy/src/round.rs | 20 +- client/beefy/src/tests.rs | 287 ++++++++++++------ client/beefy/src/worker.rs | 166 +++++++--- client/network/test/src/lib.rs | 7 +- 14 files changed, 1208 
insertions(+), 219 deletions(-) rename client/beefy/src/{ => communication}/gossip.rs (94%) create mode 100644 client/beefy/src/communication/mod.rs rename client/beefy/src/{ => communication}/notification.rs (100%) create mode 100644 client/beefy/src/communication/peers.rs create mode 100644 client/beefy/src/communication/request_response/incoming_requests_handler.rs create mode 100644 client/beefy/src/communication/request_response/mod.rs create mode 100644 client/beefy/src/communication/request_response/outgoing_requests_engine.rs diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs index 3be182ceb8f39..0af474116e6d0 100644 --- a/client/beefy/rpc/src/lib.rs +++ b/client/beefy/rpc/src/lib.rs @@ -35,7 +35,9 @@ use jsonrpsee::{ }; use log::warn; -use beefy_gadget::notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}; +use beefy_gadget::communication::notification::{ + BeefyBestBlockStream, BeefyVersionedFinalityProofStream, +}; mod notification; @@ -165,8 +167,8 @@ mod tests { use super::*; use beefy_gadget::{ + communication::notification::BeefyVersionedFinalityProofSender, justification::BeefyVersionedFinalityProof, - notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofSender}, }; use beefy_primitives::{known_payload_ids, Payload, SignedCommitment}; use codec::{Decode, Encode}; diff --git a/client/beefy/src/gossip.rs b/client/beefy/src/communication/gossip.rs similarity index 94% rename from client/beefy/src/gossip.rs rename to client/beefy/src/communication/gossip.rs index 02d5efe9e0e58..6c41a2e48932a 100644 --- a/client/beefy/src/gossip.rs +++ b/client/beefy/src/communication/gossip.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::{collections::BTreeMap, time::Duration}; +use std::{collections::BTreeMap, sync::Arc, time::Duration}; use sc_network::PeerId; use sc_network_gossip::{MessageIntent, ValidationResult, Validator, ValidatorContext}; @@ -28,13 +28,12 @@ use log::{debug, trace}; use parking_lot::{Mutex, RwLock}; use wasm_timer::Instant; +use crate::{communication::peers::KnownPeers, keystore::BeefyKeystore}; use beefy_primitives::{ crypto::{Public, Signature}, VoteMessage, }; -use crate::keystore::BeefyKeystore; - // Timeout for rebroadcasting messages. const REBROADCAST_AFTER: Duration = Duration::from_secs(60 * 5); @@ -103,17 +102,19 @@ where topic: B::Hash, known_votes: RwLock>, next_rebroadcast: Mutex, + known_peers: Arc>>, } impl GossipValidator where B: Block, { - pub fn new() -> GossipValidator { + pub fn new(known_peers: Arc>>) -> GossipValidator { GossipValidator { topic: topic::(), known_votes: RwLock::new(KnownVotes::new()), next_rebroadcast: Mutex::new(Instant::now() + REBROADCAST_AFTER), + known_peers, } } @@ -165,6 +166,7 @@ where if BeefyKeystore::verify(&msg.id, &msg.signature, &msg.commitment.encode()) { self.known_votes.write().add_known(&round, msg_hash); + self.known_peers.lock().note_vote_for(*sender, round); return ValidationResult::ProcessAndKeep(self.topic) } else { // TODO: report peer @@ -271,7 +273,7 @@ mod tests { #[test] fn note_and_drop_round_works() { - let gv = GossipValidator::::new(); + let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); gv.note_round(1u64); @@ -298,7 +300,7 @@ mod tests { #[test] fn note_same_round_twice() { - let gv = GossipValidator::::new(); + let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); gv.note_round(3u64); gv.note_round(7u64); @@ -355,7 +357,7 @@ mod tests { #[test] fn should_avoid_verifying_signatures_twice() { - let gv = GossipValidator::::new(); + let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); let sender = sc_network::PeerId::random(); let 
mut context = TestContext; @@ -391,7 +393,7 @@ mod tests { #[test] fn messages_allowed_and_expired() { - let gv = GossipValidator::::new(); + let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); let sender = sc_network::PeerId::random(); let topic = Default::default(); let intent = MessageIntent::Broadcast; @@ -434,7 +436,7 @@ mod tests { #[test] fn messages_rebroadcast() { - let gv = GossipValidator::::new(); + let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); let sender = sc_network::PeerId::random(); let topic = Default::default(); diff --git a/client/beefy/src/communication/mod.rs b/client/beefy/src/communication/mod.rs new file mode 100644 index 0000000000000..93646677c0ecd --- /dev/null +++ b/client/beefy/src/communication/mod.rs @@ -0,0 +1,118 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Communication streams for the BEEFY networking protocols. + +pub mod notification; +pub mod request_response; + +pub(crate) mod gossip; +pub(crate) mod peers; + +pub(crate) mod beefy_protocol_name { + use array_bytes::bytes2hex; + use sc_network::ProtocolName; + + /// BEEFY votes gossip protocol name suffix. 
+ const GOSSIP_NAME: &str = "/beefy/1"; + /// BEEFY justifications protocol name suffix. + const JUSTIFICATIONS_NAME: &str = "/beefy/justifications/1"; + + /// Old names for the gossip protocol, used for backward compatibility. + pub(super) const LEGACY_NAMES: [&str; 1] = ["/paritytech/beefy/1"]; + + /// Name of the votes gossip protocol used by BEEFY. + /// + /// Must be registered towards the networking in order for BEEFY voter to properly function. + pub fn gossip_protocol_name>( + genesis_hash: Hash, + fork_id: Option<&str>, + ) -> ProtocolName { + let genesis_hash = genesis_hash.as_ref(); + if let Some(fork_id) = fork_id { + format!("/{}/{}{}", bytes2hex("", genesis_hash), fork_id, GOSSIP_NAME).into() + } else { + format!("/{}{}", bytes2hex("", genesis_hash), GOSSIP_NAME).into() + } + } + + /// Name of the BEEFY justifications request-response protocol. + pub fn justifications_protocol_name>( + genesis_hash: Hash, + fork_id: Option<&str>, + ) -> ProtocolName { + let genesis_hash = genesis_hash.as_ref(); + if let Some(fork_id) = fork_id { + format!("/{}/{}{}", bytes2hex("", genesis_hash), fork_id, JUSTIFICATIONS_NAME).into() + } else { + format!("/{}{}", bytes2hex("", genesis_hash), JUSTIFICATIONS_NAME).into() + } + } +} + +/// Returns the configuration value to put in +/// [`sc_network::config::NetworkConfiguration::extra_sets`]. +/// For standard protocol name see [`beefy_protocol_name::gossip_protocol_name`]. 
+pub fn beefy_peers_set_config( + gossip_protocol_name: sc_network::ProtocolName, +) -> sc_network_common::config::NonDefaultSetConfig { + let mut cfg = + sc_network_common::config::NonDefaultSetConfig::new(gossip_protocol_name, 1024 * 1024); + + cfg.allow_non_reserved(25, 25); + cfg.add_fallback_names(beefy_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect()); + cfg +} + +#[cfg(test)] +mod tests { + use super::*; + + use sp_core::H256; + + #[test] + fn beefy_protocols_names() { + use beefy_protocol_name::{gossip_protocol_name, justifications_protocol_name}; + // Create protocol name using random genesis hash. + let genesis_hash = H256::random(); + let genesis_hex = array_bytes::bytes2hex("", genesis_hash.as_ref()); + + let expected_gossip_name = format!("/{}/beefy/1", genesis_hex); + let gossip_proto_name = gossip_protocol_name(&genesis_hash, None); + assert_eq!(gossip_proto_name.to_string(), expected_gossip_name); + + let expected_justif_name = format!("/{}/beefy/justifications/1", genesis_hex); + let justif_proto_name = justifications_protocol_name(&genesis_hash, None); + assert_eq!(justif_proto_name.to_string(), expected_justif_name); + + // Create protocol name using hardcoded genesis hash. Verify exact representation. 
+ let genesis_hash = [ + 50, 4, 60, 123, 58, 106, 216, 246, 194, 188, 139, 193, 33, 212, 202, 171, 9, 55, 123, + 94, 8, 43, 12, 251, 187, 57, 173, 19, 188, 74, 205, 147, + ]; + let genesis_hex = "32043c7b3a6ad8f6c2bc8bc121d4caab09377b5e082b0cfbbb39ad13bc4acd93"; + + let expected_gossip_name = format!("/{}/beefy/1", genesis_hex); + let gossip_proto_name = gossip_protocol_name(&genesis_hash, None); + assert_eq!(gossip_proto_name.to_string(), expected_gossip_name); + + let expected_justif_name = format!("/{}/beefy/justifications/1", genesis_hex); + let justif_proto_name = justifications_protocol_name(&genesis_hash, None); + assert_eq!(justif_proto_name.to_string(), expected_justif_name); + } +} diff --git a/client/beefy/src/notification.rs b/client/beefy/src/communication/notification.rs similarity index 100% rename from client/beefy/src/notification.rs rename to client/beefy/src/communication/notification.rs diff --git a/client/beefy/src/communication/peers.rs b/client/beefy/src/communication/peers.rs new file mode 100644 index 0000000000000..0e20a0f4e0ff6 --- /dev/null +++ b/client/beefy/src/communication/peers.rs @@ -0,0 +1,131 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Logic for keeping track of BEEFY peers. 
+ +// TODO (issue #12296): replace this naive peer tracking with generic one that infers data +// from multiple network protocols. + +use sc_network::PeerId; +use sp_runtime::traits::{Block, NumberFor, Zero}; +use std::collections::{HashMap, VecDeque}; + +struct PeerData { + last_voted_on: NumberFor, +} + +impl Default for PeerData { + fn default() -> Self { + PeerData { last_voted_on: Zero::zero() } + } +} + +/// Keep a simple map of connected peers +/// and the most recent voting round they participated in. +pub struct KnownPeers { + live: HashMap>, +} + +impl KnownPeers { + pub fn new() -> Self { + Self { live: HashMap::new() } + } + + /// Add new connected `peer`. + pub fn add_new(&mut self, peer: PeerId) { + self.live.entry(peer).or_default(); + } + + /// Note vote round number for `peer`. + pub fn note_vote_for(&mut self, peer: PeerId, round: NumberFor) { + let data = self.live.entry(peer).or_default(); + data.last_voted_on = round.max(data.last_voted_on); + } + + /// Remove connected `peer`. + pub fn remove(&mut self, peer: &PeerId) { + self.live.remove(peer); + } + + /// Return _filtered and cloned_ list of peers that have voted on `block` or higher. + pub fn at_least_at_block(&self, block: NumberFor) -> VecDeque { + self.live + .iter() + .filter_map(|(k, v)| (v.last_voted_on >= block).then_some(k)) + .cloned() + .collect() + } + + /// Answer whether `peer` is part of `KnownPeers` set. + pub fn contains(&self, peer: &PeerId) -> bool { + self.live.contains_key(peer) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_track_known_peers_progress() { + let (alice, bob, charlie) = (PeerId::random(), PeerId::random(), PeerId::random()); + let mut peers = KnownPeers::::new(); + assert!(peers.live.is_empty()); + + // Alice and Bob new connected peers. + peers.add_new(alice); + peers.add_new(bob); + // 'Tracked' Bob seen voting for 5. + peers.note_vote_for(bob, 5); + // Previously unseen Charlie now seen voting for 10. 
+ peers.note_vote_for(charlie, 10); + + assert_eq!(peers.live.len(), 3); + assert!(peers.contains(&alice)); + assert!(peers.contains(&bob)); + assert!(peers.contains(&charlie)); + + // Get peers at block >= 5 + let at_5 = peers.at_least_at_block(5); + // Should be Bob and Charlie + assert_eq!(at_5.len(), 2); + assert!(at_5.contains(&bob)); + assert!(at_5.contains(&charlie)); + + // 'Tracked' Alice seen voting for 10. + peers.note_vote_for(alice, 10); + + // Get peers at block >= 9 + let at_9 = peers.at_least_at_block(9); + // Should be Charlie and Alice + assert_eq!(at_9.len(), 2); + assert!(at_9.contains(&charlie)); + assert!(at_9.contains(&alice)); + + // Remove Alice + peers.remove(&alice); + assert_eq!(peers.live.len(), 2); + assert!(!peers.contains(&alice)); + + // Get peers at block >= 9 + let at_9 = peers.at_least_at_block(9); + // Now should be just Charlie + assert_eq!(at_9.len(), 1); + assert!(at_9.contains(&charlie)); + } +} diff --git a/client/beefy/src/communication/request_response/incoming_requests_handler.rs b/client/beefy/src/communication/request_response/incoming_requests_handler.rs new file mode 100644 index 0000000000000..c0910a60fba3b --- /dev/null +++ b/client/beefy/src/communication/request_response/incoming_requests_handler.rs @@ -0,0 +1,193 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! 
Helper for handling (i.e. answering) BEEFY justifications requests from a remote peer. + +use beefy_primitives::BEEFY_ENGINE_ID; +use codec::Decode; +use futures::{ + channel::{mpsc, oneshot}, + StreamExt, +}; +use log::{debug, trace}; +use sc_client_api::BlockBackend; +use sc_network::{config as netconfig, config::RequestResponseConfig, PeerId, ReputationChange}; +use sc_network_common::protocol::ProtocolName; +use sp_runtime::{generic::BlockId, traits::Block}; +use std::{marker::PhantomData, sync::Arc}; + +use crate::communication::request_response::{ + on_demand_justifications_protocol_config, Error, JustificationRequest, +}; + +/// A request coming in, including a sender for sending responses. +#[derive(Debug)] +pub(crate) struct IncomingRequest { + /// `PeerId` of sending peer. + pub peer: PeerId, + /// The sent request. + pub payload: JustificationRequest, + /// Sender for sending response back. + pub pending_response: oneshot::Sender, +} + +impl IncomingRequest { + /// Create new `IncomingRequest`. + pub fn new( + peer: PeerId, + payload: JustificationRequest, + pending_response: oneshot::Sender, + ) -> Self { + Self { peer, payload, pending_response } + } + + /// Try building from raw network request. + /// + /// This function will fail if the request cannot be decoded and will apply passed in + /// reputation changes in that case. + /// + /// Params: + /// - The raw request to decode + /// - Reputation changes to apply for the peer in case decoding fails. 
+ pub fn try_from_raw( + raw: netconfig::IncomingRequest, + reputation_changes: Vec, + ) -> Result { + let netconfig::IncomingRequest { payload, peer, pending_response } = raw; + let payload = match JustificationRequest::decode(&mut payload.as_ref()) { + Ok(payload) => payload, + Err(err) => { + let response = netconfig::OutgoingResponse { + result: Err(()), + reputation_changes, + sent_feedback: None, + }; + if let Err(_) = pending_response.send(response) { + return Err(Error::DecodingErrorNoReputationChange(peer, err)) + } + return Err(Error::DecodingError(peer, err)) + }, + }; + Ok(Self::new(peer, payload, pending_response)) + } +} + +/// Receiver for incoming BEEFY justifications requests. +/// +/// Takes care of decoding and handling of invalid encoded requests. +pub(crate) struct IncomingRequestReceiver { + raw: mpsc::Receiver, +} + +impl IncomingRequestReceiver { + pub fn new(inner: mpsc::Receiver) -> Self { + Self { raw: inner } + } + + /// Try to receive the next incoming request. + /// + /// Any received request will be decoded, on decoding errors the provided reputation changes + /// will be applied and an error will be reported. + pub async fn recv(&mut self, reputation_changes: F) -> Result, Error> + where + B: Block, + F: FnOnce() -> Vec, + { + let req = match self.raw.next().await { + None => return Err(Error::RequestChannelExhausted), + Some(raw) => IncomingRequest::::try_from_raw(raw, reputation_changes())?, + }; + Ok(req) + } +} + +/// Handler for incoming BEEFY justifications requests from a remote peer. +pub struct BeefyJustifsRequestHandler { + pub(crate) request_receiver: IncomingRequestReceiver, + pub(crate) justif_protocol_name: ProtocolName, + pub(crate) client: Arc, + pub(crate) _block: PhantomData, +} + +impl BeefyJustifsRequestHandler +where + B: Block, + Client: BlockBackend + Send + Sync, +{ + /// Create a new [`BeefyJustifsRequestHandler`]. 
+ pub fn new>( + genesis_hash: Hash, + fork_id: Option<&str>, + client: Arc, + ) -> (Self, RequestResponseConfig) { + let (request_receiver, config) = + on_demand_justifications_protocol_config(genesis_hash, fork_id); + let justif_protocol_name = config.name.clone(); + + (Self { request_receiver, justif_protocol_name, client, _block: PhantomData }, config) + } + + /// Network request-response protocol name used by this handler. + pub fn protocol_name(&self) -> ProtocolName { + self.justif_protocol_name.clone() + } + + // Sends back justification response if justification found in client backend. + fn handle_request(&self, request: IncomingRequest) -> Result<(), Error> { + // TODO (issue #12293): validate `request` and change peer reputation for invalid requests. + + let maybe_encoded_proof = self + .client + .justifications(&BlockId::Number(request.payload.begin)) + .map_err(Error::Client)? + .and_then(|justifs| justifs.get(BEEFY_ENGINE_ID).cloned()) + // No BEEFY justification present. + .ok_or(()); + + request + .pending_response + .send(netconfig::OutgoingResponse { + result: maybe_encoded_proof, + reputation_changes: Vec::new(), + sent_feedback: None, + }) + .map_err(|_| Error::SendResponse) + } + + /// Run [`BeefyJustifsRequestHandler`]. + pub async fn run(mut self) { + trace!(target: "beefy::sync", "🥩 Running BeefyJustifsRequestHandler"); + + while let Ok(request) = self.request_receiver.recv(|| vec![]).await { + let peer = request.peer; + match self.handle_request(request) { + Ok(()) => { + debug!( + target: "beefy::sync", + "🥩 Handled BEEFY justification request from {:?}.", peer + ) + }, + Err(e) => { + // TODO (issue #12293): apply reputation changes here based on error type. 
+ debug!( + target: "beefy::sync", + "🥩 Failed to handle BEEFY justification request from {:?}: {}", peer, e, + ) + }, + } + } + } +} diff --git a/client/beefy/src/communication/request_response/mod.rs b/client/beefy/src/communication/request_response/mod.rs new file mode 100644 index 0000000000000..c83bb9d57e91b --- /dev/null +++ b/client/beefy/src/communication/request_response/mod.rs @@ -0,0 +1,101 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Request/response protocol for syncing BEEFY justifications. + +mod incoming_requests_handler; +pub(crate) mod outgoing_requests_engine; + +pub use incoming_requests_handler::BeefyJustifsRequestHandler; + +use futures::channel::mpsc; +use std::time::Duration; + +use codec::{Decode, Encode, Error as CodecError}; +use sc_network::{config::RequestResponseConfig, PeerId}; +use sp_runtime::traits::{Block, NumberFor}; + +use crate::communication::beefy_protocol_name::justifications_protocol_name; +use incoming_requests_handler::IncomingRequestReceiver; + +// 10 seems reasonable, considering justifs are explicitly requested only +// for mandatory blocks, by nodes that are syncing/catching-up. 
+const JUSTIF_CHANNEL_SIZE: usize = 10; + +const MAX_RESPONSE_SIZE: u64 = 1024 * 1024; +const JUSTIF_REQUEST_TIMEOUT: Duration = Duration::from_secs(3); + +/// Get the configuration for the BEEFY justifications Request/response protocol. +/// +/// Returns a receiver for messages received on this protocol and the requested +/// `ProtocolConfig`. +/// +/// Consider using [`BeefyJustifsRequestHandler`] instead of this low-level function. +pub(crate) fn on_demand_justifications_protocol_config>( + genesis_hash: Hash, + fork_id: Option<&str>, +) -> (IncomingRequestReceiver, RequestResponseConfig) { + let name = justifications_protocol_name(genesis_hash, fork_id); + let fallback_names = vec![]; + let (tx, rx) = mpsc::channel(JUSTIF_CHANNEL_SIZE); + let rx = IncomingRequestReceiver::new(rx); + let cfg = RequestResponseConfig { + name, + fallback_names, + max_request_size: 32, + max_response_size: MAX_RESPONSE_SIZE, + // We are connected to all validators: + request_timeout: JUSTIF_REQUEST_TIMEOUT, + inbound_queue: Some(tx), + }; + (rx, cfg) +} + +/// BEEFY justification request. +#[derive(Debug, Clone, Encode, Decode)] +pub struct JustificationRequest { + /// Start collecting proofs from this block. + pub begin: NumberFor, +} + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error(transparent)] + Client(#[from] sp_blockchain::Error), + + #[error(transparent)] + RuntimeApi(#[from] sp_api::ApiError), + + /// Decoding failed, we were able to change the peer's reputation accordingly. + #[error("Decoding request failed for peer {0}.")] + DecodingError(PeerId, #[source] CodecError), + + /// Decoding failed, but sending reputation change failed. + #[error("Decoding request failed for peer {0}, and changing reputation failed.")] + DecodingErrorNoReputationChange(PeerId, #[source] CodecError), + + /// Incoming request stream exhausted. Should only happen on shutdown. 
+ #[error("Incoming request channel got closed.")] + RequestChannelExhausted, + + #[error("Failed to send response.")] + SendResponse, + + #[error("Received invalid response.")] + InvalidResponse, +} diff --git a/client/beefy/src/communication/request_response/outgoing_requests_engine.rs b/client/beefy/src/communication/request_response/outgoing_requests_engine.rs new file mode 100644 index 0000000000000..e22958e19cd2e --- /dev/null +++ b/client/beefy/src/communication/request_response/outgoing_requests_engine.rs @@ -0,0 +1,245 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Generating request logic for request/response protocol for syncing BEEFY justifications. 
+ +use beefy_primitives::{crypto::AuthorityId, BeefyApi, ValidatorSet}; +use codec::Encode; +use futures::{ + channel::{oneshot, oneshot::Canceled}, + stream::{self, StreamExt}, +}; +use log::{debug, error, warn}; +use parking_lot::Mutex; +use sc_network::{PeerId, ProtocolName}; +use sc_network_common::{ + request_responses::{IfDisconnected, RequestFailure}, + service::NetworkRequest, +}; +use sp_api::ProvideRuntimeApi; +use sp_runtime::{ + generic::BlockId, + traits::{Block, NumberFor}, +}; +use std::{collections::VecDeque, result::Result, sync::Arc}; + +use crate::{ + communication::request_response::{Error, JustificationRequest}, + justification::{decode_and_verify_finality_proof, BeefyVersionedFinalityProof}, + KnownPeers, +}; + +/// Response type received from network. +type Response = Result, RequestFailure>; +/// Used to receive a response from the network. +type ResponseReceiver = oneshot::Receiver; + +enum State { + Idle(stream::Pending>), + AwaitingResponse(PeerId, NumberFor, stream::Once), +} + +pub struct OnDemandJustificationsEngine { + network: Arc, + runtime: Arc, + protocol_name: ProtocolName, + + live_peers: Arc>>, + peers_cache: VecDeque, + + state: State, +} + +impl OnDemandJustificationsEngine +where + B: Block, + R: ProvideRuntimeApi, + R::Api: BeefyApi, +{ + pub fn new( + network: Arc, + runtime: Arc, + protocol_name: ProtocolName, + live_peers: Arc>>, + ) -> Self { + Self { + network, + runtime, + protocol_name, + live_peers, + peers_cache: VecDeque::new(), + state: State::Idle(stream::pending()), + } + } + + fn reset_peers_cache_for_block(&mut self, block: NumberFor) { + // TODO (issue #12296): replace peer selection with generic one that involves all protocols. + self.peers_cache = self.live_peers.lock().at_least_at_block(block); + } + + fn try_next_peer(&mut self) -> Option { + // TODO (issue #12296): replace peer selection with generic one that involves all protocols. 
+ let live = self.live_peers.lock(); + while let Some(peer) = self.peers_cache.pop_front() { + if live.contains(&peer) { + return Some(peer) + } + } + None + } + + fn request_from_peer(&mut self, peer: PeerId, block: NumberFor) { + debug!(target: "beefy::sync", "🥩 requesting justif #{:?} from peer {:?}", block, peer); + + let payload = JustificationRequest:: { begin: block }.encode(); + + let (tx, rx) = oneshot::channel(); + + self.network.start_request( + peer, + self.protocol_name.clone(), + payload, + tx, + IfDisconnected::ImmediateError, + ); + + self.state = State::AwaitingResponse(peer, block, stream::once(rx)); + } + + /// If no other request is in progress, start new justification request for `block`. + pub fn request(&mut self, block: NumberFor) { + // ignore new requests while there's already one pending + match &self.state { + State::AwaitingResponse(_, _, _) => return, + State::Idle(_) => (), + } + self.reset_peers_cache_for_block(block); + + // Start the requests engine - each unsuccessful received response will automatically + // trigger a new request to the next peer in the `peers_cache` until there are none left. + if let Some(peer) = self.try_next_peer() { + self.request_from_peer(peer, block); + } else { + debug!(target: "beefy::sync", "🥩 no good peers to request justif #{:?} from", block); + } + } + + /// Cancel any pending request for block numbers smaller or equal to `block`. 
+ pub fn cancel_requests_older_than(&mut self, block: NumberFor) { + match &self.state { + State::AwaitingResponse(_, number, _) if *number <= block => { + debug!( + target: "beefy::sync", + "🥩 cancel pending request for justification #{:?}", + number + ); + self.state = State::Idle(stream::pending()); + }, + _ => (), + } + } + + fn process_response( + &mut self, + peer: PeerId, + block: NumberFor, + validator_set: &ValidatorSet, + response: Result, + ) -> Result, Error> { + response + .map_err(|e| { + debug!( + target: "beefy::sync", + "🥩 for on demand justification #{:?}, peer {:?} hung up: {:?}", + block, peer, e + ); + Error::InvalidResponse + })? + .map_err(|e| { + debug!( + target: "beefy::sync", + "🥩 for on demand justification #{:?}, peer {:?} error: {:?}", + block, peer, e + ); + Error::InvalidResponse + }) + .and_then(|encoded| { + decode_and_verify_finality_proof::(&encoded[..], block, &validator_set).map_err( + |e| { + debug!( + target: "beefy::sync", + "🥩 for on demand justification #{:?}, peer {:?} responded with invalid proof: {:?}", + block, peer, e + ); + Error::InvalidResponse + }, + ) + }) + } + + pub async fn next(&mut self) -> Option> { + let (peer, block, resp) = match &mut self.state { + State::Idle(pending) => { + let _ = pending.next().await; + // This never happens since 'stream::pending' never generates any items. + return None + }, + State::AwaitingResponse(peer, block, receiver) => { + let resp = receiver.next().await?; + (*peer, *block, resp) + }, + }; + // We received the awaited response. Our 'stream::once()' receiver will never generate any + // other response, meaning we're done with current state. Move the engine to `State::Idle`. + self.state = State::Idle(stream::pending()); + + let block_id = BlockId::number(block); + let validator_set = self + .runtime + .runtime_api() + .validator_set(&block_id) + .map_err(|e| { + error!(target: "beefy::sync", "🥩 Runtime API error {:?} in on-demand justif engine.", e); + e + }) + .ok()? 
+ .or_else(|| { + error!(target: "beefy::sync", "🥩 BEEFY pallet not available for block {:?}.", block); + None + })?; + + self.process_response(peer, block, &validator_set, resp) + .map_err(|_| { + // No valid justification received, try next peer in our set. + if let Some(peer) = self.try_next_peer() { + self.request_from_peer(peer, block); + } else { + warn!(target: "beefy::sync", "🥩 ran out of peers to request justif #{:?} from", block); + } + }) + .map(|proof| { + debug!( + target: "beefy::sync", + "🥩 received valid on-demand justif #{:?} from {:?}", + block, peer + ); + proof + }) + .ok() + } +} diff --git a/client/beefy/src/import.rs b/client/beefy/src/import.rs index db4d8bfba7450..89a4517334189 100644 --- a/client/beefy/src/import.rs +++ b/client/beefy/src/import.rs @@ -33,8 +33,8 @@ use sc_client_api::backend::Backend; use sc_consensus::{BlockCheckParams, BlockImport, BlockImportParams, ImportResult}; use crate::{ + communication::notification::BeefyVersionedFinalityProofSender, justification::{decode_and_verify_finality_proof, BeefyVersionedFinalityProof}, - notification::BeefyVersionedFinalityProofSender, }; /// A block-import handler for BEEFY. diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index ad527b2929585..7407f101e99a5 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -17,10 +17,12 @@ // along with this program. If not, see . 
use beefy_primitives::{BeefyApi, MmrRootHash}; +use parking_lot::Mutex; use prometheus::Registry; -use sc_client_api::{Backend, BlockchainEvents, Finalizer}; +use sc_client_api::{Backend, BlockBackend, BlockchainEvents, Finalizer}; use sc_consensus::BlockImport; use sc_network::ProtocolName; +use sc_network_common::service::NetworkRequest; use sc_network_gossip::Network as GossipNetwork; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; @@ -28,68 +30,38 @@ use sp_consensus::{Error as ConsensusError, SyncOracle}; use sp_keystore::SyncCryptoStorePtr; use sp_mmr_primitives::MmrApi; use sp_runtime::traits::Block; -use std::sync::Arc; +use std::{marker::PhantomData, sync::Arc}; mod error; -mod gossip; mod keystore; mod metrics; mod round; mod worker; +pub mod communication; pub mod import; pub mod justification; -pub mod notification; #[cfg(test)] mod tests; use crate::{ - import::BeefyBlockImport, - notification::{ - BeefyBestBlockSender, BeefyBestBlockStream, BeefyVersionedFinalityProofSender, - BeefyVersionedFinalityProofStream, + communication::{ + notification::{ + BeefyBestBlockSender, BeefyBestBlockStream, BeefyVersionedFinalityProofSender, + BeefyVersionedFinalityProofStream, + }, + peers::KnownPeers, + request_response::{ + outgoing_requests_engine::OnDemandJustificationsEngine, BeefyJustifsRequestHandler, + }, }, + import::BeefyBlockImport, }; -pub use beefy_protocol_name::standard_name as protocol_standard_name; - -pub(crate) mod beefy_protocol_name { - use sc_chain_spec::ChainSpec; - use sc_network::ProtocolName; - - const NAME: &str = "/beefy/1"; - /// Old names for the notifications protocol, used for backward compatibility. - pub(crate) const LEGACY_NAMES: [&str; 1] = ["/paritytech/beefy/1"]; - - /// Name of the notifications protocol used by BEEFY. - /// - /// Must be registered towards the networking in order for BEEFY to properly function. 
- pub fn standard_name>( - genesis_hash: &Hash, - chain_spec: &Box, - ) -> ProtocolName { - let genesis_hash = genesis_hash.as_ref(); - let chain_prefix = match chain_spec.fork_id() { - Some(fork_id) => format!("/{}/{}", array_bytes::bytes2hex("", genesis_hash), fork_id), - None => format!("/{}", array_bytes::bytes2hex("", genesis_hash)), - }; - format!("{}{}", chain_prefix, NAME).into() - } -} - -/// Returns the configuration value to put in -/// [`sc_network::config::NetworkConfiguration::extra_sets`]. -/// For standard protocol name see [`beefy_protocol_name::standard_name`]. -pub fn beefy_peers_set_config( - protocol_name: ProtocolName, -) -> sc_network_common::config::NonDefaultSetConfig { - let mut cfg = sc_network_common::config::NonDefaultSetConfig::new(protocol_name, 1024 * 1024); - - cfg.allow_non_reserved(25, 25); - cfg.add_fallback_names(beefy_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect()); - cfg -} +pub use communication::beefy_protocol_name::{ + gossip_protocol_name, justifications_protocol_name as justifs_protocol_name, +}; /// A convenience BEEFY client trait that defines all the type bounds a BEEFY client /// has to satisfy. Ideally that should actually be a trait alias. Unfortunately as @@ -159,13 +131,13 @@ where { // Voter -> RPC links let (to_rpc_justif_sender, from_voter_justif_stream) = - notification::BeefyVersionedFinalityProofStream::::channel(); + BeefyVersionedFinalityProofStream::::channel(); let (to_rpc_best_block_sender, from_voter_best_beefy_stream) = - notification::BeefyBestBlockStream::::channel(); + BeefyBestBlockStream::::channel(); // BlockImport -> Voter links let (to_voter_justif_sender, from_block_import_justif_stream) = - notification::BeefyVersionedFinalityProofStream::::channel(); + BeefyVersionedFinalityProofStream::::channel(); // BlockImport let import = @@ -180,6 +152,24 @@ where (import, voter_links, rpc_links) } +/// BEEFY gadget network parameters. 
+pub struct BeefyNetworkParams +where + B: Block, + N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, +{ + /// Network implementing gossip, requests and sync-oracle. + pub network: Arc, + /// Chain specific BEEFY gossip protocol name. See + /// [`communication::beefy_protocol_name::gossip_protocol_name`]. + pub gossip_protocol_name: ProtocolName, + /// Chain specific BEEFY on-demand justifications protocol name. See + /// [`communication::beefy_protocol_name::justifications_protocol_name`]. + pub justifications_protocol_name: ProtocolName, + + pub _phantom: PhantomData, +} + /// BEEFY gadget initialization parameters. pub struct BeefyParams where @@ -188,7 +178,7 @@ where C: Client, R: ProvideRuntimeApi, R::Api: BeefyApi + MmrApi, - N: GossipNetwork + Clone + SyncOracle + Send + Sync + 'static, + N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, { /// BEEFY client pub client: Arc, @@ -198,16 +188,16 @@ where pub runtime: Arc, /// Local key store pub key_store: Option, - /// Gossip network - pub network: N, + /// BEEFY voter network params + pub network_params: BeefyNetworkParams, /// Minimal delta between blocks, BEEFY should vote for pub min_block_delta: u32, /// Prometheus metric registry pub prometheus_registry: Option, - /// Chain specific GRANDPA protocol name. See [`beefy_protocol_name::standard_name`]. - pub protocol_name: ProtocolName, /// Links between the block importer, the background voter and the RPC layer. pub links: BeefyVoterLinks, + /// Handler for incoming BEEFY justifications requests from a remote peer. + pub on_demand_justifications_handler: BeefyJustifsRequestHandler, } /// Start the BEEFY gadget. 
@@ -217,32 +207,43 @@ pub async fn start_beefy_gadget(beefy_params: BeefyParams, - C: Client, + C: Client + BlockBackend, R: ProvideRuntimeApi, R::Api: BeefyApi + MmrApi, - N: GossipNetwork + Clone + SyncOracle + Send + Sync + 'static, + N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, { let BeefyParams { client, backend, runtime, key_store, - network, + network_params, min_block_delta, prometheus_registry, - protocol_name, links, + on_demand_justifications_handler, } = beefy_params; - let sync_oracle = network.clone(); - let gossip_validator = Arc::new(gossip::GossipValidator::new()); + let BeefyNetworkParams { network, gossip_protocol_name, justifications_protocol_name, .. } = + network_params; + + let known_peers = Arc::new(Mutex::new(KnownPeers::new())); + let gossip_validator = + Arc::new(communication::gossip::GossipValidator::new(known_peers.clone())); let gossip_engine = sc_network_gossip::GossipEngine::new( - network, - protocol_name, + network.clone(), + gossip_protocol_name, gossip_validator.clone(), None, ); + let on_demand_justifications = OnDemandJustificationsEngine::new( + network.clone(), + runtime.clone(), + justifications_protocol_name, + known_peers.clone(), + ); + let metrics = prometheus_registry.as_ref().map(metrics::Metrics::register).and_then( |result| match result { @@ -261,10 +262,12 @@ where client, backend, runtime, - sync_oracle, + network, key_store: key_store.into(), + known_peers, gossip_engine, gossip_validator, + on_demand_justifications, links, metrics, min_block_delta, @@ -272,5 +275,5 @@ where let worker = worker::BeefyWorker::<_, _, _, _, _>::new(worker_params); - worker.run().await + futures::future::join(worker.run(), on_demand_justifications_handler.run()).await; } diff --git a/client/beefy/src/round.rs b/client/beefy/src/round.rs index c96613eb38a95..45d346ccd85eb 100644 --- a/client/beefy/src/round.rs +++ b/client/beefy/src/round.rs @@ -33,7 +33,7 @@ use sp_runtime::traits::{Block, NumberFor}; /// 
whether the local `self` validator has voted/signed. /// /// Does not do any validation on votes or signatures, layers above need to handle that (gossip). -#[derive(Default)] +#[derive(Debug, Default)] struct RoundTracker { self_vote: bool, votes: HashMap, @@ -69,6 +69,7 @@ pub fn threshold(authorities: usize) -> usize { /// Only round numbers > `best_done` are of interest, all others are considered stale. /// /// Does not do any validation on votes or signatures, layers above need to handle that (gossip). +#[derive(Debug)] pub(crate) struct Rounds { rounds: BTreeMap<(Payload, NumberFor), RoundTracker>, session_start: NumberFor, @@ -135,7 +136,7 @@ where } } - pub(crate) fn try_conclude( + pub(crate) fn should_conclude( &mut self, round: &(P, NumberFor), ) -> Option>> { @@ -148,7 +149,6 @@ where if done { let signatures = self.rounds.remove(round)?.votes; - self.conclude(round.1); Some( self.validators() .iter() @@ -279,7 +279,7 @@ mod tests { true )); // round not concluded - assert!(rounds.try_conclude(&round).is_none()); + assert!(rounds.should_conclude(&round).is_none()); // self vote already present, should not self vote assert!(!rounds.should_self_vote(&round)); @@ -296,7 +296,7 @@ mod tests { (Keyring::Dave.public(), Keyring::Dave.sign(b"I am committed")), false )); - assert!(rounds.try_conclude(&round).is_none()); + assert!(rounds.should_conclude(&round).is_none()); // add 2nd good vote assert!(rounds.add_vote( @@ -305,7 +305,7 @@ mod tests { false )); // round not concluded - assert!(rounds.try_conclude(&round).is_none()); + assert!(rounds.should_conclude(&round).is_none()); // add 3rd good vote assert!(rounds.add_vote( @@ -314,7 +314,8 @@ mod tests { false )); // round concluded - assert!(rounds.try_conclude(&round).is_some()); + assert!(rounds.should_conclude(&round).is_some()); + rounds.conclude(round.1); // Eve is a validator, but round was concluded, adding vote disallowed assert!(!rounds.add_vote( @@ -432,11 +433,12 @@ mod tests { assert_eq!(3, 
rounds.rounds.len()); // conclude unknown round - assert!(rounds.try_conclude(&(H256::from_low_u64_le(5), 5)).is_none()); + assert!(rounds.should_conclude(&(H256::from_low_u64_le(5), 5)).is_none()); assert_eq!(3, rounds.rounds.len()); // conclude round 2 - let signatures = rounds.try_conclude(&(H256::from_low_u64_le(2), 2)).unwrap(); + let signatures = rounds.should_conclude(&(H256::from_low_u64_le(2), 2)).unwrap(); + rounds.conclude(2); assert_eq!(1, rounds.rounds.len()); assert_eq!( diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 3e49f4e05cc91..8057bd7cab7a5 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -21,10 +21,9 @@ use futures::{future, stream::FuturesUnordered, Future, StreamExt}; use parking_lot::Mutex; use serde::{Deserialize, Serialize}; -use std::{collections::HashMap, sync::Arc, task::Poll}; +use std::{collections::HashMap, marker::PhantomData, sync::Arc, task::Poll}; use tokio::{runtime::Runtime, time::Duration}; -use sc_chain_spec::{ChainSpec, GenericChainSpec}; use sc_client_api::HeaderBackend; use sc_consensus::{ BlockImport, BlockImportParams, BoxJustificationImport, ForkChoiceStrategy, ImportResult, @@ -33,7 +32,7 @@ use sc_consensus::{ use sc_keystore::LocalKeystore; use sc_network_test::{ Block, BlockImportAdapter, FullPeerConfig, PassThroughVerifier, Peer, PeersClient, - TestNetFactory, + PeersFullClient, TestNetFactory, }; use sc_utils::notification::NotificationReceiver; @@ -42,6 +41,7 @@ use beefy_primitives::{ BeefyApi, ConsensusLog, MmrRootHash, ValidatorSet, VersionedFinalityProof, BEEFY_ENGINE_ID, KEY_TYPE as BeefyKeyType, }; +use sc_network::{config::RequestResponseConfig, ProtocolName}; use sp_mmr_primitives::{ BatchProof, EncodableOpaqueLeaf, Error as MmrError, LeafIndex, MmrApi, Proof, }; @@ -60,11 +60,21 @@ use sp_runtime::{ use substrate_test_runtime_client::{runtime::Header, ClientExt}; use crate::{ - beefy_block_import_and_links, beefy_protocol_name, justification::*, - 
keystore::tests::Keyring as BeefyKeyring, BeefyRPCLinks, BeefyVoterLinks, + beefy_block_import_and_links, + communication::request_response::{ + on_demand_justifications_protocol_config, BeefyJustifsRequestHandler, + }, + gossip_protocol_name, + justification::*, + keystore::tests::Keyring as BeefyKeyring, + BeefyRPCLinks, BeefyVoterLinks, }; -pub(crate) const BEEFY_PROTOCOL_NAME: &'static str = "/beefy/1"; +const GENESIS_HASH: H256 = H256::zero(); +fn beefy_gossip_proto_name() -> ProtocolName { + gossip_protocol_name(GENESIS_HASH, None) +} + const GOOD_MMR_ROOT: MmrRootHash = MmrRootHash::repeat_byte(0xbf); const BAD_MMR_ROOT: MmrRootHash = MmrRootHash::repeat_byte(0x42); @@ -89,35 +99,12 @@ impl BuildStorage for Genesis { } } -#[test] -fn beefy_protocol_name() { - let chain_spec = GenericChainSpec::::from_json_bytes( - &include_bytes!("../../chain-spec/res/chain_spec.json")[..], - ) - .unwrap() - .cloned_box(); - - // Create protocol name using random genesis hash. - let genesis_hash = H256::random(); - let expected = format!("/{}/beefy/1", array_bytes::bytes2hex("", genesis_hash.as_ref())); - let proto_name = beefy_protocol_name::standard_name(&genesis_hash, &chain_spec); - assert_eq!(proto_name.to_string(), expected); - - // Create protocol name using hardcoded genesis hash. Verify exact representation. 
- let genesis_hash = [ - 50, 4, 60, 123, 58, 106, 216, 246, 194, 188, 139, 193, 33, 212, 202, 171, 9, 55, 123, 94, - 8, 43, 12, 251, 187, 57, 173, 19, 188, 74, 205, 147, - ]; - let expected = - "/32043c7b3a6ad8f6c2bc8bc121d4caab09377b5e082b0cfbbb39ad13bc4acd93/beefy/1".to_string(); - let proto_name = beefy_protocol_name::standard_name(&genesis_hash, &chain_spec); - assert_eq!(proto_name.to_string(), expected); -} - #[derive(Default)] pub(crate) struct PeerData { pub(crate) beefy_rpc_links: Mutex>>, pub(crate) beefy_voter_links: Mutex>>, + pub(crate) beefy_justif_req_handler: + Mutex>>, } #[derive(Default)] @@ -126,23 +113,34 @@ pub(crate) struct BeefyTestNet { } impl BeefyTestNet { - pub(crate) fn new(n_authority: usize, n_full: usize) -> Self { - let mut net = BeefyTestNet { peers: Vec::with_capacity(n_authority + n_full) }; - for _ in 0..n_authority { - net.add_authority_peer(); - } - for _ in 0..n_full { - net.add_full_peer(); + pub(crate) fn new(n_authority: usize) -> Self { + let mut net = BeefyTestNet { peers: Vec::with_capacity(n_authority) }; + + for i in 0..n_authority { + let (rx, cfg) = on_demand_justifications_protocol_config(GENESIS_HASH, None); + let justif_protocol_name = cfg.name.clone(); + + net.add_authority_peer(vec![cfg]); + + let client = net.peers[i].client().as_client(); + let justif_handler = BeefyJustifsRequestHandler { + request_receiver: rx, + justif_protocol_name, + client, + _block: PhantomData, + }; + *net.peers[i].data.beefy_justif_req_handler.lock() = Some(justif_handler); } net } - pub(crate) fn add_authority_peer(&mut self) { + pub(crate) fn add_authority_peer(&mut self, req_resp_cfgs: Vec) { self.add_full_peer_with_config(FullPeerConfig { - notifications_protocols: vec![BEEFY_PROTOCOL_NAME.into()], + notifications_protocols: vec![beefy_gossip_proto_name()], + request_response_protocols: req_resp_cfgs, is_authority: true, ..Default::default() - }) + }); } pub(crate) fn generate_blocks_and_sync( @@ -198,6 +196,7 @@ impl 
TestNetFactory for BeefyTestNet { let peer_data = PeerData { beefy_rpc_links: Mutex::new(Some(rpc_links)), beefy_voter_links: Mutex::new(Some(voter_links)), + ..Default::default() }; (BlockImportAdapter::new(block_import), None, peer_data) } @@ -215,11 +214,8 @@ impl TestNetFactory for BeefyTestNet { } fn add_full_peer(&mut self) { - self.add_full_peer_with_config(FullPeerConfig { - notifications_protocols: vec![BEEFY_PROTOCOL_NAME.into()], - is_authority: false, - ..Default::default() - }) + // `add_authority_peer()` used instead. + unimplemented!() } } @@ -354,7 +350,7 @@ where API: ProvideRuntimeApi + Default + Sync + Send, API::Api: BeefyApi + MmrApi, { - let voters = FuturesUnordered::new(); + let tasks = FuturesUnordered::new(); for (peer_id, key, api) in peers.into_iter() { let peer = &net.peers[peer_id]; @@ -362,31 +358,40 @@ where let keystore = create_beefy_keystore(*key); let (_, _, peer_data) = net.make_block_import(peer.client().clone()); - let PeerData { beefy_rpc_links, beefy_voter_links } = peer_data; + let PeerData { beefy_rpc_links, beefy_voter_links, .. 
} = peer_data; let beefy_voter_links = beefy_voter_links.lock().take(); *peer.data.beefy_rpc_links.lock() = beefy_rpc_links.lock().take(); *peer.data.beefy_voter_links.lock() = beefy_voter_links.clone(); + let on_demand_justif_handler = peer.data.beefy_justif_req_handler.lock().take().unwrap(); + + let network_params = crate::BeefyNetworkParams { + network: peer.network_service().clone(), + gossip_protocol_name: beefy_gossip_proto_name(), + justifications_protocol_name: on_demand_justif_handler.protocol_name(), + _phantom: PhantomData, + }; + let beefy_params = crate::BeefyParams { client: peer.client().as_client(), backend: peer.client().as_backend(), runtime: api.clone(), key_store: Some(keystore), - network: peer.network_service().clone(), + network_params, links: beefy_voter_links.unwrap(), min_block_delta, prometheus_registry: None, - protocol_name: BEEFY_PROTOCOL_NAME.into(), + on_demand_justifications_handler: on_demand_justif_handler, }; - let gadget = crate::start_beefy_gadget::<_, _, _, _, _>(beefy_params); + let task = crate::start_beefy_gadget::<_, _, _, _, _>(beefy_params); fn assert_send(_: &T) {} - assert_send(&gadget); - voters.push(gadget); + assert_send(&task); + tasks.push(task); } - voters.for_each(|_| async move {}) + tasks.for_each(|_| async move {}) } fn block_until(future: impl Future + Unpin, net: &Arc>, runtime: &mut Runtime) { @@ -404,18 +409,19 @@ fn run_for(duration: Duration, net: &Arc>, runtime: &mut Run pub(crate) fn get_beefy_streams( net: &mut BeefyTestNet, - peers: &[BeefyKeyring], + // peer index and key + peers: impl Iterator, ) -> (Vec>, Vec>>) { let mut best_block_streams = Vec::new(); let mut versioned_finality_proof_streams = Vec::new(); - for peer_id in 0..peers.len() { - let beefy_rpc_links = net.peer(peer_id).data.beefy_rpc_links.lock().clone().unwrap(); + peers.for_each(|(index, _)| { + let beefy_rpc_links = net.peer(index).data.beefy_rpc_links.lock().clone().unwrap(); let BeefyRPCLinks { from_voter_justif_stream, 
from_voter_best_beefy_stream } = beefy_rpc_links; best_block_streams.push(from_voter_best_beefy_stream.subscribe()); versioned_finality_proof_streams.push(from_voter_justif_stream.subscribe()); - } + }); (best_block_streams, versioned_finality_proof_streams) } @@ -493,18 +499,24 @@ fn streams_empty_after_timeout( fn finalize_block_and_wait_for_beefy( net: &Arc>, - peers: &[BeefyKeyring], + // peer index and key + peers: impl Iterator + Clone, runtime: &mut Runtime, finalize_targets: &[u64], expected_beefy: &[u64], ) { - let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); for block in finalize_targets { let finalize = BlockId::number(*block); - for i in 0..peers.len() { - net.lock().peer(i).client().as_client().finalize_block(finalize, None).unwrap(); - } + peers.clone().for_each(|(index, _)| { + net.lock() + .peer(index) + .client() + .as_client() + .finalize_block(finalize, None) + .unwrap(); + }) } if expected_beefy.is_empty() { @@ -524,12 +536,12 @@ fn beefy_finalizing_blocks() { sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); - let peers = &[BeefyKeyring::Alice, BeefyKeyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(peers), 0).unwrap(); + let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; + let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 0).unwrap(); let session_len = 10; let min_block_delta = 4; - let mut net = BeefyTestNet::new(2, 0); + let mut net = BeefyTestNet::new(2); let api = Arc::new(two_validators::TestApi {}); let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); @@ -542,17 +554,18 @@ fn beefy_finalizing_blocks() { // Minimum BEEFY block delta is 4. + let peers = peers.into_iter().enumerate(); // finalize block #5 -> BEEFY should finalize #1 (mandatory) and #5 from diff-power-of-two rule. 
- finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[5], &[1, 5]); + finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[5], &[1, 5]); // GRANDPA finalize #10 -> BEEFY finalize #10 (mandatory) - finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[10], &[10]); + finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[10], &[10]); // GRANDPA finalize #18 -> BEEFY finalize #14, then #18 (diff-power-of-two rule) - finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[18], &[14, 18]); + finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[18], &[14, 18]); // GRANDPA finalize #20 -> BEEFY finalize #20 (mandatory) - finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[20], &[20]); + finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[20], &[20]); // GRANDPA finalize #21 -> BEEFY finalize nothing (yet) because min delta is 4 finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[21], &[]); @@ -563,12 +576,12 @@ fn lagging_validators() { sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); - let peers = &[BeefyKeyring::Alice, BeefyKeyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(peers), 0).unwrap(); + let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; + let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 0).unwrap(); let session_len = 30; let min_block_delta = 1; - let mut net = BeefyTestNet::new(2, 0); + let mut net = BeefyTestNet::new(2); let api = Arc::new(two_validators::TestApi {}); let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); runtime.spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); @@ -578,13 +591,20 @@ fn lagging_validators() { let net = Arc::new(Mutex::new(net)); + let peers = peers.into_iter().enumerate(); // finalize block #15 -> BEEFY should finalize #1 (mandatory) and #9, #13, #14, #15 from // diff-power-of-two rule. 
- finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[15], &[1, 9, 13, 14, 15]); + finalize_block_and_wait_for_beefy( + &net, + peers.clone(), + &mut runtime, + &[15], + &[1, 9, 13, 14, 15], + ); // Alice finalizes #25, Bob lags behind let finalize = BlockId::number(25); - let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY let timeout = Some(Duration::from_millis(250)); @@ -592,21 +612,21 @@ fn lagging_validators() { streams_empty_after_timeout(versioned_finality_proof, &net, &mut runtime, None); // Bob catches up and also finalizes #25 - let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); // expected beefy finalizes block #17 from diff-power-of-two wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[23, 24, 25]); wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[23, 24, 25]); // Both finalize #30 (mandatory session) and #32 -> BEEFY finalize #30 (mandatory), #31, #32 - finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[30, 32], &[30, 31, 32]); + finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[30, 32], &[30, 31, 32]); // Verify that session-boundary votes get buffered by client and only processed once // session-boundary block is GRANDPA-finalized (this guarantees authenticity for the new session // validator set). 
// Alice finalizes session-boundary mandatory block #60, Bob lags behind - let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); let finalize = BlockId::number(60); net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY @@ -617,7 +637,7 @@ fn lagging_validators() { // Bob catches up and also finalizes #60 (and should have buffered Alice's vote on #60) let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); - // verify beefy skips intermediary votes, and successfully finalizes mandatory block #40 + // verify beefy skips intermediary votes, and successfully finalizes mandatory block #60 wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[60]); wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[60]); } @@ -627,13 +647,12 @@ fn correct_beefy_payload() { sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); - let peers = - &[BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie, BeefyKeyring::Dave]; - let validator_set = ValidatorSet::new(make_beefy_ids(peers), 0).unwrap(); + let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie, BeefyKeyring::Dave]; + let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 0).unwrap(); let session_len = 20; let min_block_delta = 2; - let mut net = BeefyTestNet::new(4, 0); + let mut net = BeefyTestNet::new(4); // Alice, Bob, Charlie will vote on good payloads let good_api = Arc::new(four_validators::TestApi {}); @@ -649,15 +668,16 @@ fn correct_beefy_payload() { let bad_peers = vec![(3, &BeefyKeyring::Dave, bad_api)]; runtime.spawn(initialize_beefy(&mut net, bad_peers, min_block_delta)); - // push 10 blocks + // push 12 blocks 
net.generate_blocks_and_sync(12, session_len, &validator_set, false); let net = Arc::new(Mutex::new(net)); + let peers = peers.into_iter().enumerate(); // with 3 good voters and 1 bad one, consensus should happen and best blocks produced. finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[10], &[1, 9]); let (best_blocks, versioned_finality_proof) = - get_beefy_streams(&mut net.lock(), &[BeefyKeyring::Alice]); + get_beefy_streams(&mut net.lock(), [(0, BeefyKeyring::Alice)].into_iter()); // now 2 good validators and 1 bad one are voting net.lock() @@ -686,7 +706,7 @@ fn correct_beefy_payload() { // 3rd good validator catches up and votes as well let (best_blocks, versioned_finality_proof) = - get_beefy_streams(&mut net.lock(), &[BeefyKeyring::Alice]); + get_beefy_streams(&mut net.lock(), [(0, BeefyKeyring::Alice)].into_iter()); net.lock() .peer(2) .client() @@ -707,11 +727,11 @@ fn beefy_importing_blocks() { sp_tracing::try_init_simple(); - let mut net = BeefyTestNet::new(2, 0); + let mut net = BeefyTestNet::new(2); let client = net.peer(0).client().clone(); let (mut block_import, _, peer_data) = net.make_block_import(client.clone()); - let PeerData { beefy_rpc_links: _, beefy_voter_links } = peer_data; + let PeerData { beefy_voter_links, .. } = peer_data; let justif_stream = beefy_voter_links.lock().take().unwrap().from_block_import_justif_stream; let params = |block: Block, justifications: Option| { @@ -826,18 +846,18 @@ fn voter_initialization() { // after waiting for BEEFY pallet availability. let mut runtime = Runtime::new().unwrap(); - let peers = &[BeefyKeyring::Alice, BeefyKeyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(peers), 0).unwrap(); + let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; + let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 0).unwrap(); let session_len = 5; // Should vote on all mandatory blocks no matter the `min_block_delta`. 
let min_block_delta = 10; - let mut net = BeefyTestNet::new(2, 0); + let mut net = BeefyTestNet::new(2); let api = Arc::new(two_validators::TestApi {}); let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); runtime.spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); - // push 30 blocks + // push 26 blocks net.generate_blocks_and_sync(26, session_len, &validator_set, false); let net = Arc::new(Mutex::new(net)); @@ -846,9 +866,90 @@ fn voter_initialization() { // Expect voters to pick up all of them and BEEFY-finalize the mandatory blocks of each session. finalize_block_and_wait_for_beefy( &net, - peers, + peers.into_iter().enumerate(), &mut runtime, &[1, 6, 10, 17, 24, 26], &[1, 5, 10, 15, 20, 25], ); } + +#[test] +fn on_demand_beefy_justification_sync() { + sp_tracing::try_init_simple(); + + let mut runtime = Runtime::new().unwrap(); + let all_peers = + [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie, BeefyKeyring::Dave]; + let validator_set = ValidatorSet::new(make_beefy_ids(&all_peers), 0).unwrap(); + let session_len = 5; + let min_block_delta = 5; + + let mut net = BeefyTestNet::new(4); + + // Alice, Bob, Charlie start first and make progress through voting. + let api = Arc::new(four_validators::TestApi {}); + let fast_peers = [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie]; + let voting_peers = + fast_peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); + runtime.spawn(initialize_beefy(&mut net, voting_peers, min_block_delta)); + + // Dave will start late and have to catch up using on-demand justification requests (since + // in this test there is no block import queue to automatically import justifications). + let dave = vec![(3, &BeefyKeyring::Dave, api)]; + // Instantiate but don't run Dave, yet. 
+ let dave_task = initialize_beefy(&mut net, dave, min_block_delta); + let dave_index = 3; + + // push 30 blocks + net.generate_blocks_and_sync(30, session_len, &validator_set, false); + + let fast_peers = fast_peers.into_iter().enumerate(); + let net = Arc::new(Mutex::new(net)); + // With 3 active voters and one inactive, consensus should happen and blocks BEEFY-finalized. + // Need to finalize at least one block in each session, choose randomly. + finalize_block_and_wait_for_beefy( + &net, + fast_peers.clone(), + &mut runtime, + &[1, 6, 10, 17, 24], + &[1, 5, 10, 15, 20], + ); + + // Spawn Dave, he's now way behind voting and can only catch up through on-demand justif sync. + runtime.spawn(dave_task); + // give Dave a chance to spawn and init. + run_for(Duration::from_millis(400), &net, &mut runtime); + + let (dave_best_blocks, _) = + get_beefy_streams(&mut net.lock(), [(dave_index, BeefyKeyring::Dave)].into_iter()); + net.lock() + .peer(dave_index) + .client() + .as_client() + .finalize_block(BlockId::number(1), None) + .unwrap(); + // Give Dave task some cpu cycles to process the finality notification, + run_for(Duration::from_millis(100), &net, &mut runtime); + // freshly spun up Dave now needs to listen for gossip to figure out the state of his peers. + + // Have the other peers do some gossip so Dave finds out about their progress. + finalize_block_and_wait_for_beefy(&net, fast_peers, &mut runtime, &[25], &[25]); + + // Now verify Dave successfully finalized #1 (through on-demand justification request). + wait_for_best_beefy_blocks(dave_best_blocks, &net, &mut runtime, &[1]); + + // Give Dave all tasks some cpu cycles to burn through their events queues, + run_for(Duration::from_millis(100), &net, &mut runtime); + // then verify Dave catches up through on-demand justification requests. 
+ finalize_block_and_wait_for_beefy( + &net, + [(dave_index, BeefyKeyring::Dave)].into_iter(), + &mut runtime, + &[6, 10, 17, 24, 26], + &[5, 10, 15, 20, 25], + ); + + let all_peers = all_peers.into_iter().enumerate(); + // Now that Dave has caught up, sanity check voting works for all of them. + finalize_block_and_wait_for_beefy(&net, all_peers, &mut runtime, &[30], &[30]); +} diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 6e8c89d804984..832b43315515f 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -24,10 +24,15 @@ use std::{ }; use codec::{Codec, Decode, Encode}; -use futures::{stream::Fuse, StreamExt}; +use futures::{stream::Fuse, FutureExt, StreamExt}; use log::{debug, error, info, log_enabled, trace, warn}; +use parking_lot::Mutex; use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, HeaderBackend}; +use sc_network_common::{ + protocol::event::Event as NetEvent, + service::{NetworkEventStream, NetworkRequest}, +}; use sc_network_gossip::GossipEngine; use sp_api::{BlockId, ProvideRuntimeApi}; @@ -48,14 +53,17 @@ use beefy_primitives::{ }; use crate::{ + communication::{ + gossip::{topic, GossipValidator}, + request_response::outgoing_requests_engine::OnDemandJustificationsEngine, + }, error::Error, - gossip::{topic, GossipValidator}, justification::BeefyVersionedFinalityProof, keystore::BeefyKeystore, metric_inc, metric_set, metrics::Metrics, round::Rounds, - BeefyVoterLinks, Client, + BeefyVoterLinks, Client, KnownPeers, }; enum RoundAction { @@ -113,6 +121,17 @@ impl VoterOracle { } } + /// Return current pending mandatory block, if any. + pub fn mandatory_pending(&self) -> Option> { + self.sessions.front().and_then(|round| { + if round.mandatory_done() { + None + } else { + Some(round.session_start()) + } + }) + } + /// Return `(A, B)` tuple representing inclusive [A, B] interval of votes to accept. 
pub fn accepted_interval( &self, @@ -175,29 +194,35 @@ impl VoterOracle { } } -pub(crate) struct WorkerParams { +pub(crate) struct WorkerParams { pub client: Arc, pub backend: Arc, pub runtime: Arc, - pub sync_oracle: SO, + pub network: N, pub key_store: BeefyKeystore, + pub known_peers: Arc>>, pub gossip_engine: GossipEngine, pub gossip_validator: Arc>, + pub on_demand_justifications: OnDemandJustificationsEngine, pub links: BeefyVoterLinks, pub metrics: Option, pub min_block_delta: u32, } /// A BEEFY worker plays the BEEFY protocol -pub(crate) struct BeefyWorker { +pub(crate) struct BeefyWorker { // utilities client: Arc, backend: Arc, runtime: Arc, - sync_oracle: SO, + network: N, key_store: BeefyKeystore, + + // communication + known_peers: Arc>>, gossip_engine: GossipEngine, gossip_validator: Arc>, + on_demand_justifications: OnDemandJustificationsEngine, // channels /// Links between the block importer, the background voter and the RPC layer. @@ -218,14 +243,14 @@ pub(crate) struct BeefyWorker { voting_oracle: VoterOracle, } -impl BeefyWorker +impl BeefyWorker where B: Block + Codec, BE: Backend, C: Client, R: ProvideRuntimeApi, R::Api: BeefyApi + MmrApi, - SO: SyncOracle + Send + Sync + Clone + 'static, + N: NetworkEventStream + NetworkRequest + SyncOracle + Send + Sync + Clone + 'static, { /// Return a new BEEFY worker instance. /// @@ -233,15 +258,17 @@ where /// BEEFY pallet has been deployed on-chain. /// /// The BEEFY pallet is needed in order to keep track of the BEEFY authority set. 
- pub(crate) fn new(worker_params: WorkerParams) -> Self { + pub(crate) fn new(worker_params: WorkerParams) -> Self { let WorkerParams { client, backend, runtime, key_store, - sync_oracle, + network, gossip_engine, gossip_validator, + on_demand_justifications, + known_peers, links, metrics, min_block_delta, @@ -256,10 +283,12 @@ where client: client.clone(), backend, runtime, - sync_oracle, + network, + known_peers, key_store, gossip_engine, gossip_validator, + on_demand_justifications, links, metrics, best_grandpa_block_header: last_finalized_header, @@ -366,8 +395,6 @@ where { if let Some(new_validator_set) = find_authorities_change::(&header) { self.init_session_at(new_validator_set, *header.number()); - // TODO (grandpa-bridge-gadget/issues/20): when adding SYNC protocol, - // fire up a request for justification for this mandatory block here. } } } @@ -408,7 +435,10 @@ where let block_num = signed_commitment.commitment.block_number; let best_grandpa = *self.best_grandpa_block_header.number(); match self.voting_oracle.triage_round(block_num, best_grandpa)? { - RoundAction::Process => self.finalize(justification)?, + RoundAction::Process => { + debug!(target: "beefy", "🥩 Process justification for round: {:?}.", block_num); + self.finalize(justification)? 
+ }, RoundAction::Enqueue => { debug!(target: "beefy", "🥩 Buffer justification for round: {:?}.", block_num); self.pending_justifications.entry(block_num).or_insert(justification); @@ -429,7 +459,7 @@ where let rounds = self.voting_oracle.rounds_mut().ok_or(Error::UninitSession)?; if rounds.add_vote(&round, vote, self_vote) { - if let Some(signatures) = rounds.try_conclude(&round) { + if let Some(signatures) = rounds.should_conclude(&round) { self.gossip_validator.conclude_round(round.1); let block_num = round.1; @@ -474,6 +504,8 @@ where self.best_beefy_block = Some(block_num); metric_set!(self, beefy_best_block, block_num); + self.on_demand_justifications.cancel_requests_older_than(block_num); + if let Err(e) = self.backend.append_justification( BlockId::Number(block_num), (BEEFY_ENGINE_ID, finality_proof.clone().encode()), @@ -735,7 +767,7 @@ where let at = BlockId::hash(notif.header.hash()); if let Some(active) = self.runtime.runtime_api().validator_set(&at).ok().flatten() { self.initialize_voter(¬if.header, active); - if !self.sync_oracle.is_major_syncing() { + if !self.network.is_major_syncing() { if let Err(err) = self.try_to_vote() { debug!(target: "beefy", "🥩 {}", err); } @@ -768,6 +800,7 @@ where self.wait_for_runtime_pallet(&mut finality_notifications).await; trace!(target: "beefy", "🥩 BEEFY pallet available, starting voter."); + let mut network_events = self.network.event_stream("network-gossip").fuse(); let mut votes = Box::pin( self.gossip_engine .messages_for(topic::()) @@ -788,15 +821,38 @@ where // The branches below only change 'state', actual voting happen afterwards, // based on the new resulting 'state'. futures::select_biased! { + // Use `select_biased!` to prioritize order below. + // Make sure to pump gossip engine. + _ = gossip_engine => { + error!(target: "beefy", "🥩 Gossip engine has terminated, closing worker."); + return; + }, + // Keep track of connected peers. 
+ net_event = network_events.next() => { + if let Some(net_event) = net_event { + self.handle_network_event(net_event); + } else { + error!(target: "beefy", "🥩 Network events stream terminated, closing worker."); + return; + } + }, + // Process finality notifications first since these drive the voter. notification = finality_notifications.next() => { if let Some(notification) = notification { self.handle_finality_notification(¬ification); } else { + error!(target: "beefy", "🥩 Finality stream terminated, closing worker."); return; } }, - // TODO: when adding SYNC protocol, join the on-demand justifications stream to - // this one, and handle them both here. + // Process incoming justifications as these can make some in-flight votes obsolete. + justif = self.on_demand_justifications.next().fuse() => { + if let Some(justif) = justif { + if let Err(err) = self.triage_incoming_justif(justif) { + debug!(target: "beefy", "🥩 {}", err); + } + } + }, justif = block_import_justif.next() => { if let Some(justif) = justif { // Block import justifications have already been verified to be valid @@ -805,9 +861,11 @@ where debug!(target: "beefy", "🥩 {}", err); } } else { + error!(target: "beefy", "🥩 Block import stream terminated, closing worker."); return; } }, + // Finally process incoming votes. vote = votes.next() => { if let Some(vote) = vote { // Votes have already been verified to be valid by the gossip validator. @@ -815,13 +873,10 @@ where debug!(target: "beefy", "🥩 {}", err); } } else { + error!(target: "beefy", "🥩 Votes gossiping stream terminated, closing worker."); return; } }, - _ = gossip_engine => { - error!(target: "beefy", "🥩 Gossip engine has terminated."); - return; - } } // Handle pending justifications and/or votes for now GRANDPA finalized blocks. @@ -829,8 +884,14 @@ where debug!(target: "beefy", "🥩 {}", err); } - // Don't bother voting during major sync. 
- if !self.sync_oracle.is_major_syncing() { + // Don't bother voting or requesting justifications during major sync. + if !self.network.is_major_syncing() { + // If the current target is a mandatory block, + // make sure there's also an on-demand justification request out for it. + if let Some(block) = self.voting_oracle.mandatory_pending() { + // This only starts new request if there isn't already an active one. + self.on_demand_justifications.request(block); + } // There were external events, 'state' is changed, author a vote if needed/possible. if let Err(err) = self.try_to_vote() { debug!(target: "beefy", "🥩 {}", err); @@ -840,6 +901,20 @@ where } } } + + /// Update known peers based on network events. + fn handle_network_event(&mut self, event: NetEvent) { + match event { + NetEvent::SyncConnected { remote } => { + self.known_peers.lock().add_new(remote); + }, + NetEvent::SyncDisconnected { remote } => { + self.known_peers.lock().remove(&remote); + }, + // We don't care about other events. + _ => (), + } + } } /// Extract the MMR root hash from a digest in the given header, if it exists. 
@@ -932,11 +1007,11 @@ where pub(crate) mod tests { use super::*; use crate::{ + communication::notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}, keystore::tests::Keyring, - notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}, tests::{ create_beefy_keystore, get_beefy_streams, make_beefy_ids, two_validators::TestApi, - BeefyPeer, BeefyTestNet, BEEFY_PROTOCOL_NAME, + BeefyPeer, BeefyTestNet, }, BeefyRPCLinks, }; @@ -979,21 +1054,29 @@ pub(crate) mod tests { let api = Arc::new(TestApi {}); let network = peer.network_service().clone(); - let sync_oracle = network.clone(); - let gossip_validator = Arc::new(crate::gossip::GossipValidator::new()); + let known_peers = Arc::new(Mutex::new(KnownPeers::new())); + let gossip_validator = Arc::new(GossipValidator::new(known_peers.clone())); let gossip_engine = - GossipEngine::new(network, BEEFY_PROTOCOL_NAME, gossip_validator.clone(), None); + GossipEngine::new(network.clone(), "/beefy/1", gossip_validator.clone(), None); + let on_demand_justifications = OnDemandJustificationsEngine::new( + network.clone(), + api.clone(), + "/beefy/justifs/1".into(), + known_peers.clone(), + ); let worker_params = crate::worker::WorkerParams { client: peer.client().as_client(), backend: peer.client().as_backend(), runtime: api, key_store: Some(keystore).into(), + known_peers, links, gossip_engine, gossip_validator, min_block_delta, metrics: None, - sync_oracle, + network, + on_demand_justifications, }; BeefyWorker::<_, _, _, _, _>::new(worker_params) } @@ -1245,7 +1328,7 @@ pub(crate) mod tests { fn keystore_vs_validator_set() { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1, 0); + let mut net = BeefyTestNet::new(1); let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); // keystore doesn't contain other keys than validators' @@ -1266,13 +1349,15 @@ pub(crate) mod tests { #[test] fn 
should_finalize_correctly() { - let keys = &[Keyring::Alice]; - let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1, 0); + let keys = [Keyring::Alice]; + let validator_set = ValidatorSet::new(make_beefy_ids(&keys), 0).unwrap(); + let mut net = BeefyTestNet::new(1); let backend = net.peer(0).client().as_backend(); let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); - let (mut best_block_streams, mut finality_proofs) = get_beefy_streams(&mut net, keys); + let keys = keys.iter().cloned().enumerate(); + let (mut best_block_streams, mut finality_proofs) = + get_beefy_streams(&mut net, keys.clone()); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); let mut finality_proof = finality_proofs.drain(..).next().unwrap(); @@ -1294,7 +1379,8 @@ pub(crate) mod tests { })); // unknown hash for block #1 - let (mut best_block_streams, mut finality_proofs) = get_beefy_streams(&mut net, keys); + let (mut best_block_streams, mut finality_proofs) = + get_beefy_streams(&mut net, keys.clone()); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); let mut finality_proof = finality_proofs.drain(..).next().unwrap(); let justif = create_finality_proof(1); @@ -1355,7 +1441,7 @@ pub(crate) mod tests { fn should_init_session() { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1, 0); + let mut net = BeefyTestNet::new(1); let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); assert!(worker.voting_oracle.sessions.is_empty()); @@ -1389,7 +1475,7 @@ pub(crate) mod tests { fn should_triage_votes_and_process_later() { let keys = &[Keyring::Alice, Keyring::Bob]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1, 0); + let mut net = BeefyTestNet::new(1); let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); fn new_vote( @@ 
-1450,7 +1536,7 @@ pub(crate) mod tests { fn should_initialize_correct_voter() { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 1).unwrap(); - let mut net = BeefyTestNet::new(1, 0); + let mut net = BeefyTestNet::new(1); let backend = net.peer(0).client().as_backend(); // push 15 blocks with `AuthorityChange` digests every 10 blocks diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 2f6b788e368b3..9d5abf98ceff0 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -48,7 +48,7 @@ use sc_consensus::{ Verifier, }; use sc_network::{ - config::{NetworkConfiguration, Role, SyncMode}, + config::{NetworkConfiguration, RequestResponseConfig, Role, SyncMode}, Multiaddr, NetworkService, NetworkWorker, }; use sc_network_common::{ @@ -688,6 +688,8 @@ pub struct FullPeerConfig { pub block_announce_validator: Option + Send + Sync>>, /// List of notification protocols that the network must support. pub notifications_protocols: Vec, + /// List of request-response protocols that the network must support. + pub request_response_protocols: Vec, /// The indices of the peers the peer should be connected to. /// /// If `None`, it will be connected to all other peers. 
@@ -790,6 +792,9 @@ where network_config.transport = TransportConfig::MemoryOnly; network_config.listen_addresses = vec![listen_addr.clone()]; network_config.allow_non_globals_in_dht = true; + network_config + .request_response_protocols + .extend(config.request_response_protocols); network_config.extra_sets = config .notifications_protocols .into_iter() From 25795506052363e8b5795eb3526e61ef2a27d89a Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 3 Oct 2022 15:17:59 +0200 Subject: [PATCH 32/42] Fix `Weight::is_zero` (#12396) * Fix Weight::is_zero Signed-off-by: Oliver Tale-Yazdi * Add test Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- primitives/weights/src/weight_v2.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/primitives/weights/src/weight_v2.rs b/primitives/weights/src/weight_v2.rs index a8eaf79a28711..8596a782c1fa7 100644 --- a/primitives/weights/src/weight_v2.rs +++ b/primitives/weights/src/weight_v2.rs @@ -318,7 +318,7 @@ impl Zero for Weight { } fn is_zero(&self) -> bool { - self.ref_time == 0 + self == &Self::zero() } } @@ -447,3 +447,16 @@ impl SubAssign for Weight { }; } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn is_zero_works() { + assert!(Weight::zero().is_zero()); + assert!(!Weight::from_components(1, 0).is_zero()); + assert!(!Weight::from_components(0, 1).is_zero()); + assert!(!Weight::MAX.is_zero()); + } +} From 1b23ec9f6d7880b358072b97d0030d3352cb20aa Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Mon, 3 Oct 2022 23:50:00 +0800 Subject: [PATCH 33/42] Remove unnecessary Clone trait bounds on CountedStorageMap (#12402) * Remove unnecessary Clone trait bounds on CountedStorageMap * cargo fmt --- .../support/src/storage/types/counted_map.rs | 21 ++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/frame/support/src/storage/types/counted_map.rs b/frame/support/src/storage/types/counted_map.rs index c4027acfe7232..8c19434767f49 
100644 --- a/frame/support/src/storage/types/counted_map.rs +++ b/frame/support/src/storage/types/counted_map.rs @@ -143,10 +143,7 @@ where } /// Store a value to be associated with the given key from the map. - pub fn insert + Clone, ValArg: EncodeLike>( - key: KeyArg, - val: ValArg, - ) { + pub fn insert, ValArg: EncodeLike>(key: KeyArg, val: ValArg) { if !::Map::contains_key(Ref::from(&key)) { CounterFor::::mutate(|value| value.saturating_inc()); } @@ -154,7 +151,7 @@ where } /// Remove the value under a key. - pub fn remove + Clone>(key: KeyArg) { + pub fn remove>(key: KeyArg) { if ::Map::contains_key(Ref::from(&key)) { CounterFor::::mutate(|value| value.saturating_dec()); } @@ -162,7 +159,7 @@ where } /// Mutate the value under a key. - pub fn mutate + Clone, R, F: FnOnce(&mut QueryKind::Query) -> R>( + pub fn mutate, R, F: FnOnce(&mut QueryKind::Query) -> R>( key: KeyArg, f: F, ) -> R { @@ -173,7 +170,7 @@ where /// Mutate the item, only if an `Ok` value is returned. pub fn try_mutate(key: KeyArg, f: F) -> Result where - KeyArg: EncodeLike + Clone, + KeyArg: EncodeLike, F: FnOnce(&mut QueryKind::Query) -> Result, { Self::try_mutate_exists(key, |option_value_ref| { @@ -187,7 +184,7 @@ where } /// Mutate the value under a key. Deletes the item if mutated to a `None`. - pub fn mutate_exists + Clone, R, F: FnOnce(&mut Option) -> R>( + pub fn mutate_exists, R, F: FnOnce(&mut Option) -> R>( key: KeyArg, f: F, ) -> R { @@ -200,7 +197,7 @@ where /// or if the storage item does not exist (`None`), independent of the `QueryType`. pub fn try_mutate_exists(key: KeyArg, f: F) -> Result where - KeyArg: EncodeLike + Clone, + KeyArg: EncodeLike, F: FnOnce(&mut Option) -> Result, { ::Map::try_mutate_exists(key, |option_value| { @@ -222,7 +219,7 @@ where } /// Take the value under a key. 
- pub fn take + Clone>(key: KeyArg) -> QueryKind::Query { + pub fn take>(key: KeyArg) -> QueryKind::Query { let removed_value = ::Map::mutate_exists(key, |value| value.take()); if removed_value.is_some() { CounterFor::::mutate(|value| value.saturating_dec()); @@ -240,7 +237,7 @@ where /// `[item]`. Any default value set for the storage item will be ignored on overwrite. pub fn append(key: EncodeLikeKey, item: EncodeLikeItem) where - EncodeLikeKey: EncodeLike + Clone, + EncodeLikeKey: EncodeLike, Item: Encode, EncodeLikeItem: EncodeLike, Value: StorageAppend, @@ -355,7 +352,7 @@ where /// Is only available if `Value` of the storage implements [`StorageTryAppend`]. pub fn try_append(key: KArg, item: EncodeLikeItem) -> Result<(), ()> where - KArg: EncodeLike + Clone, + KArg: EncodeLike, Item: Encode, EncodeLikeItem: EncodeLike, Value: StorageTryAppend, From 6d7f76b5de00d4d4fdc55596abe86beb7d55f0b3 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Tue, 4 Oct 2022 09:58:05 +0300 Subject: [PATCH 34/42] docs/CODEOWNERS: add @acatangiu as MMR owner (#12406) --- docs/CODEOWNERS | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index 0b9e6e7783058..cf2067d19450d 100644 --- a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -36,11 +36,13 @@ /client/consensus/pow/ @sorpaas /primitives/consensus/pow/ @sorpaas -# BEEFY +# BEEFY, MMR /client/beefy/ @acatangiu /frame/beefy/ @acatangiu /frame/beefy-mmr/ @acatangiu +/frame/merkle-mountain-range/ @acatangiu /primitives/beefy/ @acatangiu +/primitives/merkle-mountain-range/ @acatangiu # Contracts /frame/contracts/ @athei From 594d71afca8e70ed84297b01472bb1250d89ebd1 Mon Sep 17 00:00:00 2001 From: Koute Date: Tue, 4 Oct 2022 17:01:50 +0900 Subject: [PATCH 35/42] Add @koute to `docs/CODEOWNERS` and update stale paths (#12408) --- docs/CODEOWNERS | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index 
cf2067d19450d..133ba7b094d43 100644 --- a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -24,8 +24,19 @@ /.gitlab-ci.yml @paritytech/ci # Sandboxing capability of Substrate Runtime -/primitives/sr-sandbox/ @pepyakin -/primitives/core/src/sandbox.rs @pepyakin +/primitives/sandbox/ @pepyakin @koute + +# WASM executor, low-level client <-> WASM interface and other WASM-related code +/client/executor/ @koute +/client/allocator/ @koute +/primitives/wasm-interface/ @koute +/primitives/runtime-interface/ @koute +/primitives/panic-handler/ @koute +/utils/wasm-builder/ @koute + +# Systems-related bits and bobs on the client side +/client/sysinfo/ @koute +/client/tracing/ @koute # GRANDPA, BABE, consensus stuff /frame/babe/ @andresilva From e77cbe39c4d1bfd978bb03c686fc9f60d86a3d06 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Tue, 4 Oct 2022 11:47:13 +0300 Subject: [PATCH 36/42] BEEFY: Simplify hashing for pallet-beefy-mmr (#12393) * beefy-mmr: reuse sp_runtime::traits::Keccak256 * beefy-mmr: use sp_runtime::traits:Hash for generating merkle proofs * beefy-mmr: use sp_runtime::traits:Hash for validating merkle proofs * beefy-mmr: remove primitives::Hasher and primitives::Hash * fixes * beefy-mmr: reduce the number of generic parameters for merkle_root() * fix * compute upper Vec capacity more accurately --- Cargo.lock | 2 +- frame/beefy-mmr/primitives/Cargo.toml | 8 +- frame/beefy-mmr/primitives/src/lib.rs | 236 ++++++++++++-------------- frame/beefy-mmr/src/lib.rs | 27 +-- frame/beefy-mmr/src/mock.rs | 3 +- 5 files changed, 118 insertions(+), 158 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 723a09ee9a39f..2f0a2df0f101b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -525,7 +525,7 @@ dependencies = [ "env_logger", "log", "sp-api", - "tiny-keccak", + "sp-runtime", ] [[package]] diff --git a/frame/beefy-mmr/primitives/Cargo.toml b/frame/beefy-mmr/primitives/Cargo.toml index 1aa2573c7f680..a097da0fc30fd 100644 --- a/frame/beefy-mmr/primitives/Cargo.toml +++ 
b/frame/beefy-mmr/primitives/Cargo.toml @@ -11,10 +11,10 @@ homepage = "https://substrate.io" [dependencies] array-bytes = { version = "4.1", optional = true } log = { version = "0.4", default-features = false, optional = true } -tiny-keccak = { version = "2.0.2", features = ["keccak"], optional = true } beefy-primitives = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/beefy" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } +sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } [dev-dependencies] array-bytes = "4.1" @@ -22,9 +22,9 @@ env_logger = "0.9" [features] debug = ["array-bytes", "log"] -default = ["debug", "keccak", "std"] -keccak = ["tiny-keccak"] +default = ["debug", "std"] std = [ "beefy-primitives/std", - "sp-api/std" + "sp-api/std", + "sp-runtime/std" ] diff --git a/frame/beefy-mmr/primitives/src/lib.rs b/frame/beefy-mmr/primitives/src/lib.rs index 38831d7914715..f56be8bcafe5b 100644 --- a/frame/beefy-mmr/primitives/src/lib.rs +++ b/frame/beefy-mmr/primitives/src/lib.rs @@ -25,88 +25,49 @@ //! compilation targets. //! //! Merkle Tree is constructed from arbitrary-length leaves, that are initially hashed using the -//! same [Hasher] as the inner nodes. +//! same hasher as the inner nodes. //! Inner nodes are created by concatenating child hashes and hashing again. The implementation //! does not perform any sorting of the input data (leaves) nor when inner nodes are created. //! //! If the number of leaves is not even, last leave (hash of) is promoted to the upper layer. -#[cfg(not(feature = "std"))] -extern crate alloc; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; +pub use sp_runtime::traits::Keccak256; +use sp_runtime::{app_crypto::sp_core, sp_std, traits::Hash as HashT}; +use sp_std::{vec, vec::Vec}; use beefy_primitives::mmr::{BeefyAuthoritySet, BeefyNextAuthoritySet}; -/// Supported hashing output size. 
-/// -/// The size is restricted to 32 bytes to allow for a more optimised implementation. -pub type Hash = [u8; 32]; - -/// Generic hasher trait. -/// -/// Implement the function to support custom way of hashing data. -/// The implementation must return a [Hash](type@Hash) type, so only 32-byte output hashes are -/// supported. -pub trait Hasher { - /// Hash given arbitrary-length piece of data. - fn hash(data: &[u8]) -> Hash; -} - -#[cfg(feature = "keccak")] -mod keccak256 { - use tiny_keccak::{Hasher as _, Keccak}; - - /// Keccak256 hasher implementation. - pub struct Keccak256; - impl Keccak256 { - /// Hash given data. - pub fn hash(data: &[u8]) -> super::Hash { - ::hash(data) - } - } - impl super::Hasher for Keccak256 { - fn hash(data: &[u8]) -> super::Hash { - let mut keccak = Keccak::v256(); - keccak.update(data); - let mut output = [0_u8; 32]; - keccak.finalize(&mut output); - output - } - } -} -#[cfg(feature = "keccak")] -pub use keccak256::Keccak256; - /// Construct a root hash of a Binary Merkle Tree created from given leaves. /// /// See crate-level docs for details about Merkle Tree construction. /// /// In case an empty list of leaves is passed the function returns a 0-filled hash. 
-pub fn merkle_root(leaves: I) -> Hash +pub fn merkle_root(leaves: I) -> H::Output where - H: Hasher, - I: IntoIterator, - T: AsRef<[u8]>, + H: HashT, + H::Output: Default + AsRef<[u8]>, + I: IntoIterator, + I::Item: AsRef<[u8]>, { - let iter = leaves.into_iter().map(|l| H::hash(l.as_ref())); - merkelize::(iter, &mut ()) + let iter = leaves.into_iter().map(|l| ::hash(l.as_ref())); + merkelize::(iter, &mut ()).into() } -fn merkelize(leaves: I, visitor: &mut V) -> Hash +fn merkelize(leaves: I, visitor: &mut V) -> H::Output where - H: Hasher, - V: Visitor, - I: Iterator, + H: HashT, + H::Output: Default + AsRef<[u8]>, + V: Visitor, + I: Iterator, { - let upper = Vec::with_capacity(leaves.size_hint().0); + let upper = Vec::with_capacity((leaves.size_hint().1.unwrap_or(0).saturating_add(1)) / 2); let mut next = match merkelize_row::(leaves, upper, visitor) { Ok(root) => return root, - Err(next) if next.is_empty() => return Hash::default(), + Err(next) if next.is_empty() => return H::Output::default(), Err(next) => next, }; - let mut upper = Vec::with_capacity((next.len() + 1) / 2); + let mut upper = Vec::with_capacity((next.len().saturating_add(1)) / 2); loop { visitor.move_up(); @@ -125,14 +86,14 @@ where /// /// The structure contains all necessary data to later on verify the proof and the leaf itself. #[derive(Debug, PartialEq, Eq)] -pub struct MerkleProof { +pub struct MerkleProof { /// Root hash of generated merkle tree. - pub root: Hash, + pub root: H, /// Proof items (does not contain the leaf hash, nor the root obviously). /// /// This vec contains all inner node hashes necessary to reconstruct the root hash given the /// leaf hash. - pub proof: Vec, + pub proof: Vec, /// Number of leaves in the original tree. /// /// This is needed to detect a case where we have an odd number of leaves that "get promoted" @@ -141,14 +102,14 @@ pub struct MerkleProof { /// Index of the leaf the proof is for (0-based). pub leaf_index: usize, /// Leaf content. 
- pub leaf: T, + pub leaf: L, } /// A trait of object inspecting merkle root creation. /// /// It can be passed to [`merkelize_row`] or [`merkelize`] functions and will be notified /// about tree traversal. -trait Visitor { +trait Visitor { /// We are moving one level up in the tree. fn move_up(&mut self); @@ -158,13 +119,13 @@ trait Visitor { /// The method will also visit the `root` hash (level 0). /// /// The `index` is an index of `left` item. - fn visit(&mut self, index: usize, left: &Option, right: &Option); + fn visit(&mut self, index: usize, left: &Option, right: &Option); } /// No-op implementation of the visitor. -impl Visitor for () { +impl Visitor for () { fn move_up(&mut self) {} - fn visit(&mut self, _index: usize, _left: &Option, _right: &Option) {} + fn visit(&mut self, _index: usize, _left: &Option, _right: &Option) {} } /// Construct a Merkle Proof for leaves given by indices. @@ -177,16 +138,17 @@ impl Visitor for () { /// # Panic /// /// The function will panic if given `leaf_index` is greater than the number of leaves. -pub fn merkle_proof(leaves: I, leaf_index: usize) -> MerkleProof +pub fn merkle_proof(leaves: I, leaf_index: usize) -> MerkleProof where - H: Hasher, + H: HashT, + H::Output: Default + Copy + AsRef<[u8]>, I: IntoIterator, I::IntoIter: ExactSizeIterator, T: AsRef<[u8]>, { let mut leaf = None; let iter = leaves.into_iter().enumerate().map(|(idx, l)| { - let hash = H::hash(l.as_ref()); + let hash = ::hash(l.as_ref()); if idx == leaf_index { leaf = Some(l); } @@ -194,23 +156,23 @@ where }); /// The struct collects a proof for single leaf. 
- struct ProofCollection { - proof: Vec, + struct ProofCollection { + proof: Vec, position: usize, } - impl ProofCollection { + impl ProofCollection { fn new(position: usize) -> Self { ProofCollection { proof: Default::default(), position } } } - impl Visitor for ProofCollection { + impl Visitor for ProofCollection { fn move_up(&mut self) { self.position /= 2; } - fn visit(&mut self, index: usize, left: &Option, right: &Option) { + fn visit(&mut self, index: usize, left: &Option, right: &Option) { // we are at left branch - right goes to the proof. if self.position == index { if let Some(right) = right { @@ -238,7 +200,7 @@ where collect_proof .proof .iter() - .map(|s| array_bytes::bytes2hex("", s)) + .map(|s| array_bytes::bytes2hex("", s.as_ref())) .collect::>() ); @@ -250,25 +212,19 @@ where /// Can be either a value that needs to be hashed first, /// or the hash itself. #[derive(Debug, PartialEq, Eq)] -pub enum Leaf<'a> { +pub enum Leaf<'a, H> { /// Leaf content. Value(&'a [u8]), /// Hash of the leaf content. - Hash(Hash), + Hash(H), } -impl<'a, T: AsRef<[u8]>> From<&'a T> for Leaf<'a> { +impl<'a, H, T: AsRef<[u8]>> From<&'a T> for Leaf<'a, H> { fn from(v: &'a T) -> Self { Leaf::Value(v.as_ref()) } } -impl<'a> From for Leaf<'a> { - fn from(v: Hash) -> Self { - Leaf::Hash(v) - } -} - /// Verify Merkle Proof correctness versus given root hash. /// /// The proof is NOT expected to contain leaf hash as the first @@ -277,45 +233,47 @@ impl<'a> From for Leaf<'a> { /// /// The proof must not contain the root hash. 
pub fn verify_proof<'a, H, P, L>( - root: &'a Hash, + root: &'a H::Output, proof: P, number_of_leaves: usize, leaf_index: usize, leaf: L, ) -> bool where - H: Hasher, - P: IntoIterator, - L: Into>, + H: HashT, + H::Output: PartialEq + AsRef<[u8]>, + P: IntoIterator, + L: Into>, { if leaf_index >= number_of_leaves { return false } let leaf_hash = match leaf.into() { - Leaf::Value(content) => H::hash(content), + Leaf::Value(content) => ::hash(content), Leaf::Hash(hash) => hash, }; - let mut combined = [0_u8; 64]; + let hash_len = ::LENGTH; + let mut combined = vec![0_u8; hash_len * 2]; let mut position = leaf_index; let mut width = number_of_leaves; let computed = proof.into_iter().fold(leaf_hash, |a, b| { if position % 2 == 1 || position + 1 == width { - combined[0..32].copy_from_slice(&b); - combined[32..64].copy_from_slice(&a); + combined[..hash_len].copy_from_slice(&b.as_ref()); + combined[hash_len..].copy_from_slice(&a.as_ref()); } else { - combined[0..32].copy_from_slice(&a); - combined[32..64].copy_from_slice(&b); + combined[..hash_len].copy_from_slice(&a.as_ref()); + combined[hash_len..].copy_from_slice(&b.as_ref()); } - let hash = H::hash(&combined); + let hash = ::hash(&combined); #[cfg(feature = "debug")] log::debug!( "[verify_proof]: (a, b) {:?}, {:?} => {:?} ({:?}) hash", - array_bytes::bytes2hex("", &a), - array_bytes::bytes2hex("", &b), - array_bytes::bytes2hex("", &hash), - array_bytes::bytes2hex("", &combined) + array_bytes::bytes2hex("", &a.as_ref()), + array_bytes::bytes2hex("", &b.as_ref()), + array_bytes::bytes2hex("", &hash.as_ref()), + array_bytes::bytes2hex("", &combined.as_ref()) ); position /= 2; width = ((width - 1) / 2) + 1; @@ -332,20 +290,22 @@ where /// empty iterator) an `Err` with the inner nodes of upper layer is returned. 
fn merkelize_row( mut iter: I, - mut next: Vec, + mut next: Vec, visitor: &mut V, -) -> Result> +) -> Result> where - H: Hasher, - V: Visitor, - I: Iterator, + H: HashT, + H::Output: AsRef<[u8]>, + V: Visitor, + I: Iterator, { #[cfg(feature = "debug")] log::debug!("[merkelize_row]"); next.clear(); + let hash_len = ::LENGTH; let mut index = 0; - let mut combined = [0_u8; 64]; + let mut combined = vec![0_u8; hash_len * 2]; loop { let a = iter.next(); let b = iter.next(); @@ -354,17 +314,17 @@ where #[cfg(feature = "debug")] log::debug!( " {:?}\n {:?}", - a.as_ref().map(|s| array_bytes::bytes2hex("", s)), - b.as_ref().map(|s| array_bytes::bytes2hex("", s)) + a.as_ref().map(|s| array_bytes::bytes2hex("", s.as_ref())), + b.as_ref().map(|s| array_bytes::bytes2hex("", s.as_ref())) ); index += 2; match (a, b) { (Some(a), Some(b)) => { - combined[0..32].copy_from_slice(&a); - combined[32..64].copy_from_slice(&b); + combined[..hash_len].copy_from_slice(a.as_ref()); + combined[hash_len..].copy_from_slice(b.as_ref()); - next.push(H::hash(&combined)); + next.push(::hash(&combined)); }, // Odd number of items. Promote the item to the upper layer. (Some(a), None) if !next.is_empty() => { @@ -377,7 +337,7 @@ where #[cfg(feature = "debug")] log::debug!( "[merkelize_row] Next: {:?}", - next.iter().map(|s| array_bytes::bytes2hex("", s)).collect::>() + next.iter().map(|s| array_bytes::bytes2hex("", s.as_ref())).collect::>() ); return Err(next) }, @@ -389,7 +349,6 @@ sp_api::decl_runtime_apis! { /// API useful for BEEFY light clients. pub trait BeefyMmrApi where - H: From + Into, BeefyAuthoritySet: sp_api::Decode, { /// Return the currently active BEEFY authority set proof. @@ -403,6 +362,7 @@ sp_api::decl_runtime_apis! 
{ #[cfg(test)] mod tests { use super::*; + use crate::sp_core::H256; #[test] fn should_generate_empty_root() { @@ -411,11 +371,11 @@ mod tests { let data: Vec<[u8; 1]> = Default::default(); // when - let out = merkle_root::(data); + let out = merkle_root::(data); // then assert_eq!( - array_bytes::bytes2hex("", &out), + array_bytes::bytes2hex("", out.as_ref()), "0000000000000000000000000000000000000000000000000000000000000000" ); } @@ -429,11 +389,11 @@ mod tests { )]; // when - let out = merkle_root::(data); + let out = merkle_root::(data); // then assert_eq!( - array_bytes::bytes2hex("", &out), + array_bytes::bytes2hex("", out.as_ref()), "aeb47a269393297f4b0a3c9c9cfd00c7a4195255274cf39d83dabc2fcc9ff3d7" ); } @@ -448,11 +408,11 @@ mod tests { ]; // when - let out = merkle_root::(data); + let out = merkle_root::(data); // then assert_eq!( - array_bytes::bytes2hex("", &out), + array_bytes::bytes2hex("", out.as_ref()), "697ea2a8fe5b03468548a7a413424a6292ab44a82a6f5cc594c3fa7dda7ce402" ); } @@ -461,7 +421,10 @@ mod tests { fn should_generate_root_complex() { let _ = env_logger::try_init(); let test = |root, data| { - assert_eq!(array_bytes::bytes2hex("", &merkle_root::(data)), root); + assert_eq!( + array_bytes::bytes2hex("", &merkle_root::(data).as_ref()), + root + ); }; test( @@ -521,18 +484,19 @@ mod tests { // then assert_eq!( - array_bytes::bytes2hex("", &proof0.root), - array_bytes::bytes2hex("", &proof1.root) + array_bytes::bytes2hex("", &proof0.root.as_ref()), + array_bytes::bytes2hex("", &proof1.root.as_ref()) ); assert_eq!( - array_bytes::bytes2hex("", &proof2.root), - array_bytes::bytes2hex("", &proof1.root) + array_bytes::bytes2hex("", &proof2.root.as_ref()), + array_bytes::bytes2hex("", &proof1.root.as_ref()) ); assert!(!verify_proof::( &array_bytes::hex2array_unchecked( "fb3b3be94be9e983ba5e094c9c51a7d96a4fa2e5d8e891df00ca89ba05bb1239" - ), + ) + .into(), proof0.proof, data.len(), proof0.leaf_index, @@ -540,7 +504,7 @@ mod tests { )); 
assert!(!verify_proof::( - &proof0.root, + &proof0.root.into(), vec![], data.len(), proof0.leaf_index, @@ -796,9 +760,10 @@ mod tests { "0xA4cDc98593CE52d01Fe5Ca47CB3dA5320e0D7592", "0xc26B34D375533fFc4c5276282Fa5D660F3d8cbcB", ]; - let root = array_bytes::hex2array_unchecked( + let root: H256 = array_bytes::hex2array_unchecked( "72b0acd7c302a84f1f6b6cefe0ba7194b7398afb440e1b44a9dbbe270394ca53", - ); + ) + .into(); let data = addresses .into_iter() @@ -808,7 +773,10 @@ mod tests { for l in 0..data.len() { // when let proof = merkle_proof::(data.clone(), l); - assert_eq!(array_bytes::bytes2hex("", &proof.root), array_bytes::bytes2hex("", &root)); + assert_eq!( + array_bytes::bytes2hex("", &proof.root.as_ref()), + array_bytes::bytes2hex("", &root.as_ref()) + ); assert_eq!(proof.leaf_index, l); assert_eq!(&proof.leaf, &data[l]); @@ -831,16 +799,20 @@ mod tests { proof: vec![ array_bytes::hex2array_unchecked( "340bcb1d49b2d82802ddbcf5b85043edb3427b65d09d7f758fbc76932ad2da2f" - ), + ) + .into(), array_bytes::hex2array_unchecked( "ba0580e5bd530bc93d61276df7969fb5b4ae8f1864b4a28c280249575198ff1f" - ), + ) + .into(), array_bytes::hex2array_unchecked( "d02609d2bbdb28aa25f58b85afec937d5a4c85d37925bce6d0cf802f9d76ba79" - ), + ) + .into(), array_bytes::hex2array_unchecked( "ae3f8991955ed884613b0a5f40295902eea0e0abe5858fc520b72959bc016d4e" - ), + ) + .into(), ], number_of_leaves: data.len(), leaf_index: data.len() - 1, diff --git a/frame/beefy-mmr/src/lib.rs b/frame/beefy-mmr/src/lib.rs index 456d6e77aa8eb..5b82c89ce84b6 100644 --- a/frame/beefy-mmr/src/lib.rs +++ b/frame/beefy-mmr/src/lib.rs @@ -33,7 +33,7 @@ //! //! and thanks to versioning can be easily updated in the future. 
-use sp_runtime::traits::{Convert, Hash, Member}; +use sp_runtime::traits::{Convert, Member}; use sp_std::prelude::*; use beefy_primitives::{ @@ -142,10 +142,7 @@ pub mod pallet { StorageValue<_, BeefyNextAuthoritySet>, ValueQuery>; } -impl LeafDataProvider for Pallet -where - MerkleRootOf: From + Into, -{ +impl LeafDataProvider for Pallet { type LeafData = MmrLeaf< ::BlockNumber, ::Hash, @@ -163,19 +160,9 @@ where } } -impl beefy_merkle_tree::Hasher for Pallet -where - MerkleRootOf: Into, -{ - fn hash(data: &[u8]) -> beefy_merkle_tree::Hash { - ::Hashing::hash(data).into() - } -} - impl beefy_primitives::OnNewValidatorSet<::BeefyId> for Pallet where T: pallet::Config, - MerkleRootOf: From + Into, { /// Compute and cache BEEFY authority sets based on updated BEEFY validator sets. fn on_new_validator_set( @@ -190,10 +177,7 @@ where } } -impl Pallet -where - MerkleRootOf: From + Into, -{ +impl Pallet { /// Return the currently active BEEFY authority set proof. pub fn authority_set_proof() -> BeefyAuthoritySet> { Pallet::::beefy_authorities() @@ -220,7 +204,10 @@ where .map(T::BeefyAuthorityToMerkleLeaf::convert) .collect::>(); let len = beefy_addresses.len() as u32; - let root = beefy_merkle_tree::merkle_root::(beefy_addresses).into(); + let root = beefy_merkle_tree::merkle_root::<::Hashing, _>( + beefy_addresses, + ) + .into(); BeefyAuthoritySet { id, len, root } } } diff --git a/frame/beefy-mmr/src/mock.rs b/frame/beefy-mmr/src/mock.rs index 602d0aa5fe1a6..0a64ad3fc9976 100644 --- a/frame/beefy-mmr/src/mock.rs +++ b/frame/beefy-mmr/src/mock.rs @@ -147,9 +147,10 @@ impl BeefyDataProvider> for DummyDataProvider { fn extra_data() -> Vec { let mut col = vec![(15, vec![1, 2, 3]), (5, vec![4, 5, 6])]; col.sort(); - beefy_merkle_tree::merkle_root::, _, _>( + beefy_merkle_tree::merkle_root::<::Hashing, _>( col.into_iter().map(|pair| pair.encode()), ) + .as_ref() .to_vec() } } From 005195011f303bf5e2e468dbf379dd42971bf6ae Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: 
Tue, 4 Oct 2022 14:34:54 +0300 Subject: [PATCH 37/42] client/beefy: small code improvements (#12414) * client/beefy: remove bounds on type definitions * client/beefy: remove gossip protocol legacy name * client/beefy: simplify justification request response engine Signed-off-by: Adrian Catangiu --- client/beefy/src/communication/mod.rs | 5 --- .../outgoing_requests_engine.rs | 34 ++++++++----------- client/beefy/src/lib.rs | 16 ++------- 3 files changed, 17 insertions(+), 38 deletions(-) diff --git a/client/beefy/src/communication/mod.rs b/client/beefy/src/communication/mod.rs index 93646677c0ecd..91798d4ae0d33 100644 --- a/client/beefy/src/communication/mod.rs +++ b/client/beefy/src/communication/mod.rs @@ -33,9 +33,6 @@ pub(crate) mod beefy_protocol_name { /// BEEFY justifications protocol name suffix. const JUSTIFICATIONS_NAME: &str = "/beefy/justifications/1"; - /// Old names for the gossip protocol, used for backward compatibility. - pub(super) const LEGACY_NAMES: [&str; 1] = ["/paritytech/beefy/1"]; - /// Name of the votes gossip protocol used by BEEFY. /// /// Must be registered towards the networking in order for BEEFY voter to properly function. 
@@ -73,9 +70,7 @@ pub fn beefy_peers_set_config( ) -> sc_network_common::config::NonDefaultSetConfig { let mut cfg = sc_network_common::config::NonDefaultSetConfig::new(gossip_protocol_name, 1024 * 1024); - cfg.allow_non_reserved(25, 25); - cfg.add_fallback_names(beefy_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect()); cfg } diff --git a/client/beefy/src/communication/request_response/outgoing_requests_engine.rs b/client/beefy/src/communication/request_response/outgoing_requests_engine.rs index e22958e19cd2e..c4d3c926190e6 100644 --- a/client/beefy/src/communication/request_response/outgoing_requests_engine.rs +++ b/client/beefy/src/communication/request_response/outgoing_requests_engine.rs @@ -20,10 +20,7 @@ use beefy_primitives::{crypto::AuthorityId, BeefyApi, ValidatorSet}; use codec::Encode; -use futures::{ - channel::{oneshot, oneshot::Canceled}, - stream::{self, StreamExt}, -}; +use futures::channel::{oneshot, oneshot::Canceled}; use log::{debug, error, warn}; use parking_lot::Mutex; use sc_network::{PeerId, ProtocolName}; @@ -50,8 +47,8 @@ type Response = Result, RequestFailure>; type ResponseReceiver = oneshot::Receiver; enum State { - Idle(stream::Pending>), - AwaitingResponse(PeerId, NumberFor, stream::Once), + Idle, + AwaitingResponse(PeerId, NumberFor, ResponseReceiver), } pub struct OnDemandJustificationsEngine { @@ -83,7 +80,7 @@ where protocol_name, live_peers, peers_cache: VecDeque::new(), - state: State::Idle(stream::pending()), + state: State::Idle, } } @@ -118,15 +115,14 @@ where IfDisconnected::ImmediateError, ); - self.state = State::AwaitingResponse(peer, block, stream::once(rx)); + self.state = State::AwaitingResponse(peer, block, rx); } /// If no other request is in progress, start new justification request for `block`. 
pub fn request(&mut self, block: NumberFor) { // ignore new requests while there's already one pending - match &self.state { - State::AwaitingResponse(_, _, _) => return, - State::Idle(_) => (), + if matches!(self.state, State::AwaitingResponse(_, _, _)) { + return } self.reset_peers_cache_for_block(block); @@ -148,7 +144,7 @@ where "🥩 cancel pending request for justification #{:?}", number ); - self.state = State::Idle(stream::pending()); + self.state = State::Idle; }, _ => (), } @@ -194,19 +190,19 @@ where pub async fn next(&mut self) -> Option> { let (peer, block, resp) = match &mut self.state { - State::Idle(pending) => { - let _ = pending.next().await; - // This never happens since 'stream::pending' never generates any items. + State::Idle => { + futures::pending!(); + // Doesn't happen as 'futures::pending!()' is an 'await' barrier that never passes. return None }, State::AwaitingResponse(peer, block, receiver) => { - let resp = receiver.next().await?; + let resp = receiver.await; (*peer, *block, resp) }, }; - // We received the awaited response. Our 'stream::once()' receiver will never generate any - // other response, meaning we're done with current state. Move the engine to `State::Idle`. - self.state = State::Idle(stream::pending()); + // We received the awaited response. Our 'receiver' will never generate any other response, + // meaning we're done with current state. Move the engine to `State::Idle`. + self.state = State::Idle; let block_id = BlockId::number(block); let validator_set = self diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index 7407f101e99a5..760fc753b18a3 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -153,11 +153,7 @@ where } /// BEEFY gadget network parameters. -pub struct BeefyNetworkParams -where - B: Block, - N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, -{ +pub struct BeefyNetworkParams { /// Network implementing gossip, requests and sync-oracle. 
pub network: Arc, /// Chain specific BEEFY gossip protocol name. See @@ -171,15 +167,7 @@ where } /// BEEFY gadget initialization parameters. -pub struct BeefyParams -where - B: Block, - BE: Backend, - C: Client, - R: ProvideRuntimeApi, - R::Api: BeefyApi + MmrApi, - N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, -{ +pub struct BeefyParams { /// BEEFY client pub client: Arc, /// Client Backend From 07e5ec5eb8a9c470bf7dc9afbdb3ac0579be4f4b Mon Sep 17 00:00:00 2001 From: Roman Useinov Date: Tue, 4 Oct 2022 14:26:14 +0200 Subject: [PATCH 38/42] [Fix] Rename VoterBagsList -> VoterList to match pdot (#12416) --- bin/node/runtime/src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index f0c68b5b225cd..c2d29731ea2e6 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -569,7 +569,7 @@ impl pallet_staking::Config for Runtime { type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::UnboundedExecution; - type VoterList = VoterBagsList; + type VoterList = VoterList; // This a placeholder, to be introduced in the next PR as an instance of bags-list type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; @@ -1651,7 +1651,7 @@ construct_runtime!( Gilt: pallet_gilt, Uniques: pallet_uniques, TransactionStorage: pallet_transaction_storage, - VoterBagsList: pallet_bags_list::, + VoterList: pallet_bags_list::, StateTrieMigration: pallet_state_trie_migration, ChildBounties: pallet_child_bounties, Referenda: pallet_referenda, @@ -1739,7 +1739,7 @@ mod benches { [pallet_alliance, Alliance] [pallet_assets, Assets] [pallet_babe, Babe] - [pallet_bags_list, VoterBagsList] + [pallet_bags_list, VoterList] [pallet_balances, Balances] [pallet_bounties, Bounties] [pallet_child_bounties, ChildBounties] From 
d11dd02dda6a00800a13cd59a0c2f07ac75e082d Mon Sep 17 00:00:00 2001 From: Muharem Ismailov Date: Tue, 4 Oct 2022 15:15:57 +0200 Subject: [PATCH 39/42] Use saturating add for alliance::disband witness data (#12418) --- frame/alliance/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index 24111b44ced9e..fca17e69c7652 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -706,7 +706,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::disband( witness.voting_members, witness.ally_members, - witness.voting_members + witness.ally_members, + witness.voting_members.saturating_add(witness.ally_members), ))] pub fn disband( origin: OriginFor, From 91d072df4273bbc04f8152099b23a66a0c1d531b Mon Sep 17 00:00:00 2001 From: Chevdor Date: Tue, 4 Oct 2022 21:30:45 +0200 Subject: [PATCH 40/42] Bump prost to 0.11+ (#12419) --- Cargo.lock | 16 ++++++++-------- client/authority-discovery/Cargo.toml | 4 ++-- client/network/Cargo.toml | 2 +- client/network/common/Cargo.toml | 2 +- client/network/light/Cargo.toml | 4 ++-- client/network/sync/Cargo.toml | 4 ++-- 6 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2f0a2df0f101b..a35dbba7d089e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7830,8 +7830,8 @@ dependencies = [ "libp2p", "log", "parity-scale-codec", - "prost 0.10.3", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "quickcheck", "rand 0.7.3", "sc-client-api", @@ -8486,7 +8486,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", "pin-project", - "prost 0.10.3", + "prost 0.11.0", "rand 0.7.3", "sc-block-builder", "sc-client-api", @@ -8553,7 +8553,7 @@ dependencies = [ "libp2p", "linked_hash_set", "parity-scale-codec", - "prost-build 0.10.4", + "prost-build 0.11.1", "sc-consensus", "sc-peerset", "serde", @@ -8595,8 +8595,8 @@ dependencies = [ "libp2p", "log", "parity-scale-codec", - "prost 0.10.3", - "prost-build 
0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "sc-client-api", "sc-network-common", "sc-peerset", @@ -8617,8 +8617,8 @@ dependencies = [ "log", "lru", "parity-scale-codec", - "prost 0.10.3", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "quickcheck", "sc-block-builder", "sc-client-api", diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index d9e9df4f2a97c..37377cdc6dde3 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } @@ -23,7 +23,7 @@ futures-timer = "3.0.1" ip_network = "0.4.1" libp2p = { version = "0.46.1", default-features = false, features = ["kad"] } log = "0.4.17" -prost = "0.10" +prost = "0.11" rand = "0.7.2" thiserror = "1.0" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index e96749df40aa2..8e3d68851c423 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -33,7 +33,7 @@ log = "0.4.17" lru = "0.7.5" parking_lot = "0.12.1" pin-project = "1.0.10" -prost = "0.10" +prost = "0.11" rand = "0.7.2" serde = { version = "1.0.136", features = ["derive"] } serde_json = "1.0.85" diff --git a/client/network/common/Cargo.toml b/client/network/common/Cargo.toml index 1ee7b15538366..0e9801ec79e63 100644 --- a/client/network/common/Cargo.toml +++ b/client/network/common/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" [dependencies] async-trait = "0.1.57" diff --git a/client/network/light/Cargo.toml b/client/network/light/Cargo.toml index 
c2a77c3b577ba..a1a5dcf85eb5d 100644 --- a/client/network/light/Cargo.toml +++ b/client/network/light/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" [dependencies] array-bytes = "4.1" @@ -24,7 +24,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [ futures = "0.3.21" libp2p = "0.46.1" log = "0.4.16" -prost = "0.10" +prost = "0.11" sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-network-common = { version = "0.10.0-dev", path = "../common" } diff --git a/client/network/sync/Cargo.toml b/client/network/sync/Cargo.toml index 269214aeff3f7..24d418f7233d7 100644 --- a/client/network/sync/Cargo.toml +++ b/client/network/sync/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" [dependencies] array-bytes = "4.1" @@ -25,7 +25,7 @@ futures = "0.3.21" libp2p = "0.46.1" log = "0.4.17" lru = "0.7.5" -prost = "0.10" +prost = "0.11" smallvec = "1.8.0" thiserror = "1.0" fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" } From 241b0d0455453499763d0db0b4ea4188012b372f Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Wed, 5 Oct 2022 00:16:07 +0200 Subject: [PATCH 41/42] Improved election pallet testing (#12327) * Improved election pallet testing * fmt * remove comment * more checks * fixes in logic * roll_to_signed * switch to roll_to_signed * Update frame/election-provider-multi-phase/src/mock.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * remove useless checks * remove warning * add checks to signed.rs * add some checks to unsigned.rs * fmt * use roll_to_signed and roll_to_unsigned * remove nonsense * remove even more nonsense * fix * fix * remove useless 
checks Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: parity-processbot <> --- .../election-provider-multi-phase/src/lib.rs | 161 +++++++++++++++--- .../election-provider-multi-phase/src/mock.rs | 11 ++ .../src/signed.rs | 130 +++++++++++--- .../src/unsigned.rs | 75 +++++--- 4 files changed, 305 insertions(+), 72 deletions(-) diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index fb17bd25ea541..3dc6161bb202a 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -1842,9 +1842,9 @@ mod tests { use super::*; use crate::{ mock::{ - multi_phase_events, raw_solution, roll_to, AccountId, ExtBuilder, MockWeightInfo, - MockedWeightInfo, MultiPhase, Runtime, RuntimeOrigin, SignedMaxSubmissions, System, - TargetIndex, Targets, + multi_phase_events, raw_solution, roll_to, roll_to_signed, roll_to_unsigned, AccountId, + ExtBuilder, MockWeightInfo, MockedWeightInfo, MultiPhase, Runtime, RuntimeOrigin, + SignedMaxSubmissions, System, TargetIndex, Targets, }, Phase, }; @@ -1868,7 +1868,7 @@ mod tests { assert!(MultiPhase::snapshot().is_none()); assert_eq!(MultiPhase::round(), 1); - roll_to(15); + roll_to_signed(); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted { round: 1 }]); assert!(MultiPhase::snapshot().is_some()); @@ -1879,7 +1879,7 @@ mod tests { assert!(MultiPhase::snapshot().is_some()); assert_eq!(MultiPhase::round(), 1); - roll_to(25); + roll_to_unsigned(); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); assert_eq!( multi_phase_events(), @@ -1912,11 +1912,29 @@ mod tests { roll_to(44); assert!(MultiPhase::current_phase().is_off()); - roll_to(45); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); roll_to(55); assert!(MultiPhase::current_phase().is_unsigned_open_at(55)); + + assert_eq!( + 
multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::UnsignedPhaseStarted { round: 1 }, + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + }, + Event::SignedPhaseStarted { round: 2 }, + Event::UnsignedPhaseStarted { round: 2 } + ] + ); }) } @@ -1940,6 +1958,21 @@ mod tests { assert!(MultiPhase::current_phase().is_off()); assert!(MultiPhase::snapshot().is_none()); + + assert_eq!( + multi_phase_events(), + vec![ + Event::UnsignedPhaseStarted { round: 1 }, + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + } + ] + ); }); } @@ -1952,7 +1985,7 @@ mod tests { roll_to(19); assert!(MultiPhase::current_phase().is_off()); - roll_to(20); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); assert!(MultiPhase::snapshot().is_some()); @@ -1963,6 +1996,21 @@ mod tests { assert!(MultiPhase::current_phase().is_off()); assert!(MultiPhase::snapshot().is_none()); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + } + ] + ) }); } @@ -1985,6 +2033,14 @@ mod tests { assert_ok!(MultiPhase::elect()); assert!(MultiPhase::current_phase().is_off()); + + assert_eq!( + multi_phase_events(), + vec![Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { minimal_stake: 0, sum_stake: 0, sum_stake_squared: 0 } + }] + ); }); } @@ -1993,16 +2049,13 @@ mod tests { // An early termination in the signed phase, with no queued solution. ExtBuilder::default().build_and_execute(|| { // Signed phase started at block 15 and will end at 25. 
- roll_to(14); - assert_eq!(MultiPhase::current_phase(), Phase::Off); - roll_to(15); + roll_to_signed(); assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted { round: 1 }]); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!(MultiPhase::round(), 1); // An unexpected call to elect. - roll_to(20); assert_ok!(MultiPhase::elect()); // We surely can't have any feasible solutions. This will cause an on-chain election. @@ -2031,10 +2084,8 @@ mod tests { // an early termination in the signed phase, with no queued solution. ExtBuilder::default().build_and_execute(|| { // signed phase started at block 15 and will end at 25. - roll_to(14); - assert_eq!(MultiPhase::current_phase(), Phase::Off); - roll_to(15); + roll_to_signed(); assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted { round: 1 }]); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!(MultiPhase::round(), 1); @@ -2052,7 +2103,6 @@ mod tests { } // an unexpected call to elect. - roll_to(20); assert_ok!(MultiPhase::elect()); // all storage items must be cleared. 
@@ -2062,16 +2112,38 @@ mod tests { assert!(MultiPhase::desired_targets().is_none()); assert!(MultiPhase::queued_solution().is_none()); assert!(MultiPhase::signed_submissions().is_empty()); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Slashed { account: 99, value: 5 }, + Event::Slashed { account: 99, value: 5 }, + Event::Slashed { account: 99, value: 5 }, + Event::Slashed { account: 99, value: 5 }, + Event::Slashed { account: 99, value: 5 }, + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + } + ] + ); }) } #[test] fn check_events_with_compute_signed() { ExtBuilder::default().build_and_execute(|| { - roll_to(14); - assert_eq!(MultiPhase::current_phase(), Phase::Off); - - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); @@ -2106,7 +2178,7 @@ mod tests { #[test] fn check_events_with_compute_unsigned() { ExtBuilder::default().build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // ensure we have snapshots in place. 
@@ -2125,7 +2197,6 @@ mod tests { )); assert!(MultiPhase::queued_solution().is_some()); - roll_to(30); assert_ok!(MultiPhase::elect()); assert_eq!( @@ -2153,7 +2224,7 @@ mod tests { #[test] fn fallback_strategy_works() { ExtBuilder::default().onchain_fallback(true).build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); // Zilch solutions thus far, but we get a result. @@ -2166,11 +2237,27 @@ mod tests { (30, Support { total: 40, voters: vec![(2, 5), (4, 5), (30, 30)] }), (40, Support { total: 60, voters: vec![(2, 5), (3, 10), (4, 5), (40, 40)] }) ] - ) + ); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::UnsignedPhaseStarted { round: 1 }, + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + } + ] + ); }); ExtBuilder::default().onchain_fallback(false).build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); // Zilch solutions thus far. @@ -2178,13 +2265,22 @@ mod tests { assert_eq!(MultiPhase::elect().unwrap_err(), ElectionError::Fallback("NoFallback.")); // phase is now emergency. assert_eq!(MultiPhase::current_phase(), Phase::Emergency); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::UnsignedPhaseStarted { round: 1 }, + Event::ElectionFailed + ] + ); }) } #[test] fn governance_fallback_works() { ExtBuilder::default().onchain_fallback(false).build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); // Zilch solutions thus far. @@ -2243,9 +2339,16 @@ mod tests { assert_eq!(MultiPhase::current_phase(), Phase::Off); // On-chain backup works though. 
- roll_to(29); let supports = MultiPhase::elect().unwrap(); assert!(supports.len() > 0); + + assert_eq!( + multi_phase_events(), + vec![Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { minimal_stake: 0, sum_stake: 0, sum_stake_squared: 0 } + }] + ); }); } @@ -2269,6 +2372,8 @@ mod tests { let err = MultiPhase::elect().unwrap_err(); assert_eq!(err, ElectionError::Fallback("NoFallback.")); assert_eq!(MultiPhase::current_phase(), Phase::Emergency); + + assert_eq!(multi_phase_events(), vec![Event::ElectionFailed]); }); } @@ -2282,7 +2387,7 @@ mod tests { crate::mock::MaxElectingVoters::set(2); // Signed phase opens just fine. - roll_to(15); + roll_to_signed(); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!( @@ -2295,7 +2400,7 @@ mod tests { #[test] fn untrusted_score_verification_is_respected() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert_eq!(MultiPhase::current_phase(), Phase::Signed); // set the solution balancing to get the desired score. 
diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index d3082be0cf750..2615d863c91e0 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -99,6 +99,17 @@ pub fn roll_to(n: BlockNumber) { } } +pub fn roll_to_unsigned() { + while !matches!(MultiPhase::current_phase(), Phase::Unsigned(_)) { + roll_to(System::block_number() + 1); + } +} +pub fn roll_to_signed() { + while !matches!(MultiPhase::current_phase(), Phase::Signed) { + roll_to(System::block_number() + 1); + } +} + pub fn roll_to_with_ocw(n: BlockNumber) { let now = System::block_number(); for i in now + 1..=n { diff --git a/frame/election-provider-multi-phase/src/signed.rs b/frame/election-provider-multi-phase/src/signed.rs index 2e01d99be0a42..175c92757f35e 100644 --- a/frame/election-provider-multi-phase/src/signed.rs +++ b/frame/election-provider-multi-phase/src/signed.rs @@ -528,10 +528,11 @@ mod tests { use super::*; use crate::{ mock::{ - balances, raw_solution, roll_to, Balances, ExtBuilder, MockedWeightInfo, MultiPhase, - Runtime, RuntimeOrigin, SignedMaxRefunds, SignedMaxSubmissions, SignedMaxWeight, + balances, multi_phase_events, raw_solution, roll_to, roll_to_signed, Balances, + ExtBuilder, MockedWeightInfo, MultiPhase, Runtime, RuntimeOrigin, SignedMaxRefunds, + SignedMaxSubmissions, SignedMaxWeight, }, - Error, Perbill, Phase, + Error, Event, Perbill, Phase, }; use frame_support::{assert_noop, assert_ok, assert_storage_noop}; @@ -555,7 +556,7 @@ mod tests { #[test] fn should_pay_deposit() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); @@ -565,13 +566,21 @@ mod tests { assert_eq!(balances(&99), (95, 5)); assert_eq!(MultiPhase::signed_submissions().iter().next().unwrap().deposit, 5); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { 
round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false } + ] + ); }) } #[test] fn good_solution_is_rewarded() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); @@ -582,13 +591,22 @@ mod tests { assert!(MultiPhase::finalize_signed_phase()); assert_eq!(balances(&99), (100 + 7 + 8, 0)); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Rewarded { account: 99, value: 7 } + ] + ); }) } #[test] fn bad_solution_is_slashed() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let mut solution = raw_solution(); @@ -604,13 +622,22 @@ mod tests { assert!(!MultiPhase::finalize_signed_phase()); // and the bond is gone. assert_eq!(balances(&99), (95, 0)); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Slashed { account: 99, value: 5 } + ] + ); }) } #[test] fn suppressed_solution_gets_bond_back() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let mut solution = raw_solution(); @@ -633,13 +660,22 @@ mod tests { assert_eq!(balances(&99), (100 + 7 + 8, 0)); // 999 gets everything back, including the call fee. 
assert_eq!(balances(&999), (100 + 8, 0)); + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Rewarded { account: 99, value: 7 } + ] + ); }) } #[test] fn cannot_submit_worse_with_full_queue() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); for s in 0..SignedMaxSubmissions::get() { @@ -667,7 +703,7 @@ mod tests { #[test] fn call_fee_refund_is_limited_by_signed_max_refunds() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); assert_eq!(SignedMaxRefunds::get(), 1); assert!(SignedMaxSubmissions::get() > 2); @@ -683,7 +719,7 @@ mod tests { assert_eq!(balances(&account), (95, 5)); } - assert!(MultiPhase::finalize_signed_phase()); + assert_ok!(MultiPhase::do_elect()); for s in 0..SignedMaxSubmissions::get() { let account = 99 + s as u64; @@ -699,6 +735,26 @@ mod tests { assert_eq!(balances(&account), (100, 0)); } } + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Rewarded { account: 99, value: 7 }, + Event::ElectionFinalized { + compute: ElectionCompute::Signed, + score: ElectionScore { + minimal_stake: 40, + sum_stake: 100, + sum_stake_squared: 5200 + } + } + ] + ); }); } @@ -708,7 +764,7 @@ mod tests { .signed_max_submission(1) 
.better_signed_threshold(Perbill::from_percent(20)) .build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let mut solution = RawSolution { @@ -747,13 +803,27 @@ mod tests { }; assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { + compute: ElectionCompute::Signed, + prev_ejected: false + }, + Event::SolutionStored { + compute: ElectionCompute::Signed, + prev_ejected: true + } + ] + ); }) } #[test] fn weakest_is_removed_if_better_provided() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); for s in 0..SignedMaxSubmissions::get() { @@ -800,7 +870,7 @@ mod tests { #[test] fn replace_weakest_works() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); for s in 1..SignedMaxSubmissions::get() { @@ -847,7 +917,7 @@ mod tests { #[test] fn early_ejected_solution_gets_bond_back() { ExtBuilder::default().signed_deposit(2, 0, 0).build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); for s in 0..SignedMaxSubmissions::get() { @@ -878,7 +948,7 @@ mod tests { #[test] fn equally_good_solution_is_not_accepted() { ExtBuilder::default().signed_max_submission(3).build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); for i in 0..SignedMaxSubmissions::get() { @@ -915,7 +985,7 @@ mod tests { // - bad_solution_is_slashed // - suppressed_solution_gets_bond_back ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); assert_eq!(balances(&99), (100, 0)); @@ -951,6 +1021,17 @@ mod tests { assert_eq!(balances(&999), (95, 0)); // 9999 gets everything back, including the 
call fee. assert_eq!(balances(&9999), (100 + 8, 0)); + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Slashed { account: 999, value: 5 }, + Event::Rewarded { account: 99, value: 7 } + ] + ); }) } @@ -960,7 +1041,7 @@ mod tests { .signed_weight(Weight::from_ref_time(40).set_proof_size(u64::MAX)) .mock_weight_info(MockedWeightInfo::Basic) .build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let (raw, witness) = MultiPhase::mine_solution().unwrap(); @@ -994,7 +1075,7 @@ mod tests { #[test] fn insufficient_deposit_does_not_store_submission() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); @@ -1014,7 +1095,7 @@ mod tests { #[test] fn insufficient_deposit_with_full_queue_works_properly() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); for s in 0..SignedMaxSubmissions::get() { @@ -1060,7 +1141,7 @@ mod tests { #[test] fn finalize_signed_phase_is_idempotent_given_submissions() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); @@ -1073,6 +1154,15 @@ mod tests { // calling it again doesn't change anything assert_storage_noop!(MultiPhase::finalize_signed_phase()); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Rewarded { account: 99, value: 7 } + ] + ); }) } } diff --git 
a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index 025ff832bb08a..7340605dfe621 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -1050,11 +1050,12 @@ mod tests { use super::*; use crate::{ mock::{ - roll_to, roll_to_with_ocw, trim_helpers, witness, BlockNumber, ExtBuilder, Extrinsic, - MinerMaxWeight, MultiPhase, Runtime, RuntimeCall, RuntimeOrigin, System, - TestNposSolution, TrimHelpers, UnsignedPhase, + multi_phase_events, roll_to, roll_to_signed, roll_to_unsigned, roll_to_with_ocw, + trim_helpers, witness, BlockNumber, ExtBuilder, Extrinsic, MinerMaxWeight, MultiPhase, + Runtime, RuntimeCall, RuntimeOrigin, System, TestNposSolution, TrimHelpers, + UnsignedPhase, }, - CurrentPhase, InvalidTransaction, Phase, QueuedSolution, TransactionSource, + CurrentPhase, Event, InvalidTransaction, Phase, QueuedSolution, TransactionSource, TransactionValidityError, }; use codec::Decode; @@ -1100,7 +1101,7 @@ mod tests { )); // signed - roll_to(15); + roll_to_signed(); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert!(matches!( ::validate_unsigned( @@ -1116,7 +1117,7 @@ mod tests { )); // unsigned - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); assert!(::validate_unsigned( @@ -1147,7 +1148,7 @@ mod tests { #[test] fn validate_unsigned_retracts_low_score() { ExtBuilder::default().desired_targets(0).build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); let solution = RawSolution:: { @@ -1193,7 +1194,7 @@ mod tests { #[test] fn validate_unsigned_retracts_incorrect_winner_count() { ExtBuilder::default().desired_targets(1).build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); let raw = RawSolution:: { @@ -1222,7 +1223,7 @@ mod tests { .miner_tx_priority(20) .desired_targets(0) 
.build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); let solution = RawSolution:: { @@ -1253,7 +1254,7 @@ mod tests { Some(\"PreDispatchWrongWinnerCount\") })")] fn unfeasible_solution_panics() { ExtBuilder::default().build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // This is in itself an invalid BS solution. @@ -1275,7 +1276,7 @@ mod tests { deprive validator from their authoring reward.")] fn wrong_witness_panics() { ExtBuilder::default().build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // This solution is unfeasible as well, but we won't even get there. @@ -1299,7 +1300,7 @@ mod tests { #[test] fn miner_works() { ExtBuilder::default().build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // ensure we have snapshots in place. @@ -1317,6 +1318,17 @@ mod tests { witness )); assert!(MultiPhase::queued_solution().is_some()); + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::UnsignedPhaseStarted { round: 1 }, + Event::SolutionStored { + compute: ElectionCompute::Unsigned, + prev_ejected: false + } + ] + ); }) } @@ -1326,7 +1338,7 @@ mod tests { .miner_weight(Weight::from_ref_time(100).set_proof_size(u64::MAX)) .mock_weight_info(crate::mock::MockedWeightInfo::Basic) .build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); let (raw, witness) = MultiPhase::mine_solution().unwrap(); @@ -1360,7 +1372,7 @@ mod tests { fn miner_will_not_submit_if_not_enough_winners() { let (mut ext, _) = ExtBuilder::default().desired_targets(8).build_offchainify(0); ext.execute_with(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // Force the number of winners to be bigger to fail @@ -1386,7 +1398,7 @@ 
mod tests { .add_voter(8, 5, bounded_vec![10]) .better_unsigned_threshold(Perbill::from_percent(50)) .build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); assert_eq!(MultiPhase::desired_targets().unwrap(), 1); @@ -1488,7 +1500,7 @@ mod tests { ext.execute_with(|| { let offchain_repeat = ::OffchainRepeat::get(); - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // first execution -- okay. @@ -1529,7 +1541,7 @@ mod tests { let guard = StorageValueRef::persistent(&OFFCHAIN_LOCK); let last_block = StorageValueRef::persistent(OFFCHAIN_LAST_BLOCK); - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // initially, the lock is not set. @@ -1550,7 +1562,7 @@ mod tests { // ensure that if the guard is in hold, a new execution is not allowed. let (mut ext, pool) = ExtBuilder::default().build_offchainify(0); ext.execute_with(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // artificially set the value, as if another thread is mid-way. @@ -1578,7 +1590,7 @@ mod tests { fn ocw_only_runs_when_unsigned_open_now() { let (mut ext, pool) = ExtBuilder::default().build_offchainify(0); ext.execute_with(|| { - roll_to(25); + roll_to_unsigned(); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); // we must clear the offchain storage to ensure the offchain execution check doesn't get @@ -1658,6 +1670,21 @@ mod tests { // the submitted solution changes because the cache was cleared. 
assert_eq!(tx_cache_1, tx_cache_3); + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::UnsignedPhaseStarted { round: 1 }, + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + } + ] + ); }) } @@ -1797,7 +1824,7 @@ mod tests { #[test] fn trim_assignments_length_does_not_modify_when_short_enough() { ExtBuilder::default().build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); // given let TrimHelpers { mut assignments, encoded_size_of, .. } = trim_helpers(); @@ -1822,7 +1849,7 @@ mod tests { #[test] fn trim_assignments_length_modifies_when_too_long() { ExtBuilder::default().build().execute_with(|| { - roll_to(25); + roll_to_unsigned(); // given let TrimHelpers { mut assignments, encoded_size_of, .. } = trim_helpers(); @@ -1848,7 +1875,7 @@ mod tests { #[test] fn trim_assignments_length_trims_lowest_stake() { ExtBuilder::default().build().execute_with(|| { - roll_to(25); + roll_to_unsigned(); // given let TrimHelpers { voters, mut assignments, encoded_size_of, voter_index } = @@ -1911,7 +1938,7 @@ mod tests { // or when we trim it to zero. ExtBuilder::default().build_and_execute(|| { // we need snapshot for `trim_helpers` to work. - roll_to(25); + roll_to_unsigned(); let TrimHelpers { mut assignments, encoded_size_of, .. } = trim_helpers(); assert!(assignments.len() > 0); @@ -1933,7 +1960,7 @@ mod tests { #[test] fn mine_solution_solutions_always_within_acceptable_length() { ExtBuilder::default().build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); // how long would the default solution be? 
let solution = MultiPhase::mine_solution().unwrap(); From 7a8de4995715cc6cd11a79eb262bf41e5b190943 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Wed, 5 Oct 2022 13:10:20 +0200 Subject: [PATCH 42/42] Adapt `pallet-contracts` to WeightV2 (#12421) * Replace contract access weight by proper PoV component * Return the whole weight struct from dry-runs * Fixup `seal_call` and `seal_instantiate` * Fix duplicate extrinsics * Remove ContractAccessWeight from runtime * Fix doc link * Remove leftover debugging output --- Cargo.lock | 1 + bin/node/runtime/src/lib.rs | 1 - frame/contracts/primitives/Cargo.toml | 1 + frame/contracts/primitives/src/lib.rs | 11 +- frame/contracts/src/gas.rs | 61 +++++---- frame/contracts/src/lib.rs | 181 +++++-------------------- frame/contracts/src/tests.rs | 36 ++--- frame/contracts/src/wasm/code_cache.rs | 15 +- 8 files changed, 96 insertions(+), 211 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a35dbba7d089e..309742e5bf17e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5531,6 +5531,7 @@ dependencies = [ "parity-scale-codec", "sp-runtime", "sp-std", + "sp-weights", ] [[package]] diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index c2d29731ea2e6..4898312f9608f 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1175,7 +1175,6 @@ impl pallet_contracts::Config for Runtime { type DeletionWeightLimit = DeletionWeightLimit; type Schedule = Schedule; type AddressGenerator = pallet_contracts::DefaultAddressGenerator; - type ContractAccessWeight = pallet_contracts::DefaultContractAccessWeight; type MaxCodeLen = ConstU32<{ 128 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; } diff --git a/frame/contracts/primitives/Cargo.toml b/frame/contracts/primitives/Cargo.toml index 64e332007350b..c8b7c4a2f7c37 100644 --- a/frame/contracts/primitives/Cargo.toml +++ b/frame/contracts/primitives/Cargo.toml @@ -19,6 +19,7 @@ codec = { package = "parity-scale-codec", version = 
"3.0.0", default-features = # Substrate Dependencies (This crate should not rely on frame) sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-weights = { version = "4.0.0", default-features = false, path = "../../../primitives/weights" } [features] default = ["std"] diff --git a/frame/contracts/primitives/src/lib.rs b/frame/contracts/primitives/src/lib.rs index 5daf875ac2651..4faea9eb3ee75 100644 --- a/frame/contracts/primitives/src/lib.rs +++ b/frame/contracts/primitives/src/lib.rs @@ -26,17 +26,18 @@ use sp_runtime::{ DispatchError, RuntimeDebug, }; use sp_std::prelude::*; +use sp_weights::Weight; /// Result type of a `bare_call` or `bare_instantiate` call. /// /// It contains the execution result together with some auxiliary information. #[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug)] pub struct ContractResult { - /// How much gas was consumed during execution. - pub gas_consumed: u64, - /// How much gas is required as gas limit in order to execute this call. + /// How much weight was consumed during execution. + pub gas_consumed: Weight, + /// How much weight is required as gas limit in order to execute this call. /// - /// This value should be used to determine the gas limit for on-chain execution. + /// This value should be used to determine the weight limit for on-chain execution. /// /// # Note /// @@ -44,7 +45,7 @@ pub struct ContractResult { /// is used. Currently, only `seal_call_runtime` makes use of pre charging. /// Additionally, any `seal_call` or `seal_instantiate` makes use of pre-charging /// when a non-zero `gas_limit` argument is supplied. - pub gas_required: u64, + pub gas_required: Weight, /// How much balance was deposited and reserved during execution in order to pay for storage. 
/// /// The storage deposit is never actually charged from the caller in case of [`Self::result`] diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index 215b4d42daa06..d0076652dd6d4 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -107,32 +107,45 @@ where /// /// Passing `0` as amount is interpreted as "all remaining gas". pub fn nested(&mut self, amount: Weight) -> Result { - let amount = if amount == Weight::zero() { self.gas_left } else { amount }; - // NOTE that it is ok to allocate all available gas since it still ensured // by `charge` that it doesn't reach zero. - if self.gas_left.any_lt(amount) { - Err(>::OutOfGas.into()) - } else { - self.gas_left -= amount; - Ok(GasMeter::new(amount)) - } + let amount = Weight::from_components( + if amount.ref_time().is_zero() { + self.gas_left().ref_time() + } else { + amount.ref_time() + }, + if amount.proof_size().is_zero() { + self.gas_left().proof_size() + } else { + amount.proof_size() + }, + ); + self.gas_left = self.gas_left.checked_sub(&amount).ok_or_else(|| >::OutOfGas)?; + Ok(GasMeter::new(amount)) } /// Absorb the remaining gas of a nested meter after we are done using it. pub fn absorb_nested(&mut self, nested: Self) { - if self.gas_left == Weight::zero() { + if self.gas_left.ref_time().is_zero() { // All of the remaining gas was inherited by the nested gas meter. When absorbing // we can therefore safely inherit the lowest gas that the nested gas meter experienced // as long as it is lower than the lowest gas that was experienced by the parent. // We cannot call `self.gas_left_lowest()` here because in the state that this // code is run the parent gas meter has `0` gas left. 
- self.gas_left_lowest = nested.gas_left_lowest().min(self.gas_left_lowest); + *self.gas_left_lowest.ref_time_mut() = + nested.gas_left_lowest().ref_time().min(self.gas_left_lowest.ref_time()); } else { // The nested gas meter was created with a fixed amount that did not consume all of the // parents (self) gas. The lowest gas that self will experience is when the nested // gas was pre charged with the fixed amount. - self.gas_left_lowest = self.gas_left_lowest(); + *self.gas_left_lowest.ref_time_mut() = self.gas_left_lowest().ref_time(); + } + if self.gas_left.proof_size().is_zero() { + *self.gas_left_lowest.proof_size_mut() = + nested.gas_left_lowest().proof_size().min(self.gas_left_lowest.proof_size()); + } else { + *self.gas_left_lowest.proof_size_mut() = self.gas_left_lowest().proof_size(); } self.gas_left += nested.gas_left; } @@ -155,17 +168,11 @@ where ErasedToken { description: format!("{:?}", token), token: Box::new(token) }; self.tokens.push(erased_tok); } - let amount = token.weight(); - let new_value = self.gas_left.checked_sub(&amount); - - // We always consume the gas even if there is not enough gas. - self.gas_left = new_value.unwrap_or_else(Zero::zero); - - match new_value { - Some(_) => Ok(ChargedAmount(amount)), - None => Err(Error::::OutOfGas.into()), - } + // It is OK to not charge anything on failure because we always charge _before_ we perform + // any action + self.gas_left = self.gas_left.checked_sub(&amount).ok_or_else(|| Error::::OutOfGas)?; + Ok(ChargedAmount(amount)) } /// Adjust a previously charged amount down to its actual amount. @@ -298,20 +305,16 @@ mod tests { assert!(gas_meter.charge(SimpleToken(1)).is_err()); } - // Make sure that if the gas meter is charged by exceeding amount then not only an error - // returned for that charge, but also for all consequent charges. - // - // This is not strictly necessary, because the execution should be interrupted immediately - // if the gas meter runs out of gas. 
However, this is just a nice property to have. + // Make sure that the gas meter does not charge in case of overcharger #[test] - fn overcharge_is_unrecoverable() { + fn overcharge_does_not_charge() { let mut gas_meter = GasMeter::::new(Weight::from_ref_time(200)); // The first charge is should lead to OOG. assert!(gas_meter.charge(SimpleToken(300)).is_err()); - // The gas meter is emptied at this moment, so this should also fail. - assert!(gas_meter.charge(SimpleToken(1)).is_err()); + // The gas meter should still contain the full 200. + assert!(gas_meter.charge(SimpleToken(200)).is_ok()); } // Charging the exact amount that the user paid for should be diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 3aeb8742705c2..0c90c3ff433b4 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -107,7 +107,7 @@ use crate::{ }; use codec::{Encode, HasCompact}; use frame_support::{ - dispatch::{DispatchClass, Dispatchable, GetDispatchInfo, Pays, PostDispatchInfo}, + dispatch::{Dispatchable, GetDispatchInfo, Pays, PostDispatchInfo}, ensure, traits::{ tokens::fungible::Inspect, ConstU32, Contains, Currency, Get, Randomness, @@ -116,7 +116,7 @@ use frame_support::{ weights::{OldWeight, Weight}, BoundedVec, WeakBoundedVec, }; -use frame_system::{limits::BlockWeights, Pallet as System}; +use frame_system::Pallet as System; use pallet_contracts_primitives::{ Code, CodeUploadResult, CodeUploadReturnValue, ContractAccessError, ContractExecResult, ContractInstantiateResult, ExecReturnValue, GetStorageResult, InstantiateReturnValue, @@ -199,29 +199,6 @@ where } } -/// A conservative implementation to be used for [`pallet::Config::ContractAccessWeight`]. -/// -/// This derives the weight from the [`BlockWeights`] passed as `B` and the `maxPovSize` passed -/// as `P`. The default value for `P` is the `maxPovSize` used by Polkadot and Kusama. 
-/// -/// It simply charges from the weight meter pro rata: If loading the contract code would consume -/// 50% of the max storage proof then this charges 50% of the max block weight. -pub struct DefaultContractAccessWeight, const P: u32 = 5_242_880>( - PhantomData, -); - -impl, const P: u32> Get for DefaultContractAccessWeight { - fn get() -> Weight { - let block_weights = B::get(); - block_weights - .per_class - .get(DispatchClass::Normal) - .max_total - .unwrap_or(block_weights.max_block) / - u64::from(P) - } -} - #[frame_support::pallet] pub mod pallet { use super::*; @@ -334,27 +311,6 @@ pub mod pallet { #[pallet::constant] type DepositPerByte: Get>; - /// The weight per byte of code that is charged when loading a contract from storage. - /// - /// Currently, FRAME only charges fees for computation incurred but not for PoV - /// consumption caused for storage access. This is usually not exploitable because - /// accessing storage carries some substantial weight costs, too. However in case - /// of contract code very much PoV consumption can be caused while consuming very little - /// computation. This could be used to keep the chain busy without paying the - /// proper fee for it. Until this is resolved we charge from the weight meter for - /// contract access. - /// - /// For more information check out: - /// - /// [`DefaultContractAccessWeight`] is a safe default to be used for Polkadot or Kusama - /// parachains. - /// - /// # Note - /// - /// This is only relevant for parachains. Set to zero in case of a standalone chain. - #[pallet::constant] - type ContractAccessWeight: Get; - /// The amount of balance a caller has to pay for each storage item. /// /// # Note @@ -413,23 +369,8 @@ pub mod pallet { T::AccountId: AsRef<[u8]>, as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, { - /// Makes a call to an account, optionally transferring some balance. - /// - /// # Parameters - /// - /// * `dest`: Address of the contract to call. 
- /// * `value`: The balance to transfer from the `origin` to `dest`. - /// * `gas_limit`: The gas limit enforced when executing the constructor. - /// * `storage_deposit_limit`: The maximum amount of balance that can be charged from the - /// caller to pay for the storage consumed. - /// * `data`: The input data to pass to the contract. - /// - /// * If the account is a smart-contract account, the associated code will be - /// executed and any value will be transferred. - /// * If the account is a regular account, any value will be transferred. - /// * If no account exists and the call value is not less than `existential_deposit`, - /// a regular account will be created and any value will be transferred. - #[pallet::weight(T::WeightInfo::call().saturating_add((*gas_limit).into()))] + /// Deprecated version of [`Self::call`] for use in an in-storage `Call`. + #[pallet::weight(T::WeightInfo::call().saturating_add(>::compat_weight(*gas_limit)))] #[allow(deprecated)] #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `call`")] pub fn call_old_weight( @@ -440,55 +381,20 @@ pub mod pallet { storage_deposit_limit: Option< as codec::HasCompact>::Type>, data: Vec, ) -> DispatchResultWithPostInfo { - let gas_limit: Weight = gas_limit.into(); - let origin = ensure_signed(origin)?; - let dest = T::Lookup::lookup(dest)?; - let mut output = Self::internal_call( + Self::call( origin, dest, value, - gas_limit, - storage_deposit_limit.map(Into::into), + >::compat_weight(gas_limit), + storage_deposit_limit, data, - None, - ); - if let Ok(retval) = &output.result { - if retval.did_revert() { - output.result = Err(>::ContractReverted.into()); - } - } - output.gas_meter.into_dispatch_result(output.result, T::WeightInfo::call()) + ) } - /// Instantiates a new contract from the supplied `code` optionally transferring - /// some balance. - /// - /// This dispatchable has the same effect as calling [`Self::upload_code`] + - /// [`Self::instantiate`].
Bundling them together provides efficiency gains. Please - /// also check the documentation of [`Self::upload_code`]. - /// - /// # Parameters - /// - /// * `value`: The balance to transfer from the `origin` to the newly created contract. - /// * `gas_limit`: The gas limit enforced when executing the constructor. - /// * `storage_deposit_limit`: The maximum amount of balance that can be charged/reserved - /// from the caller to pay for the storage consumed. - /// * `code`: The contract code to deploy in raw bytes. - /// * `data`: The input data to pass to the contract constructor. - /// * `salt`: Used for the address derivation. See [`Pallet::contract_address`]. - /// - /// Instantiation is executed as follows: - /// - /// - The supplied `code` is instrumented, deployed, and a `code_hash` is created for that - /// code. - /// - If the `code_hash` already exists on the chain the underlying `code` will be shared. - /// - The destination address is computed based on the sender, code_hash and the salt. - /// - The smart-contract account is created at the computed address. - /// - The `value` is transferred to the new account. - /// - The `deploy` function is executed in the context of the newly-created account. + /// Deprecated version of [`Self::instantiate_with_code`] for use in an in-storage `Call`. 
#[pallet::weight( T::WeightInfo::instantiate_with_code(code.len() as u32, salt.len() as u32) - .saturating_add((*gas_limit).into()) + .saturating_add(>::compat_weight(*gas_limit)) )] #[allow(deprecated)] #[deprecated( @@ -503,38 +409,20 @@ pub mod pallet { data: Vec, salt: Vec, ) -> DispatchResultWithPostInfo { - let gas_limit: Weight = gas_limit.into(); - let origin = ensure_signed(origin)?; - let code_len = code.len() as u32; - let salt_len = salt.len() as u32; - let mut output = Self::internal_instantiate( + Self::instantiate_with_code( origin, value, - gas_limit, - storage_deposit_limit.map(Into::into), - Code::Upload(code), + >::compat_weight(gas_limit), + storage_deposit_limit, + code, data, salt, - None, - ); - if let Ok(retval) = &output.result { - if retval.1.did_revert() { - output.result = Err(>::ContractReverted.into()); - } - } - output.gas_meter.into_dispatch_result( - output.result.map(|(_address, result)| result), - T::WeightInfo::instantiate_with_code(code_len, salt_len), ) } - /// Instantiates a contract from a previously deployed wasm binary. - /// - /// This function is identical to [`Self::instantiate_with_code`] but without the - /// code deployment step. Instead, the `code_hash` of an on-chain deployed wasm binary - /// must be supplied. + /// Deprecated version of [`Self::instantiate`] for use in an in-storage `Call`. 
#[pallet::weight( - T::WeightInfo::instantiate(salt.len() as u32).saturating_add((*gas_limit).into()) + T::WeightInfo::instantiate(salt.len() as u32).saturating_add(>::compat_weight(*gas_limit)) )] #[allow(deprecated)] #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `instantiate`")] @@ -547,27 +435,14 @@ pub mod pallet { data: Vec, salt: Vec, ) -> DispatchResultWithPostInfo { - let gas_limit: Weight = gas_limit.into(); - let origin = ensure_signed(origin)?; - let salt_len = salt.len() as u32; - let mut output = Self::internal_instantiate( + Self::instantiate( origin, value, - gas_limit, - storage_deposit_limit.map(Into::into), - Code::Existing(code_hash), + >::compat_weight(gas_limit), + storage_deposit_limit, + code_hash, data, salt, - None, - ); - if let Ok(retval) = &output.result { - if retval.1.did_revert() { - output.result = Err(>::ContractReverted.into()); - } - } - output.gas_meter.into_dispatch_result( - output.result.map(|(_address, output)| output), - T::WeightInfo::instantiate(salt_len), ) } @@ -1059,8 +934,8 @@ where ); ContractExecResult { result: output.result.map_err(|r| r.error), - gas_consumed: output.gas_meter.gas_consumed().ref_time(), - gas_required: output.gas_meter.gas_required().ref_time(), + gas_consumed: output.gas_meter.gas_consumed(), + gas_required: output.gas_meter.gas_required(), storage_deposit: output.storage_deposit, debug_message: debug_message.unwrap_or_default(), } @@ -1104,8 +979,8 @@ where .result .map(|(account_id, result)| InstantiateReturnValue { result, account_id }) .map_err(|e| e.error), - gas_consumed: output.gas_meter.gas_consumed().ref_time(), - gas_required: output.gas_meter.gas_required().ref_time(), + gas_consumed: output.gas_meter.gas_consumed(), + gas_required: output.gas_meter.gas_required(), storage_deposit: output.storage_deposit, debug_message: debug_message.unwrap_or_default(), } @@ -1287,4 +1162,12 @@ where fn min_balance() -> BalanceOf { >>::minimum_balance() } + + /// Convert 
a 1D Weight to a 2D weight. + /// + /// Used by backwards compatible extrinsics. We cannot just set the proof to zero + /// or an old `Call` will just fail. + fn compat_weight(gas_limit: OldWeight) -> Weight { + Weight::from(gas_limit).set_proof_size(u64::from(T::MaxCodeLen::get()) * 2) + } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index b4a8f8f4c834f..6a2144840143a 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -26,8 +26,8 @@ use crate::{ tests::test_utils::{get_contract, get_contract_checked}, wasm::{PrefabWasmModule, ReturnCode as RuntimeReturnCode}, weights::WeightInfo, - BalanceOf, Code, CodeStorage, Config, ContractInfoOf, DefaultAddressGenerator, - DefaultContractAccessWeight, DeletionQueue, Error, Pallet, Schedule, + BalanceOf, Code, CodeStorage, Config, ContractInfoOf, DefaultAddressGenerator, DeletionQueue, + Error, Pallet, Schedule, }; use assert_matches::assert_matches; use codec::Encode; @@ -404,7 +404,6 @@ impl Config for Test { type DepositPerByte = DepositPerByte; type DepositPerItem = DepositPerItem; type AddressGenerator = DefaultAddressGenerator; - type ContractAccessWeight = DefaultContractAccessWeight; type MaxCodeLen = ConstU32<{ 128 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; } @@ -414,7 +413,7 @@ pub const BOB: AccountId32 = AccountId32::new([2u8; 32]); pub const CHARLIE: AccountId32 = AccountId32::new([3u8; 32]); pub const DJANGO: AccountId32 = AccountId32::new([4u8; 32]); -pub const GAS_LIMIT: Weight = Weight::from_ref_time(100_000_000_000).set_proof_size(u64::MAX); +pub const GAS_LIMIT: Weight = Weight::from_ref_time(100_000_000_000).set_proof_size(256 * 1024); pub struct ExtBuilder { existential_deposit: u64, @@ -674,7 +673,7 @@ fn run_out_of_gas() { RuntimeOrigin::signed(ALICE), addr, // newly created account 0, - Weight::from_ref_time(1_000_000_000_000), + Weight::from_ref_time(1_000_000_000_000).set_proof_size(u64::MAX), None, vec![], ), @@ -1760,7 +1759,7 @@ fn 
chain_extension_works() { false, ); assert_ok!(result.result); - assert_eq!(result.gas_consumed, gas_consumed + 42); + assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 42); let result = Contracts::bare_call( ALICE, addr.clone(), @@ -1771,7 +1770,7 @@ fn chain_extension_works() { false, ); assert_ok!(result.result); - assert_eq!(result.gas_consumed, gas_consumed + 95); + assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 95); // 3 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer let result = Contracts::bare_call( @@ -2409,10 +2408,11 @@ fn reinstrument_does_charge() { let result2 = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, zero.clone(), false); assert!(!result2.result.unwrap().did_revert()); - assert!(result2.gas_consumed > result1.gas_consumed); + assert!(result2.gas_consumed.ref_time() > result1.gas_consumed.ref_time()); assert_eq!( - result2.gas_consumed, - result1.gas_consumed + ::WeightInfo::reinstrument(code_len).ref_time(), + result2.gas_consumed.ref_time(), + result1.gas_consumed.ref_time() + + ::WeightInfo::reinstrument(code_len).ref_time(), ); }); } @@ -2536,7 +2536,7 @@ fn gas_estimation_nested_call_fixed_limit() { assert_ok!(&result.result); // We have a subcall with a fixed gas limit. This constitutes precharging. - assert!(result.gas_required > result.gas_consumed); + assert!(result.gas_required.ref_time() > result.gas_consumed.ref_time()); // Make the same call using the estimated gas. Should succeed. 
assert_ok!( @@ -2544,7 +2544,7 @@ fn gas_estimation_nested_call_fixed_limit() { ALICE, addr_caller, 0, - Weight::from_ref_time(result.gas_required).set_proof_size(u64::MAX), + result.gas_required, Some(result.storage_deposit.charge_or_zero()), input, false, @@ -2557,6 +2557,7 @@ fn gas_estimation_nested_call_fixed_limit() { #[test] #[cfg(feature = "unstable-interface")] fn gas_estimation_call_runtime() { + use codec::Decode; let (caller_code, caller_hash) = compile_module::("call_runtime").unwrap(); let (callee_code, callee_hash) = compile_module::("dummy").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { @@ -2591,7 +2592,7 @@ fn gas_estimation_call_runtime() { let call = RuntimeCall::Contracts(crate::Call::call { dest: addr_callee, value: 0, - gas_limit: GAS_LIMIT / 3, + gas_limit: GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() / 3), storage_deposit_limit: None, data: vec![], }); @@ -2604,9 +2605,10 @@ fn gas_estimation_call_runtime() { call.encode(), false, ); - assert_ok!(&result.result); - - assert!(result.gas_required > result.gas_consumed); + // contract encodes the result of the dispatch runtime + let outcome = u32::decode(&mut result.result.unwrap().data.as_ref()).unwrap(); + assert_eq!(outcome, 0); + assert!(result.gas_required.ref_time() > result.gas_consumed.ref_time()); // Make the same call using the required gas. Should succeed. assert_ok!( @@ -2614,7 +2616,7 @@ fn gas_estimation_call_runtime() { ALICE, addr_caller, 0, - Weight::from_ref_time(result.gas_required).set_proof_size(u64::MAX), + result.gas_required, None, call.encode(), false, diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index 137eccf3db686..09e51d981360b 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -228,16 +228,11 @@ impl Token for CodeToken { // contract code. This is why we subtract `T::*::(0)`. 
We need to do this at this // point because when charging the general weight for calling the contract we not know the // size of the contract. - let ref_time_weight = match *self { + match *self { Reinstrument(len) => T::WeightInfo::reinstrument(len), - Load(len) => { - let computation = T::WeightInfo::call_with_code_per_byte(len) - .saturating_sub(T::WeightInfo::call_with_code_per_byte(0)); - let bandwidth = T::ContractAccessWeight::get().saturating_mul(len as u64); - computation.max(bandwidth) - }, - }; - - ref_time_weight + Load(len) => T::WeightInfo::call_with_code_per_byte(len) + .saturating_sub(T::WeightInfo::call_with_code_per_byte(0)) + .set_proof_size(len.into()), + } } }