From fd4167854467a46e46abbc3f3219ced589a115c9 Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 25 Jan 2024 16:41:52 +0100 Subject: [PATCH 01/31] max -> min --- polkadot/runtime/parachains/src/scheduler.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs index a666f5689089..b8a96d0adced 100644 --- a/polkadot/runtime/parachains/src/scheduler.rs +++ b/polkadot/runtime/parachains/src/scheduler.rs @@ -584,7 +584,8 @@ impl Pallet { return } // If there exists a core, ensure we schedule at least one job onto it. - let n_lookahead = Self::claimqueue_lookahead().max(1); + // TODO (now): Add a test for this. This wax `max` and nobody noticed. + let n_lookahead = Self::claimqueue_lookahead().min(1); let n_session_cores = T::AssignmentProvider::session_core_count(); let cq = ClaimQueue::::get(); let ttl = >::config().on_demand_ttl; From 9ee7aa1e81878175d5006e8abdc23fabb9c3e73b Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 25 Jan 2024 16:44:53 +0100 Subject: [PATCH 02/31] Reduce default queue size on-demand --- polkadot/primitives/src/v6/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index fd0b32db7994..a3532ee8f5a0 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -393,7 +393,7 @@ pub const MAX_POV_SIZE: u32 = 5 * 1024 * 1024; /// Default queue size we use for the on-demand order book. /// /// Can be adjusted in configuration. -pub const ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE: u32 = 10_000; +pub const ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE: u32 = 500; /// Backing votes threshold used from the host prior to runtime API version 6 and from the runtime /// prior to v9 configuration migration. From 0b3c03d9daaa8bfe07ad88cc75dfdf48f37a5a1b Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 1 Feb 2024 10:05:41 +0100 Subject: [PATCH 03/31] Introduce max max queue size. --- polkadot/primitives/src/lib.rs | 2 +- polkadot/primitives/src/v6/mod.rs | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index 2570bcadf606..e8778c55c449 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -57,7 +57,7 @@ pub use v6::{ UpgradeRestriction, UpwardMessage, ValidDisputeStatementKind, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, ValidityError, ASSIGNMENT_KEY_TYPE_ID, LEGACY_MIN_BACKING_VOTES, LOWEST_PUBLIC_ID, - MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, + MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, PARACHAINS_INHERENT_IDENTIFIER, PARACHAIN_KEY_TYPE_ID, }; diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index a3532ee8f5a0..010c23d29499 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -395,6 +395,13 @@ pub const MAX_POV_SIZE: u32 = 5 * 1024 * 1024; /// Can be adjusted in configuration. pub const ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE: u32 = 500; +/// Maximum for maximum queue size. +/// +/// Setting `on_demand_queue_max_size` to a value higher than this is unsound. This is more a theoretical limit, just +/// below enough what the target type supports, so comparisons are possible even with indices that are overflowing the +/// underyling type. 
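// Illustrative sketch (not part of the patches, helper name invented): why this cap has to
// stay far below `u32::MAX`. Queue indices are compared on their wrapping difference, so two
// live indices still order correctly after the `u32` counter has overflowed, provided fewer
// than `ON_DEMAND_MAX_QUEUE_MAX_SIZE` entries are ever live at the same time.
fn wrapping_newer_than(a: u32, b: u32) -> bool {
    const ON_DEMAND_MAX_QUEUE_MAX_SIZE: u32 = 1_000_000_000;
    let diff = a.overflowing_sub(b).0;
    diff != 0 && diff <= ON_DEMAND_MAX_QUEUE_MAX_SIZE
}

fn main() {
    // Plain case: index 10 was pushed after index 3.
    assert!(wrapping_newer_than(10, 3));
    // Wrapped case: index 5 was pushed after `u32::MAX - 2`; the subtraction wraps to 8,
    // still below the cap, so 5 is correctly treated as the newer entry.
    assert!(wrapping_newer_than(5, u32::MAX - 2));
    assert!(!wrapping_newer_than(u32::MAX - 2, 5));
}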
+pub const ON_DEMAND_MAX_QUEUE_MAX_SIZE: u32 = 1_000_000_000; + /// Backing votes threshold used from the host prior to runtime API version 6 and from the runtime /// prior to v9 configuration migration. pub const LEGACY_MIN_BACKING_VOTES: u32 = 2; From edad3a4f5f97498635a54c6e4f8b7dc28af7483e Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 1 Feb 2024 10:06:34 +0100 Subject: [PATCH 04/31] Use max max queue size. --- polkadot/runtime/parachains/src/configuration.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index 4619313590eb..9e4d72be0583 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -29,7 +29,7 @@ use primitives::{ vstaging::{ApprovalVotingParams, NodeFeatures}, AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, - ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, + ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, }; use sp_runtime::{traits::Zero, Perbill}; use sp_std::prelude::*; @@ -359,6 +359,8 @@ pub enum InconsistentError { ZeroMinimumBackingVotes, /// `executor_params` are inconsistent. InconsistentExecutorParams { inner: ExecutorParamError }, + /// Passed in queue size for on-demand was too large. + OnDemandQueueSizeTooLarge, } impl HostConfiguration @@ -447,6 +449,10 @@ where return Err(InconsistentExecutorParams { inner }) } + if self.on_demand_queue_max_size > ON_DEMAND_MAX_QUEUE_MAX_SIZE { + return Err(OnDemandQueueSizeTooLarge) + } + Ok(()) } @@ -667,7 +673,7 @@ pub mod pallet { /// Set the number of coretime execution cores. /// - /// Note that this configuration is managed by the coretime chain. Only manually change + /// NOTE: that this configuration is managed by the coretime chain. Only manually change /// this, if you really know what you are doing! #[pallet::call_index(6)] #[pallet::weight(( From ee74614d5effa9c472d817e1ba8fc0f7e6f35b7d Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 1 Feb 2024 10:07:02 +0100 Subject: [PATCH 05/31] Implementation complete Does not yet typecheck. --- .../parachains/src/assigner_on_demand/mod.rs | 619 +++++++++++------- .../src/assigner_on_demand/tests.rs | 22 +- 2 files changed, 390 insertions(+), 251 deletions(-) diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index 1b746e88694c..e05f36851901 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -16,22 +16,31 @@ //! The parachain on demand assignment module. //! -//! Implements a mechanism for taking in orders for pay as you go (PAYG) or on demand -//! parachain (previously parathreads) assignments. This module is not handled by the -//! initializer but is instead instantiated in the `construct_runtime` macro. +//! Implements a mechanism for taking in orders for on-demand parachain (previously parathreads) +//! assignments. This module is not handled by the initializer but is instead instantiated in the +//! `construct_runtime` macro. //! //! The module currently limits parallel execution of blocks from the same `ParaId` via //! a core affinity mechanism. As long as there exists an affinity for a `CoreIndex` for //! a specific `ParaId`, orders for blockspace for that `ParaId` will only be assigned to -//! 
that `CoreIndex`. This affinity mechanism can be removed if it can be shown that parallel -//! execution is valid. +//! that `CoreIndex`. +//! +//! NOTE: Once we have elastic scaling implemented we might want to extend this module to support +//! ignoring core affinity up to a certain extend. This should be opt-in though as the parachain +//! needs to support multiple cores in the same block. If we want to enable a single parachain +//! occupying multiple cores in on-demand, we will likely add a separate order type, where the +//! intent can be made explicit. mod benchmarking; mod mock_helpers; +extern crate alloc; + #[cfg(test)] mod tests; +use core::ops::{Deref, DerefMut}; + use crate::{configuration, paras, scheduler::common::Assignment}; use frame_support::{ @@ -43,13 +52,15 @@ use frame_support::{ }, }; use frame_system::pallet_prelude::*; -use primitives::{CoreIndex, Id as ParaId}; +use parity_scale_codec::{EncodeLike, WrapperTypeEncode}; +use primitives::{CoreIndex, Id as ParaId, ON_DEMAND_MAX_QUEUE_MAX_SIZE}; use sp_runtime::{ traits::{One, SaturatedConversion}, FixedPointNumber, FixedPointOperand, FixedU128, Perbill, Saturating, }; -use sp_std::{collections::vec_deque::VecDeque, prelude::*}; +use sp_std::{prelude::*, cmp::{Ordering, Reverse}, cmp::PartialOrd, cmp::Ord}; +use alloc::collections::BinaryHeap; const LOG_TARGET: &str = "runtime::parachains::assigner-on-demand"; @@ -73,12 +84,99 @@ impl WeightInfo for TestWeightInfo { } } +/// Meta data for full queue. +/// +/// This includes elements with affinity and free entries. +/// +/// The actual queue is implemented via multiple priority queues. One for each core, for entries +/// which currently have a core affinity and one free queue, with entries without any affinity yet. +/// +/// The design aims to have most queue accessess be O(1) or O(log(N)). Absolute worst case is O(N). +/// Importantly this includes all accessess that happen in a single block. Even with 50 cores, the +/// total complexity of all operations in the block should maintain above complexities. In +/// particular O(N) stays O(N), it should never be O(N*cores). +/// +/// More concrete rundown on complexity: +/// +/// - insert: O(1) for placing an order, O(log(N)) for push backs. +/// - pop_assignment_for_core: O(log(N)), O(N) worst case: Can only happen for one core, next core +/// is already less work. +/// - report_processed & push back: If affinity dropped to 0, then O(N) in the worst case. Again +/// this divides per core. +/// +/// Reads still exist, also improved slightly, but worst case we fetch all entries. This +/// technically should mean worst complexity is still O(N*cores), but constant factors should be +/// really small. Benchmarks will show. +/// +/// TODO: Add benchmark results. +#[derive(Encode, Decode, TypeInfo)] +struct QueueStatusType { + /// Last calculated traffic value. + traffic: FixedU128, + /// The next index to use. + next_index: QueueIndex, + /// Smallest index still in use. + /// + /// In case of a completely empty queue (free + affinity queues), `next_index - smallest_index == 0`. + smallest_index: QueueIndex, + /// Indices that have been freed already. + /// + /// But have a hole to `smallest_index`, so we can not yet bump `smallest_index`. This binary + /// heap is roughly bounded in the number of on demand cores: + /// + /// For a single core, elements will always be processed in order. With each core added, a + /// level of of out of order execution is added. 
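// Minimal standalone model (names and types invented for the sketch) of the layout described
// above: one "free" heap for orders without affinity, one heap per core for orders bound to
// that core, and a status struct that only carries the shared index counter. Real entries also
// carry the `ParaId`; bare integers are used here to keep the sketch short.
use std::cmp::Reverse;
use std::collections::{BinaryHeap, HashMap};

#[derive(Default)]
struct QueueModel {
    next_index: u32,
    free: BinaryHeap<Reverse<u32>>,                   // orders with no core affinity yet
    per_core: HashMap<u32, BinaryHeap<Reverse<u32>>>, // orders bound to a core
    affinity: HashMap<u64, u32>,                      // para -> core it is currently bound to
}

impl QueueModel {
    // Mirrors the placement rule: a new order lands in the core's heap if the para already
    // has affinity there, otherwise in the free heap.
    fn place_order(&mut self, para: u64) {
        let idx = self.next_index;
        self.next_index += 1;
        match self.affinity.get(&para) {
            Some(core) => self.per_core.entry(*core).or_default().push(Reverse(idx)),
            None => self.free.push(Reverse(idx)),
        }
    }
}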
+ freed_indices: BinaryHeap>, +} + +impl QueueStatusType { + /// How many orders are queued in total? + /// + /// This includes entries which have core affinity. + fn size(&self) -> u32 { + self.next_index.0.overflowing_sub(self.smallest_index.0).0.saturating_sub(self.freed_indices.len() as u32) + } + + /// Get current next index + /// + /// to use for an element newly pushed to the back of the queue. + fn push_back(&mut self) -> QueueIndex { + let QueueIndex(next_index) = self.next_index; + self.next_index = QueueIndex(next_index.overflowing_add(1).0); + QueueIndex(next_index) + } + + /// Push something to the front of the queue + fn push_front(&mut self) -> QueueIndex { + self.smallest_index = QueueIndex(self.next_index.0.overflowing_sub(1).0); + self.smallest_index + } + + /// The given index is no longer part of the queue. + /// + /// This updates `smallest_index` if need be. + fn consume_index(&mut self, removed_index: QueueIndex) { + if removed_index != self.smallest_index { + self.freed_indices.push(Reverse(removed_index)); + return + } + let mut index = self.smallest_index.0.overflowing_add(1).0; + // Even more to advance? + while self.freed_indices.peek() == Some(&Reverse(QueueIndex(index))) { + index = index.overflowing_add(1).0; + self.freed_indices.pop(); + } + self.smallest_index = QueueIndex(index); + } +} + + /// Keeps track of how many assignments a scheduler currently has at a specific `CoreIndex` for a /// specific `ParaId`. #[derive(Encode, Decode, Default, Clone, Copy, TypeInfo)] #[cfg_attr(test, derive(PartialEq, RuntimeDebug))] pub struct CoreAffinityCount { - core_idx: CoreIndex, + core_index: CoreIndex, count: u32, } @@ -93,8 +191,7 @@ type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// Errors that can happen during spot traffic calculation. -#[derive(PartialEq)] -#[cfg_attr(feature = "std", derive(Debug))] +#[derive(PartialEq, RuntimeDebug)] pub enum SpotTrafficCalculationErr { /// The order queue capacity is at 0. QueueCapacityIsZero, @@ -104,18 +201,81 @@ pub enum SpotTrafficCalculationErr { Division, } +// Type used for priority indices. +#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq)] +struct QueueIndex(u32); + +impl Ord for QueueIndex { + fn cmp(&self, other: &Self) -> Ordering { + let diff = self.0.overflowing_sub(other.0).0; + if diff == 0 { + Ordering::Equal + } else if diff <= ON_DEMAND_MAX_QUEUE_MAX_SIZE { + Ordering::Greater + } else { + Ordering::Less + } + } +} + +impl PartialOrd for QueueIndex { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + /// Internal representation of an order after it has been enqueued already. 
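// Worked example (standalone sketch with plain `u32` instead of `QueueIndex`): how
// `consume_index` lets `smallest_index` skip over the holes left by out-of-order removals.
use std::cmp::Reverse;
use std::collections::BinaryHeap;

struct Counters {
    next: u32,
    smallest: u32,
    freed: BinaryHeap<Reverse<u32>>,
}

impl Counters {
    fn size(&self) -> u32 {
        self.next.wrapping_sub(self.smallest).saturating_sub(self.freed.len() as u32)
    }
    fn consume(&mut self, idx: u32) {
        if idx != self.smallest {
            self.freed.push(Reverse(idx));
            return
        }
        let mut i = self.smallest.wrapping_add(1);
        while self.freed.peek() == Some(&Reverse(i)) {
            self.freed.pop();
            i = i.wrapping_add(1);
        }
        self.smallest = i;
    }
}

fn main() {
    // Three orders queued: indices 0, 1 and 2.
    let mut c = Counters { next: 3, smallest: 0, freed: BinaryHeap::new() };
    assert_eq!(c.size(), 3);
    // Index 1 is consumed first (out of order): it is only remembered as freed.
    c.consume(1);
    assert_eq!((c.smallest, c.size()), (0, 2));
    // Index 0 is consumed next: `smallest` can now jump over the freed hole straight to 2.
    c.consume(0);
    assert_eq!((c.smallest, c.size()), (2, 1));
}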
-#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone)] +/// +/// This data structure is provided for a min BinaryHeap (Ord compares in reverse order with regards to its elements) +#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq)] pub(super) struct EnqueuedOrder { - pub para_id: ParaId, + para_id: ParaId, + idx: QueueIndex, } impl EnqueuedOrder { - pub fn new(para_id: ParaId) -> Self { - Self { para_id } + pub fn new(idx: QueueIndex, para_id: ParaId) -> Self { + Self { idx, para_id } + } +} + +impl PartialOrd for EnqueuedOrder { + fn partial_cmp(&self, other: &Self) -> Option { + match other.idx.partial_cmp(&self.idx) { + None => return None, + Some(Ordering::Equal) => other.para_id.partial_cmp(&self.para_id), + Some(o) => Some(o), + } + } +} + +impl Ord for EnqueuedOrder { + fn cmp(&self, other: &Self) -> Ordering { + match other.idx.cmp(&self.idx) { + Ordering::Equal => other.para_id.cmp(&self.para_id), + o => o, + } } } +#[derive(Decode,TypeInfo)] +struct EncodeableBinaryHeap(BinaryHeap); + +impl Deref for EncodeableBinaryHeap { + type Target = BinaryHeap; + fn deref(&self) -> &Self::Target { &self.0 } +} +impl DerefMut for EncodeableBinaryHeap { + // Required method + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl WrapperTypeEncode for EncodeableBinaryHeap {} +impl EncodeLike<&[T]> for EncodeableBinaryHeap {} + + #[frame_support::pallet] pub mod pallet { @@ -141,36 +301,41 @@ pub mod pallet { type TrafficDefaultValue: Get; } - /// Creates an empty spot traffic value if one isn't present in storage already. + /// Creates an empty queue status for an empty queue with initial traffic value. #[pallet::type_value] - pub fn SpotTrafficOnEmpty() -> FixedU128 { - T::TrafficDefaultValue::get() + pub fn QueueStatusOnEmpty() -> QueueStatusType { + QueueStatusType { + traffic: T::TrafficDefaultValue::get(), + next_index: QueueIndex(0), + smallest_index: QueueIndex(0), + freed_indices: BinaryHeap::new(), + } } - /// Creates an empty on demand queue if one isn't present in storage already. #[pallet::type_value] - pub(super) fn OnDemandQueueOnEmpty() -> VecDeque { - VecDeque::new() + pub(super) fn EntriesOnEmpty() -> BinaryHeap { + BinaryHeap::new() } - /// Keeps track of the multiplier used to calculate the current spot price for the on demand - /// assigner. + /// Maps a `ParaId` to `CoreIndex` and keeps track of how many assignments the scheduler has in + /// it's lookahead. Keeping track of this affinity prevents parallel execution of the same + /// `ParaId` on two or more `CoreIndex`es. #[pallet::storage] - pub(super) type SpotTraffic = - StorageValue<_, FixedU128, ValueQuery, SpotTrafficOnEmpty>; + pub(super) type ParaIdAffinity = StorageMap<_, Twox64Concat, ParaId, CoreAffinityCount, OptionQuery>; - /// The order storage entry. Uses a VecDeque to be able to push to the front of the - /// queue from the scheduler on session boundaries. + /// Overall status of queue (both free + affinity entries) #[pallet::storage] - pub(super) type OnDemandQueue = - StorageValue<_, VecDeque, ValueQuery, OnDemandQueueOnEmpty>; + pub(super) type QueueStatus = StorageValue<_, QueueStatusType, QueueStatusOnEmpty, ValueQuery>; - /// Maps a `ParaId` to `CoreIndex` and keeps track of how many assignments the scheduler has in - /// it's lookahead. Keeping track of this affinity prevents parallel execution of the same - /// `ParaId` on two or more `CoreIndex`es. + + /// Priority queue for all orders which don't yet (or not any more) have any core affinity. 
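// Standalone sketch of the trick used by `EnqueuedOrder` above: `std::collections::BinaryHeap`
// is a max-heap, so implementing `Ord` with the operands swapped turns it into a min-heap and
// `pop()` then returns the entry with the smallest (oldest) queue index first.
use std::cmp::Ordering;
use std::collections::BinaryHeap;

#[derive(PartialEq, Eq)]
struct Order(u32);

impl Ord for Order {
    fn cmp(&self, other: &Self) -> Ordering {
        // Swapped operands: the larger index is treated as "smaller" by the heap.
        other.0.cmp(&self.0)
    }
}

impl PartialOrd for Order {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

fn main() {
    let mut heap = BinaryHeap::from(vec![Order(7), Order(2), Order(5)]);
    assert_eq!(heap.pop().map(|o| o.0), Some(2)); // oldest order comes out first
    assert_eq!(heap.pop().map(|o| o.0), Some(5));
}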
#[pallet::storage] - pub(super) type ParaIdAffinity = - StorageMap<_, Twox256, ParaId, CoreAffinityCount, OptionQuery>; + pub(super) type FreeEntries = StorageValue<_, BinaryHeap, EntriesOnEmpty, ValueQuery>; + + /// Queue entries that are currently bound to a particular core due to core affinity. + #[pallet::storage] + pub(super) type AffinityEntries = StorageMap<_, Twox64Concat, CoreIndex, BinaryHeap, EntriesOnEmpty, ValueQuery>; + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] @@ -183,9 +348,6 @@ pub mod pallet { #[pallet::error] pub enum Error { - /// The `ParaId` supplied to the `place_order` call is not a valid `ParaThread`, making the - /// call is invalid. - InvalidParaId, /// The order queue is full, `place_order` will not continue. QueueFull, /// The current spot price is higher than the max amount specified in the `place_order` @@ -193,52 +355,6 @@ pub mod pallet { SpotPriceHigherThanMaxAmount, } - #[pallet::hooks] - impl Hooks> for Pallet { - fn on_initialize(_now: BlockNumberFor) -> Weight { - let config = >::config(); - // Calculate spot price multiplier and store it. - let old_traffic = SpotTraffic::::get(); - match Self::calculate_spot_traffic( - old_traffic, - config.on_demand_queue_max_size, - Self::queue_size(), - config.on_demand_target_queue_utilization, - config.on_demand_fee_variability, - ) { - Ok(new_traffic) => { - // Only update storage on change - if new_traffic != old_traffic { - SpotTraffic::::set(new_traffic); - Pallet::::deposit_event(Event::::SpotTrafficSet { - traffic: new_traffic, - }); - return T::DbWeight::get().reads_writes(2, 1) - } - }, - Err(SpotTrafficCalculationErr::QueueCapacityIsZero) => { - log::debug!( - target: LOG_TARGET, - "Error calculating spot traffic: The order queue capacity is at 0." - ); - }, - Err(SpotTrafficCalculationErr::QueueSizeLargerThanCapacity) => { - log::debug!( - target: LOG_TARGET, - "Error calculating spot traffic: The queue size is larger than the queue capacity." - ); - }, - Err(SpotTrafficCalculationErr::Division) => { - log::debug!( - target: LOG_TARGET, - "Error calculating spot traffic: Arithmetic error during division, either division by 0 or over/underflow." - ); - }, - }; - T::DbWeight::get().reads_writes(2, 0) - } - } - #[pallet::call] impl Pallet { /// Create a single on demand core order. @@ -258,7 +374,7 @@ pub mod pallet { /// Events: /// - `SpotOrderPlaced` #[pallet::call_index(0)] - #[pallet::weight(::WeightInfo::place_order_allow_death(OnDemandQueue::::get().len() as u32))] + #[pallet::weight(::WeightInfo::place_order_allow_death(QueueStatus::::get().size()))] pub fn place_order_allow_death( origin: OriginFor, max_amount: BalanceOf, @@ -285,7 +401,7 @@ pub mod pallet { /// Events: /// - `SpotOrderPlaced` #[pallet::call_index(1)] - #[pallet::weight(::WeightInfo::place_order_keep_alive(OnDemandQueue::::get().len() as u32))] + #[pallet::weight(::WeightInfo::place_order_keep_alive(QueueStatus::::get().size()))] pub fn place_order_keep_alive( origin: OriginFor, max_amount: BalanceOf, @@ -297,10 +413,80 @@ pub mod pallet { } } +// Internal functions and interface to scheduler/wrapping assignment provider. impl Pallet where BalanceOf: FixedPointOperand, { + /// Take the next queued entry that is available for a given core index. + /// Invalidates and removes orders with a `para_id` that is not `ParaLifecycle::Parathread` + /// but only in [0..P] range slice of the order queue, where P is the element that is + /// removed from the order queue. 
+ /// + /// Parameters: + /// - `core_index`: The core index + pub fn pop_assignment_for_core(core_index: CoreIndex) -> Option { + + let entry = QueueStatus::::mutate(|queue_status| { + AffinityEntries::::mutate(core_index, |affinity_entries| { + FreeEntries::::mutate(|free_entries| { + let affinity_next = affinity_entries.peek(); + let free_next = free_entries.peek(); + let pick_free = match (affinity_next, free_next) { + (None, _) => true, + (Some(_), None) => false, + (Some(a), Some(f)) => f < a, + }; + let entry = if pick_free { + let entry = free_entries.pop()?; + let (affinities, free): (BinaryHeap<_>, BinaryHeap<_>) = free_entries.into_iter().partition(|e| e.para_id == entry.para_id); + affinity_entries.append(affinities); + *free_entries = free; + entry + } else { + affinity_entries.pop()? + }; + queue_status.consume_index(entry.idx); + }); + }); + }); + + // TODO: Test for invariant: If affinity count was zero before (is 1 now) then the entry + // must have come from free_entries. + Pallet::::increase_affinity(entry.para_id, core_index); + + let mut invalidated_para_id_indexes: Vec = vec![]; + + entry.map(|e| Assignment::Pool { para_id: e.para_id, core_index }) + } + + /// Report that the `para_id` & `core_index` combination was processed. + /// + /// This should be called once it is clear that the assignment won't get pushed back anymore. + /// + /// In other words for each `pop_assignment_for_core` a call to this function or + /// `push_back_assignment` must follow, but only one. + pub fn report_processed(para_id: ParaId, core_index: CoreIndex) { + Pallet::::decrease_affinity_update_queue(para_id, core_index); + } + + /// Push an assignment back to the front of the queue. + /// + /// The assignment has not been processed yet. Typically used on session boundaries. + /// + /// NOTE: We are not checking queue size here. So due to push backs it is possible that we + /// exceed the maximum queue size slightly. + /// + /// Parameters: + /// - `para_id`: The para that did not make it. + /// - `core_index`: The core the para was scheduled on. + pub fn push_back_assignment(para_id: ParaId, core_index: CoreIndex) { + Pallet::::decrease_affinity_update_queue(para_id, core_index); + QueueStatus::::mutate(|queue_status| { + Pallet::::add_on_demand_order(queue_status, para_id, QueuePushDirection::Front); + }); + } + /// Helper function for `place_order_*` calls. Used to differentiate between placing orders /// with a keep alive check or to allow the account to be reaped. 
/// @@ -326,35 +512,59 @@ where ) -> DispatchResult { let config = >::config(); - // Traffic always falls back to 1.0 - let traffic = SpotTraffic::::get(); - - // Calculate spot price - let spot_price: BalanceOf = - traffic.saturating_mul_int(config.on_demand_base_fee.saturated_into::>()); + QueueStatus::::mutate(|queue_status| { + Self::update_spot_traffic(&config, queue_status); + let traffic = queue_status.traffic; - // Is the current price higher than `max_amount` - ensure!(spot_price.le(&max_amount), Error::::SpotPriceHigherThanMaxAmount); + // Calculate spot price + let spot_price: BalanceOf = + traffic.saturating_mul_int(config.on_demand_base_fee.saturated_into::>()); - // Charge the sending account the spot price - let _ = T::Currency::withdraw( - &sender, - spot_price, - WithdrawReasons::FEE, - existence_requirement, - )?; + // Is the current price higher than `max_amount` + ensure!(spot_price.le(&max_amount), Error::::SpotPriceHigherThanMaxAmount); - let order = EnqueuedOrder::new(para_id); + // Charge the sending account the spot price + let _ = T::Currency::withdraw( + &sender, + spot_price, + WithdrawReasons::FEE, + existence_requirement, + )?; - let res = Pallet::::add_on_demand_order(order, QueuePushDirection::Back); - - if res.is_ok() { - Pallet::::deposit_event(Event::::OnDemandOrderPlaced { para_id, spot_price }); - } - - res + ensure!(queue_status.size() < config.on_demand_queue_max_size, Error::::QueueFull); + Pallet::::add_on_demand_order(queue_status, para_id, QueuePushDirection::Back); + Ok(()) + }) } + /// Calculate and update spot traffic. + fn update_spot_traffic(config: &configuration::HostConfiguration>, queue_status: &mut QueueStatusType) { + let old_traffic = queue_status.traffic; + match Self::_calculate_spot_traffic( + old_traffic, + config.on_demand_queue_max_size, + queue_status.size(), + config.on_demand_target_queue_utilization, + config.on_demand_fee_variability, + ) { + Ok(new_traffic) => { + // Only update storage on change + if new_traffic != old_traffic { + queue_status.traffic = new_traffic; + Pallet::::deposit_event(Event::::SpotTrafficSet { + traffic: new_traffic, + }); + } + }, + Err(err) => { + log::debug!( + target: LOG_TARGET, + "Error calculating spot traffic: {:?}", err + ); + }, + }; + } + /// The spot price multiplier. This is based on the transaction fee calculations defined in: /// https://research.web3.foundation/Polkadot/overview/token-economics#setting-transaction-fees /// @@ -379,8 +589,8 @@ where /// - `SpotTrafficCalculationErr::Division` pub(crate) fn calculate_spot_traffic( traffic: FixedU128, - queue_capacity: u32, - queue_size: u32, + queue_capacity: QueueIndex, + queue_size: QueueIndex, target_queue_utilisation: Perbill, variability: Perbill, ) -> Result { @@ -429,172 +639,95 @@ where /// Adds an order to the on demand queue. /// /// Paramenters: - /// - `order`: The `EnqueuedOrder` to add to the queue. /// - `location`: Whether to push this entry to the back or the front of the queue. Pushing an /// entry to the front of the queue is only used when the scheduler wants to push back an /// entry it has already popped. - /// Returns: - /// - The unit type on success. - /// - /// Errors: - /// - `InvalidParaId` - /// - `QueueFull` fn add_on_demand_order( - order: EnqueuedOrder, + queue_status: &mut QueueStatusType, + para_id: ParaId, location: QueuePushDirection, - ) -> Result<(), DispatchError> { - // Only parathreads are valid paraids for on the go parachains. 
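// Small illustration (standalone, made-up numbers) of the price check in `do_place_order`
// above: the spot price is the configured base fee scaled by the current traffic multiplier,
// and the order is rejected with `SpotPriceHigherThanMaxAmount` if that exceeds the caller's
// `max_amount`.
use sp_runtime::{FixedPointNumber, FixedU128};

fn main() {
    let on_demand_base_fee: u128 = 10_000_000;
    let traffic = FixedU128::saturating_from_rational(3, 2); // queue has been running hot
    let spot_price = traffic.saturating_mul_int(on_demand_base_fee);
    assert_eq!(spot_price, 15_000_000);

    let max_amount: u128 = 12_000_000;
    assert!(spot_price > max_amount); // this order would be rejected
}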
- ensure!(>::is_parathread(order.para_id), Error::::InvalidParaId); - - let config = >::config(); - - OnDemandQueue::::try_mutate(|queue| { - // Abort transaction if queue is too large - ensure!(Self::queue_size() < config.on_demand_queue_max_size, Error::::QueueFull); - match location { - QueuePushDirection::Back => queue.push_back(order), - QueuePushDirection::Front => queue.push_front(order), - }; - Ok(()) - }) + ) { + + let idx = match location { + QueuePushDirection::Back => queue_status.push_back(), + QueuePushDirection::Front => queue_status.push_front(), + }; + + let affinity = ParaIdAffinity::::get(para_id); + let order = EnqueuedOrder::new(idx, para_id); + match affinity { + None => FreeEntries::::mutate(|entries| entries.push(order)), + Some(affinity) => AffinityEntries::::mutate(affinity.core_index, |entries| entries.push(order)), + } } - /// Get the size of the on demand queue. + /// Decrease core affinity for para and update queue /// - /// Returns: - /// - The size of the on demand queue. - fn queue_size() -> u32 { - let config = >::config(); - match OnDemandQueue::::get().len().try_into() { - Ok(size) => return size, - Err(_) => { - log::debug!( - target: LOG_TARGET, - "Failed to fetch the on demand queue size, returning the max size." - ); - return config.on_demand_queue_max_size - }, + /// if affinity dropped to 0, moving entries back to `FreeEntries`. + fn decrease_affinity_update_queue(para_id: ParaId, core_index: CoreIndex) { + let affinity = Pallet::::decrease_affinity(para_id, core_index); + debug_assert_ne!(affinity, None, "Decreased affinity for a para that has not been served on a core?"); + if affinity != Some(0) { + return } - } - - /// Getter for the order queue. - #[cfg(test)] - fn get_queue() -> VecDeque { - OnDemandQueue::::get() - } - - /// Getter for the affinity tracker. - pub fn get_affinity_map(para_id: ParaId) -> Option { - ParaIdAffinity::::get(para_id) + // No affinity more for entries on this core, free any entries: + // + // This is necessary to ensure them being served as the core might no longer exist at all. + AffinityEntries::::mutate(core_index, |affinity_entries| { + FreeEntries::::mutate(|free_entries| { + let (freed, affinities): (BinaryHeap<_>, BinaryHeap<_>) = affinity_entries.into_iter().partition(|e| e.para_id == para_id); + free_entries.append(freed); + *affinity_entries = affinities; + }) + }); } /// Decreases the affinity of a `ParaId` to a specified `CoreIndex`. - /// Subtracts from the count of the `CoreAffinityCount` if an entry is found and the core_idx + /// + /// Subtracts from the count of the `CoreAffinityCount` if an entry is found and the core_index /// matches. When the count reaches 0, the entry is removed. /// A non-existant entry is a no-op. - fn decrease_affinity(para_id: ParaId, core_idx: CoreIndex) { + /// + /// Returns: The new affinity of the para on that core. `None` if there is no affinity on this + /// core. 
+ fn decrease_affinity(para_id: ParaId, core_index: CoreIndex) -> Option { ParaIdAffinity::::mutate(para_id, |maybe_affinity| { - if let Some(affinity) = maybe_affinity { - if affinity.core_idx == core_idx { - let new_count = affinity.count.saturating_sub(1); - if new_count > 0 { - *maybe_affinity = Some(CoreAffinityCount { core_idx, count: new_count }); - } else { - *maybe_affinity = None; - } + let affinity = maybe_affinity.as_mut()?; + if affinity.core_index == core_index { + let new_count = affinity.count.saturating_sub(1); + if new_count > 0 { + *maybe_affinity = Some(CoreAffinityCount { core_index, count: new_count }); + } else { + *maybe_affinity = None; } + return Some(new_count) + } else { + None } - }); + }) } /// Increases the affinity of a `ParaId` to a specified `CoreIndex`. - /// Adds to the count of the `CoreAffinityCount` if an entry is found and the core_idx matches. + /// Adds to the count of the `CoreAffinityCount` if an entry is found and the core_index matches. /// A non-existant entry will be initialized with a count of 1 and uses the supplied /// `CoreIndex`. - fn increase_affinity(para_id: ParaId, core_idx: CoreIndex) { + fn increase_affinity(para_id: ParaId, core_index: CoreIndex) { ParaIdAffinity::::mutate(para_id, |maybe_affinity| match maybe_affinity { Some(affinity) => - if affinity.core_idx == core_idx { + if affinity.core_index == core_index { *maybe_affinity = Some(CoreAffinityCount { - core_idx, + core_index, count: affinity.count.saturating_add(1), }); }, None => { - *maybe_affinity = Some(CoreAffinityCount { core_idx, count: 1 }); + *maybe_affinity = Some(CoreAffinityCount { core_index, count: 1 }); }, }) } -} -impl Pallet { - /// Take the next queued entry that is available for a given core index. - /// Invalidates and removes orders with a `para_id` that is not `ParaLifecycle::Parathread` - /// but only in [0..P] range slice of the order queue, where P is the element that is - /// removed from the order queue. - /// - /// Parameters: - /// - `core_idx`: The core index - pub fn pop_assignment_for_core(core_idx: CoreIndex) -> Option { - let mut queue: VecDeque = OnDemandQueue::::get(); - - let mut invalidated_para_id_indexes: Vec = vec![]; - - // Get the position of the next `ParaId`. Select either a valid `ParaId` that has an - // affinity to the same `CoreIndex` as the scheduler asks for or a valid `ParaId` with no - // affinity at all. - let pos = queue.iter().enumerate().position(|(index, assignment)| { - if >::is_parathread(assignment.para_id) { - match ParaIdAffinity::::get(&assignment.para_id) { - Some(affinity) => return affinity.core_idx == core_idx, - None => return true, - } - } - // Record no longer valid para_ids. - invalidated_para_id_indexes.push(index); - return false - }); - - // Collect the popped value. - let popped = pos.and_then(|p: usize| { - if let Some(assignment) = queue.remove(p) { - Pallet::::increase_affinity(assignment.para_id, core_idx); - return Some(assignment) - }; - None - }); - - // Only remove the invalid indexes *after* using the index. - // Removed in reverse order so that the indexes don't shift. - invalidated_para_id_indexes.iter().rev().for_each(|idx| { - queue.remove(*idx); - }); - - // Write changes to storage. - OnDemandQueue::::set(queue); - - popped.map(|p| Assignment::Pool { para_id: p.para_id, core_index: core_idx }) - } - - /// Report that the `para_id` & `core_index` combination was processed. 
- pub fn report_processed(para_id: ParaId, core_index: CoreIndex) { - Pallet::::decrease_affinity(para_id, core_index) - } - - /// Push an assignment back to the front of the queue. - /// - /// The assignment has not been processed yet. Typically used on session boundaries. - /// Parameters: - /// - `assignment`: The on demand assignment. - pub fn push_back_assignment(para_id: ParaId, core_index: CoreIndex) { - Pallet::::decrease_affinity(para_id, core_index); - // Skip the queue on push backs from scheduler - match Pallet::::add_on_demand_order( - EnqueuedOrder::new(para_id), - QueuePushDirection::Front, - ) { - Ok(_) => {}, - Err(_) => {}, - } + /// Getter for the affinity tracker. + fn get_affinity_map(para_id: ParaId) -> Option { + ParaIdAffinity::::get(para_id) } } diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs index 8404700780c8..f3750f82e6f3 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs @@ -564,6 +564,14 @@ fn affinity_prohibits_parallel_scheduling() { OnDemandAssigner::report_processed(para_a, 0.into()); OnDemandAssigner::report_processed(para_b, 0.into()); + // Add 2 assignments for para_a for every para_b. + OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) + .expect("Invalid paraid or queue full"); + + OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) + .expect("Invalid paraid or queue full"); + OnDemandAssigner::report_processed(assignment_b_core_0.clone()); + // Add 2 assignments for para_a for every para_b. OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) .expect("Invalid paraid or queue full"); @@ -588,9 +596,9 @@ fn affinity_prohibits_parallel_scheduling() { assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().core_idx, CoreIndex(1)); // Clear affinity - OnDemandAssigner::report_processed(para_a, 0.into()); - OnDemandAssigner::report_processed(para_a, 0.into()); - OnDemandAssigner::report_processed(para_b, 1.into()); + OnDemandAssigner::report_processed(assignment_a.clone()); + OnDemandAssigner::report_processed(assignment_a.clone()); + OnDemandAssigner::report_processed(assignment_b_core_1.clone()); // There should be no affinity after clearing. assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); @@ -603,6 +611,7 @@ fn on_demand_orders_cannot_be_popped_if_lifecycle_changes() { let para_id = ParaId::from(10); let core_index = CoreIndex(0); let order = EnqueuedOrder::new(para_id); + let assignment = OnDemandAssignment::new(para_id, core_index); new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { // Register the para_id as a parathread @@ -617,10 +626,7 @@ fn on_demand_orders_cannot_be_popped_if_lifecycle_changes() { assert_ok!(OnDemandAssigner::add_on_demand_order(order.clone(), QueuePushDirection::Back)); // First pop is fine - assert!( - OnDemandAssigner::pop_assignment_for_core(core_index) == - Some(Assignment::Pool { para_id, core_index }) - ); + assert_eq!(OnDemandAssigner::pop_assignment_for_core(core_index), Some(assignment.clone())); // Deregister para assert_ok!(Paras::schedule_para_cleanup(para_id)); @@ -631,7 +637,7 @@ fn on_demand_orders_cannot_be_popped_if_lifecycle_changes() { assert!(!Paras::is_parathread(para_id)); // Second pop should be None. 
- OnDemandAssigner::report_processed(para_id, core_index); + OnDemandAssigner::report_processed(assignment.clone()); assert_eq!(OnDemandAssigner::pop_assignment_for_core(core_index), None); }); } From 16afcfa9819bc68c68e275591de0d0c6d847d5af Mon Sep 17 00:00:00 2001 From: eskimor Date: Thu, 1 Feb 2024 10:07:29 +0100 Subject: [PATCH 06/31] Revert min/max fix. --- polkadot/runtime/parachains/src/scheduler.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs index b8a96d0adced..a666f5689089 100644 --- a/polkadot/runtime/parachains/src/scheduler.rs +++ b/polkadot/runtime/parachains/src/scheduler.rs @@ -584,8 +584,7 @@ impl Pallet { return } // If there exists a core, ensure we schedule at least one job onto it. - // TODO (now): Add a test for this. This wax `max` and nobody noticed. - let n_lookahead = Self::claimqueue_lookahead().min(1); + let n_lookahead = Self::claimqueue_lookahead().max(1); let n_session_cores = T::AssignmentProvider::session_core_count(); let cq = ClaimQueue::::get(); let ttl = >::config().on_demand_ttl; From c750146155221b7563226ec1af528bf922d03113 Mon Sep 17 00:00:00 2001 From: eskimor Date: Fri, 2 Feb 2024 10:39:23 +0100 Subject: [PATCH 07/31] Fixes + EncodeableBinaryHeap. --- polkadot/primitives/src/lib.rs | 4 +- polkadot/primitives/src/v6/mod.rs | 6 +- .../parachains/src/assigner_on_demand/mod.rs | 271 ++++++++++++------ 3 files changed, 188 insertions(+), 93 deletions(-) diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index e8778c55c449..14c5bb7d0acf 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -57,8 +57,8 @@ pub use v6::{ UpgradeRestriction, UpwardMessage, ValidDisputeStatementKind, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, ValidityError, ASSIGNMENT_KEY_TYPE_ID, LEGACY_MIN_BACKING_VOTES, LOWEST_PUBLIC_ID, - MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, - PARACHAINS_INHERENT_IDENTIFIER, PARACHAIN_KEY_TYPE_ID, + MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, + ON_DEMAND_MAX_QUEUE_MAX_SIZE, PARACHAINS_INHERENT_IDENTIFIER, PARACHAIN_KEY_TYPE_ID, }; #[cfg(feature = "std")] diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index 010c23d29499..b69e42635965 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -397,9 +397,9 @@ pub const ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE: u32 = 500; /// Maximum for maximum queue size. /// -/// Setting `on_demand_queue_max_size` to a value higher than this is unsound. This is more a theoretical limit, just -/// below enough what the target type supports, so comparisons are possible even with indices that are overflowing the -/// underyling type. +/// Setting `on_demand_queue_max_size` to a value higher than this is unsound. This is more a +/// theoretical limit, just below enough what the target type supports, so comparisons are possible +/// even with indices that are overflowing the underyling type. 
pub const ON_DEMAND_MAX_QUEUE_MAX_SIZE: u32 = 1_000_000_000; /// Backing votes threshold used from the host prior to runtime API version 6 and from the runtime diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index e05f36851901..1b66cc0cb2c5 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -1,4 +1,4 @@ -// Copyright (C) Parity Technologies (UK) Ltd. +// Copyright (C) Parit (UK) Ltd. // This file is part of Polkadot. // Polkadot is free software: you can redistribute it and/or modify @@ -39,7 +39,7 @@ extern crate alloc; #[cfg(test)] mod tests; -use core::ops::{Deref, DerefMut}; +use core::{mem::take, ops::{Deref, DerefMut}}; use crate::{configuration, paras, scheduler::common::Assignment}; @@ -52,15 +52,19 @@ use frame_support::{ }, }; use frame_system::pallet_prelude::*; -use parity_scale_codec::{EncodeLike, WrapperTypeEncode}; +use parity_scale_codec::{EncodeAsRef, EncodeLike, FullEncode, Input, Output, WrapperTypeEncode}; use primitives::{CoreIndex, Id as ParaId, ON_DEMAND_MAX_QUEUE_MAX_SIZE}; +use scale_info::{build::{Fields, FieldsBuilder}, Path, Type, TypeParameter}; use sp_runtime::{ traits::{One, SaturatedConversion}, FixedPointNumber, FixedPointOperand, FixedU128, Perbill, Saturating, }; -use sp_std::{prelude::*, cmp::{Ordering, Reverse}, cmp::PartialOrd, cmp::Ord}; use alloc::collections::BinaryHeap; +use sp_std::{ + cmp::{Ord, Ordering, PartialOrd}, + prelude::*, +}; const LOG_TARGET: &str = "runtime::parachains::assigner-on-demand"; @@ -117,7 +121,8 @@ struct QueueStatusType { next_index: QueueIndex, /// Smallest index still in use. /// - /// In case of a completely empty queue (free + affinity queues), `next_index - smallest_index == 0`. + /// In case of a completely empty queue (free + affinity queues), `next_index - smallest_index + /// == 0`. smallest_index: QueueIndex, /// Indices that have been freed already. /// @@ -126,7 +131,7 @@ struct QueueStatusType { /// /// For a single core, elements will always be processed in order. With each core added, a /// level of of out of order execution is added. - freed_indices: BinaryHeap>, + freed_indices: EncodeableBinaryHeap, } impl QueueStatusType { @@ -134,7 +139,11 @@ impl QueueStatusType { /// /// This includes entries which have core affinity. fn size(&self) -> u32 { - self.next_index.0.overflowing_sub(self.smallest_index.0).0.saturating_sub(self.freed_indices.len() as u32) + self.next_index + .0 + .overflowing_sub(self.smallest_index.0) + .0 + .saturating_sub(self.freed_indices.len() as u32) } /// Get current next index @@ -157,12 +166,12 @@ impl QueueStatusType { /// This updates `smallest_index` if need be. fn consume_index(&mut self, removed_index: QueueIndex) { if removed_index != self.smallest_index { - self.freed_indices.push(Reverse(removed_index)); + self.freed_indices.push(removed_index.reverse()); return } let mut index = self.smallest_index.0.overflowing_add(1).0; // Even more to advance? - while self.freed_indices.peek() == Some(&Reverse(QueueIndex(index))) { + while self.freed_indices.peek() == Some(&ReverseQueueIndex(index)) { index = index.overflowing_add(1).0; self.freed_indices.pop(); } @@ -170,7 +179,6 @@ impl QueueStatusType { } } - /// Keeps track of how many assignments a scheduler currently has at a specific `CoreIndex` for a /// specific `ParaId`. 
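// Standalone sketch (names invented) of the bookkeeping rule this map encodes: every
// assignment popped for a core bumps the para's counter on that core, every processed or
// pushed-back assignment decrements it, and only once the counter is back at zero may a
// different core serve that para again.
use std::collections::HashMap;

#[derive(Default)]
struct Affinity(HashMap<u32, (u32, u32)>); // para -> (core, count)

impl Affinity {
    fn increase(&mut self, para: u32, core: u32) {
        let entry = self.0.entry(para).or_insert((core, 0));
        if entry.0 == core {
            entry.1 += 1;
        }
    }
    fn decrease(&mut self, para: u32, core: u32) {
        let gone = match self.0.get_mut(&para) {
            Some((c, count)) if *c == core => {
                *count = count.saturating_sub(1);
                *count == 0
            },
            _ => false,
        };
        if gone {
            self.0.remove(&para);
        }
    }
}

fn main() {
    let mut a = Affinity::default();
    a.increase(7, 0);
    a.increase(7, 0); // two assignments in the lookahead, both on core 0
    a.decrease(7, 0);
    assert!(a.0.contains_key(&7)); // still bound to core 0
    a.decrease(7, 0);
    assert!(!a.0.contains_key(&7)); // affinity gone, any core may serve para 7 again
}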
#[derive(Encode, Decode, Default, Clone, Copy, TypeInfo)] @@ -202,9 +210,29 @@ pub enum SpotTrafficCalculationErr { } // Type used for priority indices. -#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq)] +#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq, Copy)] struct QueueIndex(u32); +/// QueueIndex with reverse ordering. +/// +/// Same as `Reverse(QueueIndex)`, but with all the needed traits implemented. +/// +/// TODO: Add basic ordering tests. +#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq, Copy)] +struct ReverseQueueIndex(u32); + +impl QueueIndex { + fn reverse(self) -> ReverseQueueIndex { + ReverseQueueIndex(self.0) + } +} + +impl ReverseQueueIndex { + fn reverse(self) -> QueueIndex { + QueueIndex(self.0) + } +} + impl Ord for QueueIndex { fn cmp(&self, other: &Self) -> Ordering { let diff = self.0.overflowing_sub(other.0).0; @@ -224,9 +252,21 @@ impl PartialOrd for QueueIndex { } } +impl Ord for ReverseQueueIndex { + fn cmp(&self, other: &Self) -> Ordering { + QueueIndex(other.0).cmp(&QueueIndex(self.0)) + } +} +impl PartialOrd for ReverseQueueIndex { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(&other)) + } +} + /// Internal representation of an order after it has been enqueued already. /// -/// This data structure is provided for a min BinaryHeap (Ord compares in reverse order with regards to its elements) +/// This data structure is provided for a min BinaryHeap (Ord compares in reverse order with regards +/// to its elements) #[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq)] pub(super) struct EnqueuedOrder { para_id: ParaId, @@ -258,23 +298,64 @@ impl Ord for EnqueuedOrder { } } -#[derive(Decode,TypeInfo)] -struct EncodeableBinaryHeap(BinaryHeap); +struct EncodeableBinaryHeap(BinaryHeap); + +impl TypeInfo for EncodeableBinaryHeap { + type Identity = Self; + // TODO: Do we really have to do this by hand? Better add `BinaryHeap` to scale directly. 
+ fn type_info() -> Type { + Type::builder().path(Path::new("EncodeableBinaryHeap", module_path!())).type_params([TypeParameter { name: "T", ty: None }]) + .composite(Fields::unnamed()) + } +} -impl Deref for EncodeableBinaryHeap { +impl EncodeableBinaryHeap { + pub fn new() -> Self { + Self(BinaryHeap::new()) + } +} + +impl Deref for EncodeableBinaryHeap { type Target = BinaryHeap; - fn deref(&self) -> &Self::Target { &self.0 } + fn deref(&self) -> &Self::Target { + &self.0 + } } -impl DerefMut for EncodeableBinaryHeap { - // Required method - fn deref_mut(&mut self) -> &mut Self::Target { + +impl DerefMut for EncodeableBinaryHeap { + // Required method + fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } -impl WrapperTypeEncode for EncodeableBinaryHeap {} -impl EncodeLike<&[T]> for EncodeableBinaryHeap {} +impl From> for EncodeableBinaryHeap { + fn from(other: Vec) -> Self { + EncodeableBinaryHeap(BinaryHeap::from(other)) + } +} +impl Encode for EncodeableBinaryHeap { + fn encode_to(&self, dest: &mut A) { + take(&mut self.0).into_vec().encode_to(dest); + } + + fn encode(&self) -> Vec { + self.0.into_vec().encode() + } + + fn using_encoded R>(&self, f: F) -> R { + self.0.into_vec().using_encoded(f) + } +} + +impl Decode for EncodeableBinaryHeap { + fn decode(input: &mut I) -> Result { + as Decode>::decode(input).map(Self::from) + } +} + +impl EncodeLike for EncodeableBinaryHeap {} #[frame_support::pallet] pub mod pallet { @@ -308,34 +389,42 @@ pub mod pallet { traffic: T::TrafficDefaultValue::get(), next_index: QueueIndex(0), smallest_index: QueueIndex(0), - freed_indices: BinaryHeap::new(), + freed_indices: EncodeableBinaryHeap::new(), } } #[pallet::type_value] - pub(super) fn EntriesOnEmpty() -> BinaryHeap { - BinaryHeap::new() + pub(super) fn EntriesOnEmpty() -> EncodeableBinaryHeap { + EncodeableBinaryHeap::new() } /// Maps a `ParaId` to `CoreIndex` and keeps track of how many assignments the scheduler has in /// it's lookahead. Keeping track of this affinity prevents parallel execution of the same /// `ParaId` on two or more `CoreIndex`es. #[pallet::storage] - pub(super) type ParaIdAffinity = StorageMap<_, Twox64Concat, ParaId, CoreAffinityCount, OptionQuery>; + pub(super) type ParaIdAffinity = + StorageMap<_, Twox64Concat, ParaId, CoreAffinityCount, OptionQuery>; /// Overall status of queue (both free + affinity entries) #[pallet::storage] - pub(super) type QueueStatus = StorageValue<_, QueueStatusType, QueueStatusOnEmpty, ValueQuery>; - + pub(super) type QueueStatus = + StorageValue<_, QueueStatusType, ValueQuery, QueueStatusOnEmpty>; /// Priority queue for all orders which don't yet (or not any more) have any core affinity. #[pallet::storage] - pub(super) type FreeEntries = StorageValue<_, BinaryHeap, EntriesOnEmpty, ValueQuery>; + pub(super) type FreeEntries = + StorageValue<_, EncodeableBinaryHeap, ValueQuery, EntriesOnEmpty>; /// Queue entries that are currently bound to a particular core due to core affinity. 
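// Standalone sketch of what the wrapper's `Encode`/`Decode` impls above boil down to: the heap
// is serialised as a plain SCALE `Vec` and rebuilt into a heap on decode, so a storage
// round-trip preserves the contents (the heap's internal layout itself is not encoded).
use parity_scale_codec::{Decode, Encode};
use std::collections::BinaryHeap;

fn main() {
    let heap = BinaryHeap::from(vec![3u32, 1, 2]);
    let encoded = heap.clone().into_vec().encode();
    let decoded = BinaryHeap::from(Vec::<u32>::decode(&mut &encoded[..]).unwrap());
    assert_eq!(decoded.into_sorted_vec(), vec![1, 2, 3]);
}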
#[pallet::storage] - pub(super) type AffinityEntries = StorageMap<_, Twox64Concat, CoreIndex, BinaryHeap, EntriesOnEmpty, ValueQuery>; - + pub(super) type AffinityEntries = StorageMap< + _, + Twox64Concat, + CoreIndex, + EncodeableBinaryHeap, + ValueQuery, + EntriesOnEmpty, + >; #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] @@ -426,10 +515,9 @@ where /// Parameters: /// - `core_index`: The core index pub fn pop_assignment_for_core(core_index: CoreIndex) -> Option { - - let entry = QueueStatus::::mutate(|queue_status| { - AffinityEntries::::mutate(core_index, |affinity_entries| { - FreeEntries::::mutate(|free_entries| { + let entry: Result = QueueStatus::::try_mutate(|queue_status| { + AffinityEntries::::try_mutate(core_index, |affinity_entries| { + let free_entry = FreeEntries::::try_mutate(|free_entries| { let affinity_next = affinity_entries.peek(); let free_next = free_entries.peek(); let pick_free = match (affinity_next, free_next) { @@ -437,27 +525,29 @@ where (Some(_), None) => false, (Some(a), Some(f)) => f < a, }; - let entry = if pick_free { - let entry = free_entries.pop()?; - let (affinities, free): (BinaryHeap<_>, BinaryHeap<_>) = free_entries.into_iter().partition(|e| e.para_id == entry.para_id); - affinity_entries.append(affinities); - *free_entries = free; - entry + if pick_free { + let entry = free_entries.pop().ok_or(())?; + let (mut affinities, free): (BinaryHeap<_>, BinaryHeap<_>) = + (*free_entries).into_iter().partition(|e| e.para_id == entry.para_id); + affinity_entries.append(&mut affinities); + *free_entries = EncodeableBinaryHeap(free); + Ok(entry) } else { - affinity_entries.pop()? - }; - queue_status.consume_index(entry.idx); + Err(()) + } }); - }); + let entry = free_entry.or_else(|()| affinity_entries.pop().ok_or(()))?; + queue_status.consume_index(entry.idx); + Ok(entry) + }) }); + let assignment = entry.map(|e| Assignment::Pool { para_id: e.para_id, core_index }).ok()?; + // TODO: Test for invariant: If affinity count was zero before (is 1 now) then the entry // must have come from free_entries. - Pallet::::increase_affinity(entry.para_id, core_index); - - let mut invalidated_para_id_indexes: Vec = vec![]; - - entry.map(|e| Assignment::Pool { para_id: e.para_id, core_index }) + Pallet::::increase_affinity(assignment.para_id(), core_index); + Some(assignment) } /// Report that the `para_id` & `core_index` combination was processed. @@ -517,8 +607,8 @@ where let traffic = queue_status.traffic; // Calculate spot price - let spot_price: BalanceOf = - traffic.saturating_mul_int(config.on_demand_base_fee.saturated_into::>()); + let spot_price: BalanceOf = traffic + .saturating_mul_int(config.on_demand_base_fee.saturated_into::>()); // Is the current price higher than `max_amount` ensure!(spot_price.le(&max_amount), Error::::SpotPriceHigherThanMaxAmount); @@ -529,7 +619,7 @@ where spot_price, WithdrawReasons::FEE, existence_requirement, - )?; + )?; ensure!(queue_status.size() < config.on_demand_queue_max_size, Error::::QueueFull); Pallet::::add_on_demand_order(queue_status, para_id, QueuePushDirection::Back); @@ -538,32 +628,33 @@ where } /// Calculate and update spot traffic. 
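// Directional sketch only: the real update lives in `calculate_spot_traffic` and follows the
// fee model referenced earlier in this file, so the formula below is made up for illustration.
// The point `update_spot_traffic` wires up is the feedback loop: utilisation above the target
// pushes the multiplier (and with it the spot price) up, utilisation below the target lets it
// decay again, and the multiplier never drops below 1.0.
fn sketch_update(traffic: f64, queue_size: u32, queue_capacity: u32, target: f64, step: f64) -> f64 {
    let utilisation = queue_size as f64 / queue_capacity as f64;
    (traffic * (1.0 + step * (utilisation - target))).max(1.0)
}

fn main() {
    let hot = sketch_update(1.0, 900, 1_000, 0.25, 2.0); // queue filling up -> multiplier rises
    assert!(hot > 1.0);
    let cooled = sketch_update(hot, 100, 1_000, 0.25, 2.0); // queue drains -> decays toward 1.0
    assert!(cooled < hot && cooled >= 1.0);
}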
- fn update_spot_traffic(config: &configuration::HostConfiguration>, queue_status: &mut QueueStatusType) { - let old_traffic = queue_status.traffic; - match Self::_calculate_spot_traffic( - old_traffic, - config.on_demand_queue_max_size, - queue_status.size(), - config.on_demand_target_queue_utilization, - config.on_demand_fee_variability, - ) { - Ok(new_traffic) => { - // Only update storage on change - if new_traffic != old_traffic { - queue_status.traffic = new_traffic; - Pallet::::deposit_event(Event::::SpotTrafficSet { - traffic: new_traffic, - }); - } - }, - Err(err) => { - log::debug!( - target: LOG_TARGET, - "Error calculating spot traffic: {:?}", err - ); - }, - }; - } + fn update_spot_traffic( + config: &configuration::HostConfiguration>, + queue_status: &mut QueueStatusType, + ) { + let old_traffic = queue_status.traffic; + match Self::calculate_spot_traffic( + old_traffic, + config.on_demand_queue_max_size, + queue_status.size(), + config.on_demand_target_queue_utilization, + config.on_demand_fee_variability, + ) { + Ok(new_traffic) => { + // Only update storage on change + if new_traffic != old_traffic { + queue_status.traffic = new_traffic; + Pallet::::deposit_event(Event::::SpotTrafficSet { traffic: new_traffic }); + } + }, + Err(err) => { + log::debug!( + target: LOG_TARGET, + "Error calculating spot traffic: {:?}", err + ); + }, + }; + } /// The spot price multiplier. This is based on the transaction fee calculations defined in: /// https://research.web3.foundation/Polkadot/overview/token-economics#setting-transaction-fees @@ -589,8 +680,8 @@ where /// - `SpotTrafficCalculationErr::Division` pub(crate) fn calculate_spot_traffic( traffic: FixedU128, - queue_capacity: QueueIndex, - queue_size: QueueIndex, + queue_capacity: u32, + queue_size: u32, target_queue_utilisation: Perbill, variability: Perbill, ) -> Result { @@ -647,7 +738,6 @@ where para_id: ParaId, location: QueuePushDirection, ) { - let idx = match location { QueuePushDirection::Back => queue_status.push_back(), QueuePushDirection::Front => queue_status.push_front(), @@ -657,7 +747,8 @@ where let order = EnqueuedOrder::new(idx, para_id); match affinity { None => FreeEntries::::mutate(|entries| entries.push(order)), - Some(affinity) => AffinityEntries::::mutate(affinity.core_index, |entries| entries.push(order)), + Some(affinity) => + AffinityEntries::::mutate(affinity.core_index, |entries| entries.push(order)), } } @@ -666,7 +757,10 @@ where /// if affinity dropped to 0, moving entries back to `FreeEntries`. fn decrease_affinity_update_queue(para_id: ParaId, core_index: CoreIndex) { let affinity = Pallet::::decrease_affinity(para_id, core_index); - debug_assert_ne!(affinity, None, "Decreased affinity for a para that has not been served on a core?"); + debug_assert_ne!( + affinity, None, + "Decreased affinity for a para that has not been served on a core?" + ); if affinity != Some(0) { return } @@ -675,9 +769,10 @@ where // This is necessary to ensure them being served as the core might no longer exist at all. 
AffinityEntries::::mutate(core_index, |affinity_entries| { FreeEntries::::mutate(|free_entries| { - let (freed, affinities): (BinaryHeap<_>, BinaryHeap<_>) = affinity_entries.into_iter().partition(|e| e.para_id == para_id); - free_entries.append(freed); - *affinity_entries = affinities; + let (mut freed, affinities): (BinaryHeap<_>, BinaryHeap<_>) = + (*affinity_entries).into_iter().partition(|e| e.para_id == para_id); + free_entries.append(&mut freed); + *affinity_entries = EncodeableBinaryHeap(affinities); }) }); } @@ -708,8 +803,8 @@ where } /// Increases the affinity of a `ParaId` to a specified `CoreIndex`. - /// Adds to the count of the `CoreAffinityCount` if an entry is found and the core_index matches. - /// A non-existant entry will be initialized with a count of 1 and uses the supplied + /// Adds to the count of the `CoreAffinityCount` if an entry is found and the core_index + /// matches. A non-existant entry will be initialized with a count of 1 and uses the supplied /// `CoreIndex`. fn increase_affinity(para_id: ParaId, core_index: CoreIndex) { ParaIdAffinity::::mutate(para_id, |maybe_affinity| match maybe_affinity { From b0edbcfb53c46ef5dc75e6cddfff64eb98a32965 Mon Sep 17 00:00:00 2001 From: eskimor Date: Fri, 2 Feb 2024 11:18:01 +0100 Subject: [PATCH 08/31] Remove EncodeableBinaryHeap --- .../parachains/src/assigner_on_demand/mod.rs | 75 ++----------------- 1 file changed, 8 insertions(+), 67 deletions(-) diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index 1b66cc0cb2c5..de89c0e1ae12 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -131,7 +131,7 @@ struct QueueStatusType { /// /// For a single core, elements will always be processed in order. With each core added, a /// level of of out of order execution is added. - freed_indices: EncodeableBinaryHeap, + freed_indices: BinaryHeap, } impl QueueStatusType { @@ -298,65 +298,6 @@ impl Ord for EnqueuedOrder { } } -struct EncodeableBinaryHeap(BinaryHeap); - -impl TypeInfo for EncodeableBinaryHeap { - type Identity = Self; - // TODO: Do we really have to do this by hand? Better add `BinaryHeap` to scale directly. 
- fn type_info() -> Type { - Type::builder().path(Path::new("EncodeableBinaryHeap", module_path!())).type_params([TypeParameter { name: "T", ty: None }]) - .composite(Fields::unnamed()) - } -} - -impl EncodeableBinaryHeap { - pub fn new() -> Self { - Self(BinaryHeap::new()) - } -} - -impl Deref for EncodeableBinaryHeap { - type Target = BinaryHeap; - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for EncodeableBinaryHeap { - // Required method - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl From> for EncodeableBinaryHeap { - fn from(other: Vec) -> Self { - EncodeableBinaryHeap(BinaryHeap::from(other)) - } -} - -impl Encode for EncodeableBinaryHeap { - fn encode_to(&self, dest: &mut A) { - take(&mut self.0).into_vec().encode_to(dest); - } - - fn encode(&self) -> Vec { - self.0.into_vec().encode() - } - - fn using_encoded R>(&self, f: F) -> R { - self.0.into_vec().using_encoded(f) - } -} - -impl Decode for EncodeableBinaryHeap { - fn decode(input: &mut I) -> Result { - as Decode>::decode(input).map(Self::from) - } -} - -impl EncodeLike for EncodeableBinaryHeap {} - #[frame_support::pallet] pub mod pallet { @@ -389,13 +330,13 @@ pub mod pallet { traffic: T::TrafficDefaultValue::get(), next_index: QueueIndex(0), smallest_index: QueueIndex(0), - freed_indices: EncodeableBinaryHeap::new(), + freed_indices: BinaryHeap::new(), } } #[pallet::type_value] - pub(super) fn EntriesOnEmpty() -> EncodeableBinaryHeap { - EncodeableBinaryHeap::new() + pub(super) fn EntriesOnEmpty() -> BinaryHeap { + BinaryHeap::new() } /// Maps a `ParaId` to `CoreIndex` and keeps track of how many assignments the scheduler has in @@ -413,7 +354,7 @@ pub mod pallet { /// Priority queue for all orders which don't yet (or not any more) have any core affinity. #[pallet::storage] pub(super) type FreeEntries = - StorageValue<_, EncodeableBinaryHeap, ValueQuery, EntriesOnEmpty>; + StorageValue<_, BinaryHeap, ValueQuery, EntriesOnEmpty>; /// Queue entries that are currently bound to a particular core due to core affinity. #[pallet::storage] @@ -421,7 +362,7 @@ pub mod pallet { _, Twox64Concat, CoreIndex, - EncodeableBinaryHeap, + BinaryHeap, ValueQuery, EntriesOnEmpty, >; @@ -530,7 +471,7 @@ where let (mut affinities, free): (BinaryHeap<_>, BinaryHeap<_>) = (*free_entries).into_iter().partition(|e| e.para_id == entry.para_id); affinity_entries.append(&mut affinities); - *free_entries = EncodeableBinaryHeap(free); + *free_entries = free; Ok(entry) } else { Err(()) @@ -772,7 +713,7 @@ where let (mut freed, affinities): (BinaryHeap<_>, BinaryHeap<_>) = (*affinity_entries).into_iter().partition(|e| e.para_id == para_id); free_entries.append(&mut freed); - *affinity_entries = EncodeableBinaryHeap(affinities); + *affinity_entries = affinities; }) }); } From cac2752fcb6676709f27531fb75f3683b466fc66 Mon Sep 17 00:00:00 2001 From: eskimor Date: Fri, 2 Feb 2024 11:48:42 +0100 Subject: [PATCH 09/31] Patch scale-info for now. 
--- Cargo.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index fd8708a3dadd..ad49748a4385 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -604,3 +604,6 @@ wasmi = { opt-level = 3 } x25519-dalek = { opt-level = 3 } yamux = { opt-level = 3 } zeroize = { opt-level = 3 } + +[patch.crates-io] +scale-info = { git = 'https://github.com/paritytech/scale-info.git', branch="binary_heap" } From a9b13f7679c122be233d282444b25e32af0a5a91 Mon Sep 17 00:00:00 2001 From: eskimor Date: Fri, 2 Feb 2024 13:22:31 +0100 Subject: [PATCH 10/31] Revert default value. --- polkadot/primitives/src/v6/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index b69e42635965..9a710b9117c3 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -393,7 +393,7 @@ pub const MAX_POV_SIZE: u32 = 5 * 1024 * 1024; /// Default queue size we use for the on-demand order book. /// /// Can be adjusted in configuration. -pub const ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE: u32 = 500; +pub const ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE: u32 = 10_000; /// Maximum for maximum queue size. /// From f9ea4e4873ffa617c6918611c016b758bf8d82d6 Mon Sep 17 00:00:00 2001 From: eskimor Date: Fri, 2 Feb 2024 13:22:55 +0100 Subject: [PATCH 11/31] Fixes + tests. --- .../parachains/src/assigner_on_demand/mod.rs | 44 ++- .../src/assigner_on_demand/tests.rs | 350 ++++++------------ 2 files changed, 124 insertions(+), 270 deletions(-) diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index de89c0e1ae12..6486cda0b70f 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -39,7 +39,7 @@ extern crate alloc; #[cfg(test)] mod tests; -use core::{mem::take, ops::{Deref, DerefMut}}; +use core::mem::take; use crate::{configuration, paras, scheduler::common::Assignment}; @@ -52,9 +52,7 @@ use frame_support::{ }, }; use frame_system::pallet_prelude::*; -use parity_scale_codec::{EncodeAsRef, EncodeLike, FullEncode, Input, Output, WrapperTypeEncode}; use primitives::{CoreIndex, Id as ParaId, ON_DEMAND_MAX_QUEUE_MAX_SIZE}; -use scale_info::{build::{Fields, FieldsBuilder}, Path, Type, TypeParameter}; use sp_runtime::{ traits::{One, SaturatedConversion}, FixedPointNumber, FixedPointOperand, FixedU128, Perbill, Saturating, @@ -108,9 +106,7 @@ impl WeightInfo for TestWeightInfo { /// - report_processed & push back: If affinity dropped to 0, then O(N) in the worst case. Again /// this divides per core. /// -/// Reads still exist, also improved slightly, but worst case we fetch all entries. This -/// technically should mean worst complexity is still O(N*cores), but constant factors should be -/// really small. Benchmarks will show. +/// Reads still exist, also improved slightly, but worst case we fetch all entries. /// /// TODO: Add benchmark results. #[derive(Encode, Decode, TypeInfo)] @@ -157,7 +153,7 @@ impl QueueStatusType { /// Push something to the front of the queue fn push_front(&mut self) -> QueueIndex { - self.smallest_index = QueueIndex(self.next_index.0.overflowing_sub(1).0); + self.smallest_index = QueueIndex(self.smallest_index.0.overflowing_sub(1).0); self.smallest_index } @@ -183,13 +179,14 @@ impl QueueStatusType { /// specific `ParaId`. 
#[derive(Encode, Decode, Default, Clone, Copy, TypeInfo)] #[cfg_attr(test, derive(PartialEq, RuntimeDebug))] -pub struct CoreAffinityCount { +struct CoreAffinityCount { core_index: CoreIndex, count: u32, } /// An indicator as to which end of the `OnDemandQueue` an assignment will be placed. -pub enum QueuePushDirection { +#[cfg_attr(test, derive(RuntimeDebug))] +enum QueuePushDirection { Back, Front, } @@ -200,7 +197,7 @@ type BalanceOf = /// Errors that can happen during spot traffic calculation. #[derive(PartialEq, RuntimeDebug)] -pub enum SpotTrafficCalculationErr { +enum SpotTrafficCalculationErr { /// The order queue capacity is at 0. QueueCapacityIsZero, /// The queue size is larger than the queue capacity. @@ -227,12 +224,6 @@ impl QueueIndex { } } -impl ReverseQueueIndex { - fn reverse(self) -> QueueIndex { - QueueIndex(self.0) - } -} - impl Ord for QueueIndex { fn cmp(&self, other: &Self) -> Ordering { let diff = self.0.overflowing_sub(other.0).0; @@ -268,13 +259,13 @@ impl PartialOrd for ReverseQueueIndex { /// This data structure is provided for a min BinaryHeap (Ord compares in reverse order with regards /// to its elements) #[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq)] -pub(super) struct EnqueuedOrder { +struct EnqueuedOrder { para_id: ParaId, idx: QueueIndex, } impl EnqueuedOrder { - pub fn new(idx: QueueIndex, para_id: ParaId) -> Self { + fn new(idx: QueueIndex, para_id: ParaId) -> Self { Self { idx, para_id } } } @@ -325,7 +316,7 @@ pub mod pallet { /// Creates an empty queue status for an empty queue with initial traffic value. #[pallet::type_value] - pub fn QueueStatusOnEmpty() -> QueueStatusType { + pub(super) fn QueueStatusOnEmpty() -> QueueStatusType { QueueStatusType { traffic: T::TrafficDefaultValue::get(), next_index: QueueIndex(0), @@ -469,7 +460,9 @@ where if pick_free { let entry = free_entries.pop().ok_or(())?; let (mut affinities, free): (BinaryHeap<_>, BinaryHeap<_>) = - (*free_entries).into_iter().partition(|e| e.para_id == entry.para_id); + take(free_entries) + .into_iter() + .partition(|e| e.para_id == entry.para_id); affinity_entries.append(&mut affinities); *free_entries = free; Ok(entry) @@ -488,7 +481,7 @@ where // TODO: Test for invariant: If affinity count was zero before (is 1 now) then the entry // must have come from free_entries. Pallet::::increase_affinity(assignment.para_id(), core_index); - Some(assignment) + Some(assignment) } /// Report that the `para_id` & `core_index` combination was processed. @@ -619,7 +612,7 @@ where /// - `SpotTrafficCalculationErr::QueueCapacityIsZero` /// - `SpotTrafficCalculationErr::QueueSizeLargerThanCapacity` /// - `SpotTrafficCalculationErr::Division` - pub(crate) fn calculate_spot_traffic( + fn calculate_spot_traffic( traffic: FixedU128, queue_capacity: u32, queue_size: u32, @@ -686,6 +679,9 @@ where let affinity = ParaIdAffinity::::get(para_id); let order = EnqueuedOrder::new(idx, para_id); + #[cfg(test)] + log::debug!(target: LOG_TARGET, "add_on_demand_order, order: {:?}, affinity: {:?}, direction: {:?}", order, affinity, location); + match affinity { None => FreeEntries::::mutate(|entries| entries.push(order)), Some(affinity) => @@ -698,6 +694,7 @@ where /// if affinity dropped to 0, moving entries back to `FreeEntries`. fn decrease_affinity_update_queue(para_id: ParaId, core_index: CoreIndex) { let affinity = Pallet::::decrease_affinity(para_id, core_index); + #[cfg(not(test))] debug_assert_ne!( affinity, None, "Decreased affinity for a para that has not been served on a core?" 
@@ -711,7 +708,7 @@ where AffinityEntries::::mutate(core_index, |affinity_entries| { FreeEntries::::mutate(|free_entries| { let (mut freed, affinities): (BinaryHeap<_>, BinaryHeap<_>) = - (*affinity_entries).into_iter().partition(|e| e.para_id == para_id); + take(affinity_entries).into_iter().partition(|e| e.para_id == para_id); free_entries.append(&mut freed); *affinity_entries = affinities; }) @@ -763,6 +760,7 @@ where } /// Getter for the affinity tracker. + #[cfg(test)] fn get_affinity_map(para_id: ParaId) -> Option { ParaIdAffinity::::get(para_id) } diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs index f3750f82e6f3..629ccb22a389 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs @@ -78,6 +78,16 @@ fn run_to_block( } } +fn place_order(para_id: ParaId) { + let alice = 100u64; + let amt = 10_000_000u128; + + Balances::make_free_balance_be(&alice, amt); + + run_to_block(101, |n| if n == 101 { Some(Default::default()) } else { None }); + OnDemandAssigner::place_order_allow_death(RuntimeOrigin::signed(alice), amt, para_id).unwrap() +} + #[test] fn spot_traffic_capacity_zero_returns_none() { match OnDemandAssigner::calculate_spot_traffic( @@ -278,74 +288,6 @@ fn place_order_keep_alive_keeps_alive() { }); } -#[test] -fn add_on_demand_order_works() { - let para_a = ParaId::from(111); - let order = EnqueuedOrder::new(para_a); - - let mut genesis = GenesisConfigBuilder::default(); - genesis.on_demand_max_queue_size = 1; - new_test_ext(genesis.build()).execute_with(|| { - // Initialize the parathread and wait for it to be ready. - schedule_blank_para(para_a, ParaKind::Parathread); - - // `para_a` is not onboarded as a parathread yet. - assert_noop!( - OnDemandAssigner::add_on_demand_order(order.clone(), QueuePushDirection::Back), - Error::::InvalidParaId - ); - - assert!(!Paras::is_parathread(para_a)); - run_to_block(100, |n| if n == 100 { Some(Default::default()) } else { None }); - assert!(Paras::is_parathread(para_a)); - - // `para_a` is now onboarded as a valid parathread. - assert_ok!(OnDemandAssigner::add_on_demand_order(order.clone(), QueuePushDirection::Back)); - - // Max queue size is 1, queue should be full. 
- assert_noop!( - OnDemandAssigner::add_on_demand_order(order, QueuePushDirection::Back), - Error::::QueueFull - ); - }); -} - -#[test] -fn spotqueue_push_directions() { - new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { - let para_a = ParaId::from(111); - let para_b = ParaId::from(222); - let para_c = ParaId::from(333); - - schedule_blank_para(para_a, ParaKind::Parathread); - schedule_blank_para(para_b, ParaKind::Parathread); - schedule_blank_para(para_c, ParaKind::Parathread); - - run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); - - let order_a = EnqueuedOrder::new(para_a); - let order_b = EnqueuedOrder::new(para_b); - let order_c = EnqueuedOrder::new(para_c); - - assert_ok!(OnDemandAssigner::add_on_demand_order( - order_a.clone(), - QueuePushDirection::Front - )); - assert_ok!(OnDemandAssigner::add_on_demand_order( - order_b.clone(), - QueuePushDirection::Front - )); - - assert_ok!(OnDemandAssigner::add_on_demand_order( - order_c.clone(), - QueuePushDirection::Back - )); - - assert_eq!(OnDemandAssigner::queue_size(), 3); - assert_eq!(OnDemandAssigner::get_queue(), VecDeque::from(vec![order_b, order_a, order_c])) - }); -} - #[test] fn pop_assignment_for_core_works() { new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { @@ -356,51 +298,32 @@ fn pop_assignment_for_core_works() { run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); - let order_a = EnqueuedOrder::new(para_a); - let order_b = EnqueuedOrder::new(para_b); - let assignment_a = Assignment::Pool { para_id: para_a, core_index: CoreIndex(0) }; - let assignment_b = Assignment::Pool { para_id: para_b, core_index: CoreIndex(1) }; - // Pop should return none with empty queue assert_eq!(OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)), None); // Add enough assignments to the order queue. 
for _ in 0..2 { - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - - OnDemandAssigner::add_on_demand_order(order_b.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - } - - // Queue should contain orders a, b, a, b - { - let queue: Vec = OnDemandQueue::::get().into_iter().collect(); - assert_eq!( - queue, - vec![order_a.clone(), order_b.clone(), order_a.clone(), order_b.clone()] - ); + place_order(para_a); + place_order(para_b); } // Popped assignments should be for the correct paras and cores assert_eq!( - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)), - Some(assignment_a.clone()) + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).map(|a| a.para_id()), + Some(para_a) ); assert_eq!( - OnDemandAssigner::pop_assignment_for_core(CoreIndex(1)), - Some(assignment_b.clone()) + OnDemandAssigner::pop_assignment_for_core(CoreIndex(1)).map(|a| a.para_id()), + Some(para_b) ); assert_eq!( - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)), - Some(assignment_a.clone()) + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).map(|a| a.para_id()), + Some(para_a) + ); + assert_eq!( + OnDemandAssigner::pop_assignment_for_core(CoreIndex(1)).map(|a| a.para_id()), + Some(para_b) ); - - // Queue should contain one left over order - { - let queue: Vec = OnDemandQueue::::get().into_iter().collect(); - assert_eq!(queue, vec![order_b.clone(),]); - } }); } @@ -414,28 +337,19 @@ fn push_back_assignment_works() { run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); - let order_a = EnqueuedOrder::new(para_a); - let order_b = EnqueuedOrder::new(para_b); - // Add enough assignments to the order queue. - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - - OnDemandAssigner::add_on_demand_order(order_b.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); + place_order(para_a); + place_order(para_b); // Pop order a - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)); + assert_eq!( + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).unwrap().para_id(), + para_a + ); // Para a should have affinity for core 0 assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 1); - assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().core_idx, CoreIndex(0)); - - // Queue should still contain order b - { - let queue: Vec = OnDemandQueue::::get().into_iter().collect(); - assert_eq!(queue, vec![order_b.clone()]); - } + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().core_index, CoreIndex(0)); // Push back order a OnDemandAssigner::push_back_assignment(para_a, CoreIndex(0)); @@ -444,75 +358,14 @@ fn push_back_assignment_works() { assert_eq!(OnDemandAssigner::get_affinity_map(para_a).is_none(), true); // Queue should contain orders a, b. A in front of b. - { - let queue: Vec = OnDemandQueue::::get().into_iter().collect(); - assert_eq!(queue, vec![order_a.clone(), order_b.clone()]); - } - }); -} - -#[test] -fn affinity_changes_work() { - new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { - let para_a = ParaId::from(111); - let core_index = CoreIndex(0); - schedule_blank_para(para_a, ParaKind::Parathread); - - let order_a = EnqueuedOrder::new(para_a); - run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); - - // There should be no affinity before starting. 
- assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); - - // Add enough assignments to the order queue. - for _ in 0..10 { - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Front) - .expect("Invalid paraid or queue full"); - } - - // There should be no affinity before the scheduler pops. - assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); - - OnDemandAssigner::pop_assignment_for_core(core_index); - - // Affinity count is 1 after popping. - assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 1); - - OnDemandAssigner::report_processed(para_a, 0.into()); - OnDemandAssigner::pop_assignment_for_core(core_index); - - // Affinity count is 1 after popping with a previous para. - assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 1); - assert_eq!(OnDemandAssigner::queue_size(), 8); - - for _ in 0..3 { - OnDemandAssigner::pop_assignment_for_core(core_index); - } - - // Affinity count is 4 after popping 3 times without a previous para. - assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 4); - assert_eq!(OnDemandAssigner::queue_size(), 5); - - for _ in 0..5 { - OnDemandAssigner::report_processed(para_a, 0.into()); - OnDemandAssigner::pop_assignment_for_core(core_index); - } - - // Affinity count should still be 4 but queue should be empty. - assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 4); - assert_eq!(OnDemandAssigner::queue_size(), 0); - - // Pop 4 times and get to exactly 0 (None) affinity. - for _ in 0..4 { - OnDemandAssigner::report_processed(para_a, 0.into()); - OnDemandAssigner::pop_assignment_for_core(core_index); - } - assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); - - // Decreasing affinity beyond 0 should still be None. - OnDemandAssigner::report_processed(para_a, 0.into()); - OnDemandAssigner::pop_assignment_for_core(core_index); - assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); + assert_eq!( + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).unwrap().para_id(), + para_a + ); + assert_eq!( + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).unwrap().para_id(), + para_b + ); }); } @@ -527,36 +380,27 @@ fn affinity_prohibits_parallel_scheduling() { run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); - let order_a = EnqueuedOrder::new(para_a); - let order_b = EnqueuedOrder::new(para_b); - // There should be no affinity before starting. assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); assert!(OnDemandAssigner::get_affinity_map(para_b).is_none()); // Add 2 assignments for para_a for every para_b. - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - - OnDemandAssigner::add_on_demand_order(order_b.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - - assert_eq!(OnDemandAssigner::queue_size(), 3); + place_order(para_a); + place_order(para_a); + place_order(para_b); // Approximate having 1 core. for _ in 0..3 { - OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)); + assert!(OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).is_some()); } + assert!(OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)).is_none()); // Affinity on one core is meaningless. 
assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 2); assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().count, 1); assert_eq!( - OnDemandAssigner::get_affinity_map(para_a).unwrap().core_idx, - OnDemandAssigner::get_affinity_map(para_b).unwrap().core_idx + OnDemandAssigner::get_affinity_map(para_a).unwrap().core_index, + OnDemandAssigner::get_affinity_map(para_b).unwrap().core_index, ); // Clear affinity @@ -565,40 +409,27 @@ fn affinity_prohibits_parallel_scheduling() { OnDemandAssigner::report_processed(para_b, 0.into()); // Add 2 assignments for para_a for every para_b. - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - OnDemandAssigner::report_processed(assignment_b_core_0.clone()); - - // Add 2 assignments for para_a for every para_b. - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - - OnDemandAssigner::add_on_demand_order(order_a.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); - - OnDemandAssigner::add_on_demand_order(order_b.clone(), QueuePushDirection::Back) - .expect("Invalid paraid or queue full"); + place_order(para_a); + place_order(para_a); + place_order(para_b); // Approximate having 3 cores. CoreIndex 2 should be unable to obtain an assignment for _ in 0..3 { OnDemandAssigner::pop_assignment_for_core(CoreIndex(0)); OnDemandAssigner::pop_assignment_for_core(CoreIndex(1)); - assert_eq!(None, OnDemandAssigner::pop_assignment_for_core(CoreIndex(2))); + assert!(OnDemandAssigner::pop_assignment_for_core(CoreIndex(2)).is_none()); } // Affinity should be the same as before, but on different cores. assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 2); assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().count, 1); - assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().core_idx, CoreIndex(0)); - assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().core_idx, CoreIndex(1)); + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().core_index, CoreIndex(0)); + assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().core_index, CoreIndex(1)); // Clear affinity - OnDemandAssigner::report_processed(assignment_a.clone()); - OnDemandAssigner::report_processed(assignment_a.clone()); - OnDemandAssigner::report_processed(assignment_b_core_1.clone()); + OnDemandAssigner::report_processed(para_a, CoreIndex(0)); + OnDemandAssigner::report_processed(para_a, CoreIndex(0)); + OnDemandAssigner::report_processed(para_b, CoreIndex(1)); // There should be no affinity after clearing. 
assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); @@ -607,37 +438,62 @@ fn affinity_prohibits_parallel_scheduling() { } #[test] -fn on_demand_orders_cannot_be_popped_if_lifecycle_changes() { - let para_id = ParaId::from(10); - let core_index = CoreIndex(0); - let order = EnqueuedOrder::new(para_id); - let assignment = OnDemandAssignment::new(para_id, core_index); - +fn affinity_changes_work() { new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { - // Register the para_id as a parathread - schedule_blank_para(para_id, ParaKind::Parathread); + let para_a = ParaId::from(111); + let core_index = CoreIndex(0); + schedule_blank_para(para_a, ParaKind::Parathread); - assert!(!Paras::is_parathread(para_id)); - run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); - assert!(Paras::is_parathread(para_id)); + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); - // Add two assignments for a para_id with a valid lifecycle. - assert_ok!(OnDemandAssigner::add_on_demand_order(order.clone(), QueuePushDirection::Back)); - assert_ok!(OnDemandAssigner::add_on_demand_order(order.clone(), QueuePushDirection::Back)); + // There should be no affinity before starting. + assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); - // First pop is fine - assert_eq!(OnDemandAssigner::pop_assignment_for_core(core_index), Some(assignment.clone())); + // Add enough assignments to the order queue. + for _ in 0..10 { + place_order(para_a); + } - // Deregister para - assert_ok!(Paras::schedule_para_cleanup(para_id)); + // There should be no affinity before the scheduler pops. + assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); - // Run to new session and verify that para_id is no longer a valid parathread. - assert!(Paras::is_parathread(para_id)); - run_to_block(20, |n| if n == 20 { Some(Default::default()) } else { None }); - assert!(!Paras::is_parathread(para_id)); + OnDemandAssigner::pop_assignment_for_core(core_index); + + // Affinity count is 1 after popping. + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 1); + + OnDemandAssigner::report_processed(para_a, 0.into()); + OnDemandAssigner::pop_assignment_for_core(core_index); + + // Affinity count is 1 after popping with a previous para. + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 1); + + for _ in 0..3 { + OnDemandAssigner::pop_assignment_for_core(core_index); + } + + // Affinity count is 4 after popping 3 times without a previous para. + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 4); + + for _ in 0..5 { + OnDemandAssigner::report_processed(para_a, 0.into()); + assert!(OnDemandAssigner::pop_assignment_for_core(core_index).is_some()); + } + + // Affinity count should still be 4 but queue should be empty. + assert!(OnDemandAssigner::pop_assignment_for_core(core_index).is_none()); + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 4); - // Second pop should be None. - OnDemandAssigner::report_processed(assignment.clone()); - assert_eq!(OnDemandAssigner::pop_assignment_for_core(core_index), None); + // Pop 4 times and get to exactly 0 (None) affinity. + for _ in 0..4 { + OnDemandAssigner::report_processed(para_a, 0.into()); + assert!(OnDemandAssigner::pop_assignment_for_core(core_index).is_none()); + } + assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); + + // Decreasing affinity beyond 0 should still be None. 
+ OnDemandAssigner::report_processed(para_a, 0.into()); + assert!(OnDemandAssigner::pop_assignment_for_core(core_index).is_none()); + assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); }); } From 457b2250269d362dc4ec85e937b3a2b555b67fc7 Mon Sep 17 00:00:00 2001 From: eskimor Date: Fri, 2 Feb 2024 14:13:12 +0100 Subject: [PATCH 12/31] Bring back copyright. --- polkadot/runtime/parachains/src/assigner_on_demand/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index 6486cda0b70f..a995bc2f0beb 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -1,4 +1,4 @@ -// Copyright (C) Parit (UK) Ltd. +// Copyright (C) Parity Technologies (UK) Ltd. // This file is part of Polkadot. // Polkadot is free software: you can redistribute it and/or modify From ce7c8e846f0e5adfa33c488849209d0fa3ac6079 Mon Sep 17 00:00:00 2001 From: eskimor Date: Fri, 2 Feb 2024 14:29:20 +0100 Subject: [PATCH 13/31] Fix benchmark. --- .../parachains/src/assigner_on_demand/benchmarking.rs | 11 ++--------- .../runtime/parachains/src/assigner_on_demand/mod.rs | 8 ++++++++ 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs index 5a6060cd2b4e..b2be1b49f3c9 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/benchmarking.rs @@ -70,11 +70,7 @@ mod benchmarks { let para_id = ParaId::from(111u32); init_parathread::(para_id); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let order = EnqueuedOrder::new(para_id); - - for _ in 0..s { - Pallet::::add_on_demand_order(order.clone(), QueuePushDirection::Back).unwrap(); - } + Pallet::::populate_queue(para_id, s); #[extrinsic_call] _(RawOrigin::Signed(caller.into()), BalanceOf::::max_value(), para_id) @@ -87,11 +83,8 @@ mod benchmarks { let para_id = ParaId::from(111u32); init_parathread::(para_id); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let order = EnqueuedOrder::new(para_id); - for _ in 0..s { - Pallet::::add_on_demand_order(order.clone(), QueuePushDirection::Back).unwrap(); - } + Pallet::::populate_queue(para_id, s); #[extrinsic_call] _(RawOrigin::Signed(caller.into()), BalanceOf::::max_value(), para_id) diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index a995bc2f0beb..0c924295de1a 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -764,4 +764,12 @@ where fn get_affinity_map(para_id: ParaId) -> Option { ParaIdAffinity::::get(para_id) } + #[cfg(feature = "runtime-benchmarks")] + pub fn populate_queue(para_id: ParaId, num: u32) { + QueueStatus::::mutate(|queue_status| { + for _ in 0..num { + Pallet::::add_on_demand_order(queue_status, para_id, QueuePushDirection::Back); + } + }); + } } From 1bab42e3ecdbddf9da9d352d70f90a0669097eb9 Mon Sep 17 00:00:00 2001 From: eskimor Date: Fri, 2 Feb 2024 14:58:38 +0100 Subject: [PATCH 14/31] binary heap got merged --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index ad49748a4385..f2058c7c2fb6 100644 --- 
a/Cargo.toml +++ b/Cargo.toml @@ -606,4 +606,4 @@ yamux = { opt-level = 3 } zeroize = { opt-level = 3 } [patch.crates-io] -scale-info = { git = 'https://github.com/paritytech/scale-info.git', branch="binary_heap" } +scale-info = { git = 'https://github.com/paritytech/scale-info.git', branch="master" } From c21540d793d9b3d663cd8871df0107bf95ccd15b Mon Sep 17 00:00:00 2001 From: antonva Date: Fri, 9 Feb 2024 09:29:19 +0000 Subject: [PATCH 15/31] Calculate on demand traffic on idle blocks Reintroduces on_initialize hook to update the spot traffic even when there are no on demand orders being placed, allowing for a price decrease. Add Ord tests for QueueIndex and Reverse --- .../parachains/src/assigner_on_demand/mod.rs | 71 ++++++-- .../src/assigner_on_demand/tests.rs | 163 ++++++++++++++++++ 2 files changed, 222 insertions(+), 12 deletions(-) diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index 0c924295de1a..7af3df6224db 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -126,10 +126,21 @@ struct QueueStatusType { /// heap is roughly bounded in the number of on demand cores: /// /// For a single core, elements will always be processed in order. With each core added, a - /// level of of out of order execution is added. + /// level of out of order execution is added. freed_indices: BinaryHeap, } +impl Default for QueueStatusType { + fn default() -> QueueStatusType { + QueueStatusType { + traffic: FixedU128::default(), + next_index: QueueIndex(0), + smallest_index: QueueIndex(0), + freed_indices: BinaryHeap::new(), + } + } +} + impl QueueStatusType { /// How many orders are queued in total? /// @@ -206,15 +217,15 @@ enum SpotTrafficCalculationErr { Division, } -// Type used for priority indices. +/// Type used for priority indices. +// NOTE: The `Ord` implementation for this type is unsound in the general case. +// Do not use it for anything but it's intended purpose. #[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq, Copy)] struct QueueIndex(u32); /// QueueIndex with reverse ordering. /// /// Same as `Reverse(QueueIndex)`, but with all the needed traits implemented. -/// -/// TODO: Add basic ordering tests. #[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq, Copy)] struct ReverseQueueIndex(u32); @@ -317,12 +328,7 @@ pub mod pallet { /// Creates an empty queue status for an empty queue with initial traffic value. #[pallet::type_value] pub(super) fn QueueStatusOnEmpty() -> QueueStatusType { - QueueStatusType { - traffic: T::TrafficDefaultValue::get(), - next_index: QueueIndex(0), - smallest_index: QueueIndex(0), - freed_indices: BinaryHeap::new(), - } + QueueStatusType { traffic: T::TrafficDefaultValue::get(), ..Default::default() } } #[pallet::type_value] @@ -376,6 +382,21 @@ pub mod pallet { SpotPriceHigherThanMaxAmount, } + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(_now: BlockNumberFor) -> Weight { + let config = >::config(); + // We need to update the spot traffic on block initialize in order to account for idle + // blocks. + QueueStatus::::mutate(|queue_status| { + Self::update_spot_traffic(&config, queue_status); + }); + + // 2 reads in config and queuestatus, at maximum 1 write to queuestatus. + T::DbWeight::get().reads_writes(2, 1) + } + } + #[pallet::call] impl Pallet { /// Create a single on demand core order. 
@@ -478,8 +499,6 @@ where let assignment = entry.map(|e| Assignment::Pool { para_id: e.para_id, core_index }).ok()?; - // TODO: Test for invariant: If affinity count was zero before (is 1 now) then the entry - // must have come from free_entries. Pallet::::increase_affinity(assignment.para_id(), core_index); Some(assignment) } @@ -764,6 +783,19 @@ where fn get_affinity_map(para_id: ParaId) -> Option { ParaIdAffinity::::get(para_id) } + + /// Getter for the affinity entries. + #[cfg(test)] + fn get_affinity_entries(core_index: CoreIndex) -> BinaryHeap { + AffinityEntries::::get(core_index) + } + + /// Getter for the free entries. + #[cfg(test)] + fn get_free_entries() -> BinaryHeap { + FreeEntries::::get() + } + #[cfg(feature = "runtime-benchmarks")] pub fn populate_queue(para_id: ParaId, num: u32) { QueueStatus::::mutate(|queue_status| { @@ -772,4 +804,19 @@ where } }); } + + #[cfg(test)] + fn set_queue_status(new_status: QueueStatusType) { + QueueStatus::::set(new_status); + } + + #[cfg(test)] + fn get_queue_status() -> QueueStatusType { + QueueStatus::::get() + } + + #[cfg(test)] + fn get_traffic_default_value() -> FixedU128 { + ::TrafficDefaultValue::get() + } } diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs index 629ccb22a389..00bb2724a086 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs @@ -73,6 +73,9 @@ fn run_to_block( Paras::initializer_initialize(b + 1); Scheduler::initializer_initialize(b + 1); + // We need to update the spot traffic on every block. + OnDemandAssigner::on_initialize(b + 1); + // In the real runtime this is expected to be called by the `InclusionInherent` pallet. Scheduler::free_cores_and_fill_claimqueue(BTreeMap::new(), b + 1); } @@ -211,6 +214,42 @@ fn spot_traffic_decreases_over_time() { assert_eq!(traffic, FixedU128::from_inner(3_125_000_000_000_000_000u128)) } +#[test] +fn spot_traffic_decreases_between_idle_blocks() { + // Testing spot traffic assumptions, but using the mock runtime and default on demand + // configuration values. Ensuring that blocks with no on demand activity (idle) + // decrease traffic. + + let para_id = ParaId::from(111); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + // Initialize the parathread and wait for it to be ready. + schedule_blank_para(para_id, ParaKind::Parathread); + assert!(!Paras::is_parathread(para_id)); + run_to_block(100, |n| if n == 100 { Some(Default::default()) } else { None }); + assert!(Paras::is_parathread(para_id)); + + // Set the spot traffic to a large number + OnDemandAssigner::set_queue_status(QueueStatusType { + traffic: FixedU128::from_u32(10), + ..Default::default() + }); + + assert_eq!(OnDemandAssigner::get_queue_status().traffic, FixedU128::from_u32(10)); + + // Run to block 101 and ensure that the traffic decreases. + run_to_block(101, |n| if n == 100 { Some(Default::default()) } else { None }); + assert_eq!(OnDemandAssigner::get_queue_status().traffic, FixedU128::from_float(2.49996875)); + + // Run to block 102 and observe that we've hit the default traffic value. 
+ run_to_block(102, |n| if n == 100 { Some(Default::default()) } else { None }); + assert_eq!( + OnDemandAssigner::get_queue_status().traffic, + OnDemandAssigner::get_traffic_default_value() + ); + }) +} + #[test] fn place_order_works() { let alice = 1u64; @@ -497,3 +536,127 @@ fn affinity_changes_work() { assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); }); } + +#[test] +fn new_affinity_for_a_core_must_come_from_free_entries() { + // If affinity count for a core was zero before, and is 1 now, then the entry + // must have come from free_entries. + let parachains = + vec![ParaId::from(111), ParaId::from(222), ParaId::from(333), ParaId::from(444)]; + let core_indices = vec![CoreIndex(0), CoreIndex(1), CoreIndex(2), CoreIndex(3)]; + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + parachains.iter().for_each(|chain| { + schedule_blank_para(*chain, ParaKind::Parathread); + }); + + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + + // Place orders for all chains. + parachains.iter().for_each(|chain| { + place_order(*chain); + }); + + // There are 4 entries in free_entries. + let start_free_entries = OnDemandAssigner::get_free_entries().len(); + assert_eq!(start_free_entries, 4); + + // Pop assignments on all cores. + core_indices.iter().enumerate().for_each(|(n, core_index)| { + // There is no affinity on the core prior to popping. + assert!(OnDemandAssigner::get_affinity_entries(*core_index).is_empty()); + + // There's always an order to be popped for each core. + let free_entries = OnDemandAssigner::get_free_entries(); + let next_order = free_entries.peek(); + + // There is no affinity on the paraid prior to popping. + assert!(OnDemandAssigner::get_affinity_map(next_order.unwrap().para_id).is_none()); + + match OnDemandAssigner::pop_assignment_for_core(*core_index) { + Some(assignment) => { + // The popped assignment came from free entries. + assert_eq!( + start_free_entries - 1 - n, + OnDemandAssigner::get_free_entries().len() + ); + // The popped assignment has the same para id as the next order. + assert_eq!(assignment.para_id(), next_order.unwrap().para_id); + }, + None => panic!("Should not happen"), + } + }); + + // All entries have been removed from free_entries. + assert!(OnDemandAssigner::get_free_entries().is_empty()); + + // All chains have an affinity count of 1. + parachains.iter().for_each(|chain| { + assert_eq!(OnDemandAssigner::get_affinity_map(*chain).unwrap().count, 1); + }); + }); +} + +#[test] +#[should_panic] +fn queue_index_ordering_is_unsound_over_max_size() { + // NOTE: Unsoundness proof. If the number goes sufficiently over the max_queue_max_size + // the overflow will cause an opposite comparison to what would be expected. + let mut max_num = u32::MAX - ON_DEMAND_MAX_QUEUE_MAX_SIZE; + // 0 < some large number. + assert_eq!(QueueIndex(0).cmp(&QueueIndex(max_num + 1)), Ordering::Less); +} + +#[test] +fn queue_index_ordering_works() { + // The largest number accepted in in the queue index. 
+ let mut max_num = ON_DEMAND_MAX_QUEUE_MAX_SIZE; + + // 0 == 0 + assert_eq!(QueueIndex(0).cmp(&QueueIndex(0)), Ordering::Equal); + // 0 < 1 + assert_eq!(QueueIndex(0).cmp(&QueueIndex(1)), Ordering::Less); + // 1 > 0 + assert_eq!(QueueIndex(1).cmp(&QueueIndex(0)), Ordering::Greater); + // 0 < max_num + assert_eq!(QueueIndex(0).cmp(&QueueIndex(max_num)), Ordering::Less); + // 0 > max_num + 1 + assert_eq!(QueueIndex(0).cmp(&QueueIndex(max_num + 1)), Ordering::Less); + + // Ordering within the bounds of ON_DEMAND_MAX_QUEUE_MAX_SIZE works. + let mut v = vec![3, 6, 2, 1, 5, 4]; + v.sort_by_key(|&num| QueueIndex(num)); + assert_eq!(v, vec![1, 2, 3, 4, 5, 6]); + + v = vec![max_num, 4, 5, 1, 6]; + v.sort_by_key(|&num| QueueIndex(num)); + assert_eq!(v, vec![1, 4, 5, 6, max_num]); + + // Ordering with an element outside of the bounds of the max size also works. + v = vec![max_num + 2, 0, 6, 2, 1, 5, 4]; + v.sort_by_key(|&num| QueueIndex(num)); + assert_eq!(v, vec![0, 1, 2, 4, 5, 6, max_num + 2]); + + // Numbers way above the max size are unsound and will fail to be sorted. + // Assume all numbers above the max size to be unsound. + v = vec![u32::MAX, 6, 2, 1, 5, 4]; + v.sort_by_key(|&num| QueueIndex(num)); + assert_eq!(v, vec![u32::MAX, 1, 2, 4, 5, 6]); +} + +#[test] +fn reverse_queue_index_does_reverse() { + let mut v = vec![1, 2, 3, 4, 5, 6]; + + // Basic reversal of a vector. + v.sort_by_key(|&num| ReverseQueueIndex(num)); + assert_eq!(v, vec![6, 5, 4, 3, 2, 1]); + + // Example from rust docs on `Reverse`. Should work identically. + v.sort_by_key(|&num| (num > 3, ReverseQueueIndex(num))); + assert_eq!(v, vec![3, 2, 1, 6, 5, 4]); + + let mut v2 = vec![1, 2, u32::MAX]; + v2.sort_by_key(|&num| ReverseQueueIndex(num)); + assert_eq!(v2, vec![2, 1, u32::MAX]); +} From 999b7bbf6fe841730b7a9921b71182d8d9ff8ac9 Mon Sep 17 00:00:00 2001 From: antonva Date: Mon, 4 Mar 2024 15:10:37 +0000 Subject: [PATCH 16/31] Add migration for on demand provider --- .../src/assigner_on_demand/migration.rs | 144 ++++++++++++++++++ .../parachains/src/assigner_on_demand/mod.rs | 1 + polkadot/runtime/rococo/src/lib.rs | 1 + 3 files changed, 146 insertions(+) create mode 100644 polkadot/runtime/parachains/src/assigner_on_demand/migration.rs diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs new file mode 100644 index 000000000000..4feeb1786c54 --- /dev/null +++ b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs @@ -0,0 +1,144 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A module that is responsible for migration of storage. 
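+//!
+//! Specifically: the legacy `SpotTraffic` value and the `OnDemandQueue` `VecDeque` (v0) are
+//! drained into the new `QueueStatus` based storage (v1) by re-adding every queued order
+//! through `add_on_demand_order`, after which the old storage items are killed.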
+use super::*; +use frame_support::{ + migrations::VersionedMigration, pallet_prelude::ValueQuery, storage_alias, + traits::OnRuntimeUpgrade, weights::Weight, +}; + +mod v0 { + use super::*; + use sp_std::collections::vec_deque::VecDeque; + + #[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone)] + pub(super) struct EnqueuedOrder { + pub para_id: ParaId, + } + + /// Keeps track of the multiplier used to calculate the current spot price for the on demand + /// assigner. + /// NOTE: Ignoring the `OnEmpty` field for the migration. + #[storage_alias] + pub(super) type SpotTraffic = StorageValue, FixedU128, ValueQuery>; + + /// The order storage entry. Uses a VecDeque to be able to push to the front of the + /// queue from the scheduler on session boundaries. + /// NOTE: Ignoring the `OnEmpty` field for the migration. + #[storage_alias] + pub(super) type OnDemandQueue = + StorageValue, VecDeque, ValueQuery>; +} + +mod v1 { + use super::*; + + use crate::assigner_on_demand::LOG_TARGET; + + /// Migration to V1 + pub struct UncheckedMigrateToV1(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for UncheckedMigrateToV1 { + fn on_runtime_upgrade() -> Weight { + let mut weight: Weight = Weight::zero(); + + // Migrate the current traffic value + let config = >::config(); + QueueStatus::::mutate(|mut queue_status| { + Pallet::::update_spot_traffic(&config, &mut queue_status); + + let v0_queue = v0::OnDemandQueue::::take(); + // Process the v0 queue into v1. + v0_queue.into_iter().for_each(|enqueued_order| { + // Readding the old orders will use the new systems. + Pallet::::add_on_demand_order( + queue_status, + enqueued_order.para_id, + QueuePushDirection::Front, + ); + }); + }); + + // Remove the old storage. + v0::OnDemandQueue::::kill(); // 1 write + v0::SpotTraffic::::kill(); // 1 write + + // Config read + weight.saturating_accrue(T::DbWeight::get().reads(1)); + // QueueStatus read write (update_spot_traffic) + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + // Kill x 2 + weight.saturating_accrue(T::DbWeight::get().writes(2)); + + log::info!(target: LOG_TARGET, "Migrated on demand assigner storage to v1"); + weight + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + let n: u32 = v0::OnDemandQueue::::get().len() as u32; + + log::info!( + target: LOG_TARGET, + "Number of orders waiting in the queue before: {n}", + ); + + let n: u32 = 0; + Ok(n.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + log::info!(target: LOG_TARGET, "Running post_upgrade()"); + + ensure!( + v0::OnDemandQueue::::get().is_empty(), + "OnDemandQueue should be empty after the migration" + ); + + let expected_len = u32::decode(&mut &state[..]).unwrap(); + let n_affinity_entries = AffinityEntries::::iter() + .map(|(_index, heap)| heap.len() as u32) + .fold(0, |acc, x| acc + x); + let n_free_entries = FreeEntries::::get().len() as u32; + let n_orders = n_affinity_entries + n_free_entries; + // TODO: Ensure that affinity is intact. + ensure!( + expected_len == n_orders, + "Number of orders should be the same before and after migration." 
+ ); + + let n_para_id_affinity = ParaIdAffinity::::iter() + .map(|(_para_id, affinity)| affinity.count as u32) + .fold(0, |acc, x| acc + x); + ensure!( + n_para_id_affinity == n_affinity_entries, + "Number of affinity entries should be the same as the counts in ParaIdAffinity" + ); + + Ok(()) + } + } +} + +/// Migrate `V0` to `V1` of the storage format. +pub type MigrateV0ToV1 = VersionedMigration< + 0, + 1, + v1::UncheckedMigrateToV1, + Pallet, + ::DbWeight, +>; diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index 7af3df6224db..e418ebc4aff6 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -32,6 +32,7 @@ //! intent can be made explicit. mod benchmarking; +pub mod migration; mod mock_helpers; extern crate alloc; diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 69261b2b03f3..eacc9aa1bff5 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1664,6 +1664,7 @@ pub mod migrations { parachains_configuration::migration::v11::MigrateToV11, // This needs to come after the `parachains_configuration` above as we are reading the configuration. coretime::migration::MigrateToCoretime, + parachains_assigner_on_demand::migration::MigrateV0ToV1, ); } From ecbae81fe259b7193f257868878ff73f3f8924fa Mon Sep 17 00:00:00 2001 From: antonva Date: Thu, 7 Mar 2024 09:41:56 +0000 Subject: [PATCH 17/31] Readd missing export --- polkadot/primitives/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index 2ddd9b58dfe4..745195ce092a 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -58,7 +58,8 @@ pub use v6::{ ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, ValidityError, ASSIGNMENT_KEY_TYPE_ID, LEGACY_MIN_BACKING_VOTES, LOWEST_PUBLIC_ID, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, MIN_CODE_SIZE, - ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, PARACHAINS_INHERENT_IDENTIFIER, PARACHAIN_KEY_TYPE_ID, + ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, PARACHAINS_INHERENT_IDENTIFIER, + PARACHAIN_KEY_TYPE_ID, }; #[cfg(feature = "std")] From 079c78fa416e4b90acc0700eb62668ad0aacfec4 Mon Sep 17 00:00:00 2001 From: antonva Date: Thu, 7 Mar 2024 09:42:23 +0000 Subject: [PATCH 18/31] Add storage version to on demand pallet --- .../runtime/parachains/src/assigner_on_demand/migration.rs | 1 - polkadot/runtime/parachains/src/assigner_on_demand/mod.rs | 3 +++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs index 4feeb1786c54..d8f9bbda7958 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs @@ -96,7 +96,6 @@ mod v1 { "Number of orders waiting in the queue before: {n}", ); - let n: u32 = 0; Ok(n.encode()) } diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index e418ebc4aff6..7a2269b9e11f 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -306,8 +306,11 @@ pub mod pallet { use super::*; + const STORAGE_VERSION: 
StorageVersion = StorageVersion::new(1); + #[pallet::pallet] #[pallet::without_storage_info] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); #[pallet::config] From 5308f3aca1c0026b2faee44d728ad74482e2e1cb Mon Sep 17 00:00:00 2001 From: antonva Date: Wed, 13 Mar 2024 15:22:55 +0000 Subject: [PATCH 19/31] Address comments, add new scale-info --- Cargo.lock | 10 ++- Cargo.toml | 3 - .../src/assigner_on_demand/migration.rs | 13 ++-- .../parachains/src/assigner_on_demand/mod.rs | 3 - .../src/assigner_on_demand/tests.rs | 61 ++++++++++++++++--- 5 files changed, 62 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 177d76b87062..2a7193be74cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17035,9 +17035,8 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7d66a1128282b7ef025a8ead62a4a9fcf017382ec53b8ffbf4d7bf77bd3c60" +version = "2.11.0" +source = "git+https://github.com/paritytech/scale-info.git?branch=master#1e02764a1b6a59ef9e977750f2a60947d7dd47d1" dependencies = [ "bitvec", "cfg-if", @@ -17049,9 +17048,8 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf2c68b89cafb3b8d918dd07b42be0da66ff202cf1155c5739a4e0c1ea0dc19" +version = "2.11.0" +source = "git+https://github.com/paritytech/scale-info.git?branch=master#1e02764a1b6a59ef9e977750f2a60947d7dd47d1" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index c35c48f2118c..a2e22e0b9271 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -629,6 +629,3 @@ wasmi = { opt-level = 3 } x25519-dalek = { opt-level = 3 } yamux = { opt-level = 3 } zeroize = { opt-level = 3 } - -[patch.crates-io] -scale-info = { git = 'https://github.com/paritytech/scale-info.git', branch="master" } diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs index d8f9bbda7958..e045cb618d1b 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs @@ -67,7 +67,7 @@ mod v1 { Pallet::::add_on_demand_order( queue_status, enqueued_order.para_id, - QueuePushDirection::Front, + QueuePushDirection::Back, ); }); }); @@ -109,15 +109,10 @@ mod v1 { ); let expected_len = u32::decode(&mut &state[..]).unwrap(); - let n_affinity_entries = AffinityEntries::::iter() - .map(|(_index, heap)| heap.len() as u32) - .fold(0, |acc, x| acc + x); - let n_free_entries = FreeEntries::::get().len() as u32; - let n_orders = n_affinity_entries + n_free_entries; - // TODO: Ensure that affinity is intact. + let queue_status_size = QueueStatus::::size(); ensure!( - expected_len == n_orders, - "Number of orders should be the same before and after migration." + expected_len == queue_status_size, + "Number of orders should be the same before and after migration" ); let n_para_id_affinity = ParaIdAffinity::::iter() diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index 85749f9cafd7..077da776d47c 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -465,9 +465,6 @@ where BalanceOf: FixedPointOperand, { /// Take the next queued entry that is available for a given core index. 
- /// Invalidates and removes orders with a `para_id` that is not `ParaLifecycle::Parathread` - /// but only in [0..P] range slice of the order queue, where P is the element that is - /// removed from the order queue. /// /// Parameters: /// - `core_index`: The core index diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs index 00bb2724a086..fcfbd5f73a16 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs @@ -239,7 +239,7 @@ fn spot_traffic_decreases_between_idle_blocks() { // Run to block 101 and ensure that the traffic decreases. run_to_block(101, |n| if n == 100 { Some(Default::default()) } else { None }); - assert_eq!(OnDemandAssigner::get_queue_status().traffic, FixedU128::from_float(2.49996875)); + assert!(OnDemandAssigner::get_queue_status().traffic < FixedU128::from_u32(10)); // Run to block 102 and observe that we've hit the default traffic value. run_to_block(102, |n| if n == 100 { Some(Default::default()) } else { None }); @@ -602,14 +602,14 @@ fn new_affinity_for_a_core_must_come_from_free_entries() { fn queue_index_ordering_is_unsound_over_max_size() { // NOTE: Unsoundness proof. If the number goes sufficiently over the max_queue_max_size // the overflow will cause an opposite comparison to what would be expected. - let mut max_num = u32::MAX - ON_DEMAND_MAX_QUEUE_MAX_SIZE; + let max_num = u32::MAX - ON_DEMAND_MAX_QUEUE_MAX_SIZE; // 0 < some large number. assert_eq!(QueueIndex(0).cmp(&QueueIndex(max_num + 1)), Ordering::Less); } #[test] fn queue_index_ordering_works() { - // The largest number accepted in in the queue index. + // The largest accepted queue size. let mut max_num = ON_DEMAND_MAX_QUEUE_MAX_SIZE; // 0 == 0 @@ -637,11 +637,10 @@ fn queue_index_ordering_works() { v.sort_by_key(|&num| QueueIndex(num)); assert_eq!(v, vec![0, 1, 2, 4, 5, 6, max_num + 2]); - // Numbers way above the max size are unsound and will fail to be sorted. - // Assume all numbers above the max size to be unsound. - v = vec![u32::MAX, 6, 2, 1, 5, 4]; + // Numbers way above the max size will overflow + v = vec![u32::MAX - 1, u32::MAX, 6, 2, 1, 5, 4]; v.sort_by_key(|&num| QueueIndex(num)); - assert_eq!(v, vec![u32::MAX, 1, 2, 4, 5, 6]); + assert_eq!(v, vec![u32::MAX - 1, u32::MAX, 1, 2, 4, 5, 6]); } #[test] @@ -660,3 +659,51 @@ fn reverse_queue_index_does_reverse() { v2.sort_by_key(|&num| ReverseQueueIndex(num)); assert_eq!(v2, vec![2, 1, u32::MAX]); } + +#[test] +fn queue_status_size_fn_works() { + // Add orders to the on demand queue, and make sure that they are properly represented + // by the QueueStatusType::size fn. + let parachains = vec![ParaId::from(111), ParaId::from(222), ParaId::from(333)]; + let core_indices = vec![CoreIndex(0), CoreIndex(1)]; + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + parachains.iter().for_each(|chain| { + schedule_blank_para(*chain, ParaKind::Parathread); + }); + + assert_eq!(OnDemandAssigner::get_queue_status().size(), 0); + + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + + // Place orders for all chains. 
+ parachains.iter().for_each(|chain| { + // 2 per chain for a total of 6 + place_order(*chain); + place_order(*chain); + }); + + // 6 orders in free entries + assert_eq!(OnDemandAssigner::get_free_entries().len(), 6); + // 6 orders via queue status size + assert_eq!( + OnDemandAssigner::get_free_entries().len(), + OnDemandAssigner::get_queue_status().size() as usize + ); + + core_indices.iter().for_each(|core_index| { + OnDemandAssigner::pop_assignment_for_core(*core_index); + }); + + // There should be 2 orders in the scheduler's claimqueue, + // 2 in assorted AffinityMaps and 2 in free. + // ParaId 111 + assert_eq!(OnDemandAssigner::get_affinity_entries(core_indices[0]).len(), 1); + // ParaId 222 + assert_eq!(OnDemandAssigner::get_affinity_entries(core_indices[1]).len(), 1); + // Free entries are from ParaId 333 + assert_eq!(OnDemandAssigner::get_free_entries().len(), 2); + // For a total size of 4. + assert_eq!(OnDemandAssigner::get_queue_status().size(), 4) + }); +} From 1ae8b7bbbdfcba24f23456b86783df22055e9d87 Mon Sep 17 00:00:00 2001 From: antonva Date: Thu, 14 Mar 2024 11:24:05 +0000 Subject: [PATCH 20/31] Bump scale-info version again --- Cargo.lock | 6 ++++-- polkadot/runtime/parachains/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3611266edce8..fb43c47c2e94 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17036,7 +17036,8 @@ dependencies = [ [[package]] name = "scale-info" version = "2.11.0" -source = "git+https://github.com/paritytech/scale-info.git?branch=master#1e02764a1b6a59ef9e977750f2a60947d7dd47d1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ef2175c2907e7c8bc0a9c3f86aeb5ec1f3b275300ad58a44d0c3ae379a5e52e" dependencies = [ "bitvec", "cfg-if", @@ -17049,7 +17050,8 @@ dependencies = [ [[package]] name = "scale-info-derive" version = "2.11.0" -source = "git+https://github.com/paritytech/scale-info.git?branch=master#1e02764a1b6a59ef9e977750f2a60947d7dd47d1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b8eb8fd61c5cdd3390d9b2132300a7e7618955b98b8416f118c1b4e144f" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index 610401454763..6e693b83ae13 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -15,7 +15,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { workspace = true } rustc-hex = { version = "2.1.0", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.11.0", default-features = false, features = ["derive"] } serde = { features = ["alloc", "derive"], workspace = true } derive_more = "0.99.17" bitflags = "1.3.2" From 3e60c090cda54d6481390e7c7c90e30a3726cb2c Mon Sep 17 00:00:00 2001 From: antonva Date: Thu, 14 Mar 2024 17:31:58 +0000 Subject: [PATCH 21/31] Add test to migration to preserve queue ordering --- .../src/assigner_on_demand/migration.rs | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs index e045cb618d1b..3854c557d545 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs +++ 
b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs @@ -136,3 +136,44 @@ pub type MigrateV0ToV1 = VersionedMigration< Pallet, ::DbWeight, >; + +#[cfg(test)] +mod tests { + use super::{v0, v1, OnRuntimeUpgrade, Weight}; + use crate::mock::{new_test_ext, MockGenesisConfig, OnDemandAssigner, Test}; + use primitives::Id as ParaId; + + #[test] + fn migration_to_v1_preserves_queue_ordering() { + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + // Place orders for paraids 1..5 + for i in 1..=5 { + v0::OnDemandQueue::::mutate(|queue| { + queue.push_back(v0::EnqueuedOrder { para_id: ParaId::new(i) }) + }); + } + + // Queue has 5 orders + let old_queue = v0::OnDemandQueue::::get(); + assert_eq!(old_queue.len(), 5); + // New queue has 0 orders + assert_eq!(OnDemandAssigner::get_queue_status().size(), 0); + + // For tests, db weight is zero. + assert_eq!( + as OnRuntimeUpgrade>::on_runtime_upgrade(), + Weight::zero() + ); + + // New queue has 5 orders + assert_eq!(OnDemandAssigner::get_queue_status().size(), 5); + + // Compare each entry from the old queue with the entry in the new queue. + old_queue.iter().zip(OnDemandAssigner::get_free_entries().iter()).for_each( + |(old_enq, new_enq)| { + assert_eq!(old_enq.para_id, new_enq.para_id); + }, + ); + }); + } +} From 600fd5e5a1185dd7881c91d00db717ddeb126484 Mon Sep 17 00:00:00 2001 From: command-bot <> Date: Mon, 18 Mar 2024 13:50:29 +0000 Subject: [PATCH 22/31] ".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime=rococo --target_dir=polkadot --pallet=runtime_parachains::assigner_on_demand --- .../runtime_parachains_assigner_on_demand.rs | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs index ac0f05301b48..dba9e7904c79 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `runtime_parachains::assigner_on_demand` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -31,11 +31,11 @@ // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=runtime_parachains::assigner_on_demand // --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,44 +48,44 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::assigner_on_demand`. 
pub struct WeightInfo(PhantomData); impl runtime_parachains::assigner_on_demand::WeightInfo for WeightInfo { - /// Storage: `OnDemandAssignmentProvider::SpotTraffic` (r:1 w:0) - /// Proof: `OnDemandAssignmentProvider::SpotTraffic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Paras::ParaLifecycles` (r:1 w:0) - /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `OnDemandAssignmentProvider::OnDemandQueue` (r:1 w:1) - /// Proof: `OnDemandAssignmentProvider::OnDemandQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) + /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::FreeEntries` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 9999]`. fn place_order_keep_alive(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `297 + s * (4 ±0)` - // Estimated: `3762 + s * (4 ±0)` - // Minimum execution time: 33_522_000 picoseconds. - Weight::from_parts(35_436_835, 0) - .saturating_add(Weight::from_parts(0, 3762)) - // Standard Error: 129 - .saturating_add(Weight::from_parts(14_041, 0).saturating_mul(s.into())) + // Measured: `218 + s * (8 ±0)` + // Estimated: `3681 + s * (8 ±0)` + // Minimum execution time: 21_053_000 picoseconds. + Weight::from_parts(17_291_897, 0) + .saturating_add(Weight::from_parts(0, 3681)) + // Standard Error: 104 + .saturating_add(Weight::from_parts(18_779, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) } - /// Storage: `OnDemandAssignmentProvider::SpotTraffic` (r:1 w:0) - /// Proof: `OnDemandAssignmentProvider::SpotTraffic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Paras::ParaLifecycles` (r:1 w:0) - /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `OnDemandAssignmentProvider::OnDemandQueue` (r:1 w:1) - /// Proof: `OnDemandAssignmentProvider::OnDemandQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) + /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::FreeEntries` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 9999]`. fn place_order_allow_death(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `297 + s * (4 ±0)` - // Estimated: `3762 + s * (4 ±0)` - // Minimum execution time: 33_488_000 picoseconds. 
- Weight::from_parts(34_848_934, 0) - .saturating_add(Weight::from_parts(0, 3762)) - // Standard Error: 143 - .saturating_add(Weight::from_parts(14_215, 0).saturating_mul(s.into())) + // Measured: `218 + s * (8 ±0)` + // Estimated: `3681 + s * (8 ±0)` + // Minimum execution time: 20_843_000 picoseconds. + Weight::from_parts(16_881_986, 0) + .saturating_add(Weight::from_parts(0, 3681)) + // Standard Error: 104 + .saturating_add(Weight::from_parts(18_788, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) } } From ee29cc2c6e40a8e77598ff3e71203406f4f9cba7 Mon Sep 17 00:00:00 2001 From: antonva Date: Mon, 18 Mar 2024 12:42:39 +0000 Subject: [PATCH 23/31] Fix post_upgrade in migration --- .../runtime/parachains/src/assigner_on_demand/migration.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs index 3854c557d545..5a97fd5b5ef9 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs @@ -109,15 +109,17 @@ mod v1 { ); let expected_len = u32::decode(&mut &state[..]).unwrap(); - let queue_status_size = QueueStatus::::size(); + let queue_status_size = QueueStatus::::get().size(); ensure!( expected_len == queue_status_size, "Number of orders should be the same before and after migration" ); + let n_affinity_entries = + AffinityEntries::::iter().map(|(_index, heap)| heap.len() as u32).count(); let n_para_id_affinity = ParaIdAffinity::::iter() .map(|(_para_id, affinity)| affinity.count as u32) - .fold(0, |acc, x| acc + x); + .count(); ensure!( n_para_id_affinity == n_affinity_entries, "Number of affinity entries should be the same as the counts in ParaIdAffinity" From b344bc663a2ce8257834f5da3769c5d53ddd73fa Mon Sep 17 00:00:00 2001 From: antonva Date: Mon, 18 Mar 2024 13:28:45 +0000 Subject: [PATCH 24/31] Add prdoc --- prdoc/pr_3190.prdoc | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 prdoc/pr_3190.prdoc diff --git a/prdoc/pr_3190.prdoc b/prdoc/pr_3190.prdoc new file mode 100644 index 000000000000..2f7a89a0b1ab --- /dev/null +++ b/prdoc/pr_3190.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix algorithmic complexity of the on-demand scheduler. + +doc: + - audience: Runtime Dev + description: | + Improves on demand performance by a significant factor. Previously, having many on-demand cores + would cause really poor blocktimes due to the fact that for each core the full order queue was + processed. This allows for increasing the max size of the on-demand queue if needed. + + At the same time, the spot price for on-demand is now checked prior to every order, ensuring + that economic backpressure will be applied. 
+ +crates: + - name: polkadot-runtime-parachains From 76e4e76ffb47e897ea0cbf3b62a4ded47f32a4d0 Mon Sep 17 00:00:00 2001 From: antonva Date: Mon, 18 Mar 2024 13:41:33 +0000 Subject: [PATCH 25/31] Remove unused on-demand max size import --- polkadot/runtime/parachains/src/configuration.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index 17d3b70bcf1b..b7635dcd7b22 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -29,7 +29,7 @@ use primitives::{ vstaging::{ApprovalVotingParams, NodeFeatures}, AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, - ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, + ON_DEMAND_MAX_QUEUE_MAX_SIZE, }; use sp_runtime::{traits::Zero, Perbill}; use sp_std::prelude::*; From ab5104524c153c870ef2d83263edab98d88b5c20 Mon Sep 17 00:00:00 2001 From: antonva Date: Mon, 18 Mar 2024 14:03:24 +0000 Subject: [PATCH 26/31] Remove unused mut from test --- polkadot/runtime/parachains/src/assigner_on_demand/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs index fcfbd5f73a16..982efe77b939 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/tests.rs @@ -610,7 +610,7 @@ fn queue_index_ordering_is_unsound_over_max_size() { #[test] fn queue_index_ordering_works() { // The largest accepted queue size. - let mut max_num = ON_DEMAND_MAX_QUEUE_MAX_SIZE; + let max_num = ON_DEMAND_MAX_QUEUE_MAX_SIZE; // 0 == 0 assert_eq!(QueueIndex(0).cmp(&QueueIndex(0)), Ordering::Equal); From 9a705cbacfb3167f1dd32c6f57c66fb8b01bb829 Mon Sep 17 00:00:00 2001 From: antonva Date: Mon, 18 Mar 2024 14:27:11 +0000 Subject: [PATCH 27/31] Remove benchmark todo --- polkadot/runtime/parachains/src/assigner_on_demand/mod.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index 077da776d47c..d4d224a1a853 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -108,8 +108,6 @@ impl WeightInfo for TestWeightInfo { /// this divides per core. /// /// Reads still exist, also improved slightly, but worst case we fetch all entries. -/// -/// TODO: Add benchmark results. #[derive(Encode, Decode, TypeInfo)] struct QueueStatusType { /// Last calculated traffic value. 
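
Note on the queue-index tests earlier in this series (`queue_index_ordering_works` and `queue_index_ordering_is_unsound_over_max_size`): they exercise wrap-around comparison of `u32` order indices, which is only meaningful while all live indices stay within the configured maximum queue size. Below is a rough, self-contained sketch of that general technique, not the pallet's actual implementation; `Index`, `MAX_QUEUE_SIZE`, and the chosen window value are illustrative placeholders only.

use std::cmp::Ordering;

// Hypothetical window; stands in for the configured maximum queue size.
const MAX_QUEUE_SIZE: u32 = 10_000;

#[derive(Clone, Copy, PartialEq, Eq)]
struct Index(u32);

impl Ord for Index {
    fn cmp(&self, other: &Self) -> Ordering {
        // Wrap-around distance from `other` to `self`.
        let ahead = self.0.wrapping_sub(other.0);
        if ahead == 0 {
            Ordering::Equal
        } else if ahead <= MAX_QUEUE_SIZE {
            // `self` was issued after `other`, within the window.
            Ordering::Greater
        } else {
            // Otherwise treat `self` as older (possibly wrapped past u32::MAX).
            Ordering::Less
        }
    }
}

impl PartialOrd for Index {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

fn main() {
    // Indices that wrapped past u32::MAX still sort before freshly issued ones.
    let mut v = vec![1u32, u32::MAX, 0, 2];
    v.sort_by_key(|&n| Index(n));
    assert_eq!(v, vec![u32::MAX, 0, 1, 2]);

    // Once two live indices are further apart than the window, the comparison
    // is no longer antisymmetric: both directions report `Less`.
    assert_eq!(Index(0).cmp(&Index(MAX_QUEUE_SIZE + 1)), Ordering::Less);
    assert_eq!(Index(MAX_QUEUE_SIZE + 1).cmp(&Index(0)), Ordering::Less);
}

The last two assertions show the `Ord` contract breaking once the spread of live indices exceeds the window, which is the same failure mode the "unsoundness proof" test demonstrates and the reason the maximum queue size must stay far below the range of the index type.
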
From f2ad19ffe459e99e3d14609902e5b70a2be8761f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anton=20Vilhelm=20=C3=81sgeirsson?= Date: Mon, 18 Mar 2024 14:31:00 +0000 Subject: [PATCH 28/31] Simplify PartialOrd for EnqueuedOrder Co-authored-by: ordian --- polkadot/runtime/parachains/src/assigner_on_demand/mod.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs index d4d224a1a853..c47c8745e654 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/mod.rs @@ -283,9 +283,8 @@ impl EnqueuedOrder { impl PartialOrd for EnqueuedOrder { fn partial_cmp(&self, other: &Self) -> Option { match other.idx.partial_cmp(&self.idx) { - None => return None, Some(Ordering::Equal) => other.para_id.partial_cmp(&self.para_id), - Some(o) => Some(o), + o => o, } } } From 70f468ea937349082c90a277c9cf951b2b5a4df5 Mon Sep 17 00:00:00 2001 From: command-bot <> Date: Mon, 18 Mar 2024 15:20:05 +0000 Subject: [PATCH 29/31] ".git/.scripts/commands/bench/bench.sh" --subcommand=pallet --runtime=westend --target_dir=polkadot --pallet=runtime_parachains::assigner_on_demand --- .../runtime_parachains_assigner_on_demand.rs | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs index ac0f05301b48..acd1834f79ed 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_assigner_on_demand.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::assigner_on_demand` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-03-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot @@ -31,11 +31,11 @@ // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=runtime_parachains::assigner_on_demand -// --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --chain=westend-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -48,44 +48,44 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::assigner_on_demand`. 
pub struct WeightInfo(PhantomData); impl runtime_parachains::assigner_on_demand::WeightInfo for WeightInfo { - /// Storage: `OnDemandAssignmentProvider::SpotTraffic` (r:1 w:0) - /// Proof: `OnDemandAssignmentProvider::SpotTraffic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Paras::ParaLifecycles` (r:1 w:0) - /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `OnDemandAssignmentProvider::OnDemandQueue` (r:1 w:1) - /// Proof: `OnDemandAssignmentProvider::OnDemandQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) + /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::FreeEntries` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 9999]`. fn place_order_keep_alive(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `297 + s * (4 ±0)` - // Estimated: `3762 + s * (4 ±0)` - // Minimum execution time: 33_522_000 picoseconds. - Weight::from_parts(35_436_835, 0) - .saturating_add(Weight::from_parts(0, 3762)) - // Standard Error: 129 - .saturating_add(Weight::from_parts(14_041, 0).saturating_mul(s.into())) + // Measured: `218 + s * (8 ±0)` + // Estimated: `3681 + s * (8 ±0)` + // Minimum execution time: 21_396_000 picoseconds. + Weight::from_parts(20_585_695, 0) + .saturating_add(Weight::from_parts(0, 3681)) + // Standard Error: 127 + .saturating_add(Weight::from_parts(20_951, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) } - /// Storage: `OnDemandAssignmentProvider::SpotTraffic` (r:1 w:0) - /// Proof: `OnDemandAssignmentProvider::SpotTraffic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Paras::ParaLifecycles` (r:1 w:0) - /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `OnDemandAssignmentProvider::OnDemandQueue` (r:1 w:1) - /// Proof: `OnDemandAssignmentProvider::OnDemandQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) + /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::FreeEntries` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// The range of component `s` is `[1, 9999]`. fn place_order_allow_death(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `297 + s * (4 ±0)` - // Estimated: `3762 + s * (4 ±0)` - // Minimum execution time: 33_488_000 picoseconds. 
- Weight::from_parts(34_848_934, 0) - .saturating_add(Weight::from_parts(0, 3762)) - // Standard Error: 143 - .saturating_add(Weight::from_parts(14_215, 0).saturating_mul(s.into())) + // Measured: `218 + s * (8 ±0)` + // Estimated: `3681 + s * (8 ±0)` + // Minimum execution time: 21_412_000 picoseconds. + Weight::from_parts(19_731_554, 0) + .saturating_add(Weight::from_parts(0, 3681)) + // Standard Error: 128 + .saturating_add(Weight::from_parts(21_055, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) } } From d8f1543e755346f59fe600891df11d996d35a89f Mon Sep 17 00:00:00 2001 From: antonva Date: Wed, 20 Mar 2024 08:54:08 +0000 Subject: [PATCH 30/31] Address nits --- .../runtime/parachains/src/assigner_on_demand/migration.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs index 5a97fd5b5ef9..4c2d1400807d 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs @@ -116,10 +116,10 @@ mod v1 { ); let n_affinity_entries = - AffinityEntries::::iter().map(|(_index, heap)| heap.len() as u32).count(); + AffinityEntries::::iter().map(|(_index, heap)| heap.len() as u32).sum(); let n_para_id_affinity = ParaIdAffinity::::iter() .map(|(_para_id, affinity)| affinity.count as u32) - .count(); + .sum(); ensure!( n_para_id_affinity == n_affinity_entries, "Number of affinity entries should be the same as the counts in ParaIdAffinity" From be3a3c16c5ba255f30b01dd7b66e57bb46096ebb Mon Sep 17 00:00:00 2001 From: antonva Date: Wed, 20 Mar 2024 09:13:28 +0000 Subject: [PATCH 31/31] Type sums in post migration --- .../runtime/parachains/src/assigner_on_demand/migration.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs index 4c2d1400807d..5071653377d4 100644 --- a/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs +++ b/polkadot/runtime/parachains/src/assigner_on_demand/migration.rs @@ -115,9 +115,9 @@ mod v1 { "Number of orders should be the same before and after migration" ); - let n_affinity_entries = + let n_affinity_entries: u32 = AffinityEntries::::iter().map(|(_index, heap)| heap.len() as u32).sum(); - let n_para_id_affinity = ParaIdAffinity::::iter() + let n_para_id_affinity: u32 = ParaIdAffinity::::iter() .map(|(_para_id, affinity)| affinity.count as u32) .sum(); ensure!(