diff --git a/Cargo.lock b/Cargo.lock index 991541ee84ee..6d8a49ea4388 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12754,6 +12754,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rstest", "sc-keystore", "sp-application-crypto", "sp-core", @@ -12777,6 +12778,8 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rstest", + "schnellru", "sp-application-crypto", "sp-keystore", "thiserror", diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index 5b62d90c1d4f..f66a66e859ec 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -23,6 +23,7 @@ polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } [dev-dependencies] +rstest = "0.18.2" assert_matches = "1" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } polkadot-node-subsystem-types = { path = "../../subsystem-types" } diff --git a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs b/polkadot/node/core/prospective-parachains/src/fragment_tree.rs index 04ee42a9de06..8061dc82d835 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_tree.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_tree.rs @@ -96,6 +96,7 @@ use std::{ use super::LOG_TARGET; use bitvec::prelude::*; +use polkadot_node_subsystem::messages::Ancestors; use polkadot_node_subsystem_util::inclusion_emulator::{ ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo, }; @@ -756,45 +757,46 @@ impl FragmentTree { depths.iter_ones().collect() } - /// Select `count` candidates after the given `required_path` which pass + /// Select `count` candidates after the given `ancestors` which pass /// the predicate and have not already been backed on chain. /// - /// Does an exhaustive search into the tree starting after `required_path`. - /// If there are multiple possibilities of size `count`, this will select the first one. - /// If there is no chain of size `count` that matches the criteria, this will return the largest - /// chain it could find with the criteria. - /// If there are no candidates meeting those criteria, returns an empty `Vec`. - /// Cycles are accepted, see module docs for the `Cycles` section. + /// Does an exhaustive search into the tree after traversing the ancestors path. + /// If the ancestors draw out a path that can be traversed in multiple ways, no + /// candidates will be returned. + /// If the ancestors do not draw out a full path (the path contains holes), candidates will be + /// suggested that may fill these holes. + /// If the ancestors don't draw out a valid path, no candidates will be returned. If there are + /// multiple possibilities of the same size, this will select the first one. If there is no + /// chain of size `count` that matches the criteria, this will return the largest chain it could + /// find with the criteria. If there are no candidates meeting those criteria, returns an empty + /// `Vec`. + /// Cycles are accepted, but this code expects that the runtime will deduplicate + /// identical candidates when occupying the cores (when proposing to back A->B->A, only A will + /// be backed on chain). 
/// - /// The intention of the `required_path` is to allow queries on the basis of + /// The intention of the `ancestors` is to allow queries on the basis of /// one or more candidates which were previously pending availability becoming - /// available and opening up more room on the core. - pub(crate) fn select_children( + /// available or candidates timing out. + pub(crate) fn find_backable_chain( &self, - required_path: &[CandidateHash], + ancestors: Ancestors, count: u32, pred: impl Fn(&CandidateHash) -> bool, ) -> Vec<CandidateHash> { - let base_node = { - // traverse the required path. - let mut node = NodePointer::Root; - for required_step in required_path { - if let Some(next_node) = self.node_candidate_child(node, &required_step) { - node = next_node; - } else { - return vec![] - }; - } - - node - }; - - // TODO: taking the first best selection might introduce bias - // or become gameable. - // - // For plausibly unique parachains, this shouldn't matter much. - // figure out alternative selection criteria? - self.select_children_inner(base_node, count, count, &pred, &mut vec![]) + if count == 0 { + return vec![] + } + // First, we need to order the ancestors. + // The node returned is the one from which we can start finding new backable candidates. + let Some(base_node) = self.find_ancestor_path(ancestors) else { return vec![] }; + + self.find_backable_chain_inner( + base_node, + count, + count, + &pred, + &mut Vec::with_capacity(count as usize), + ) } // Try finding a candidate chain starting from `base_node` of length `expected_count`. @@ -805,7 +807,7 @@ impl FragmentTree { // Cycles are accepted, but this doesn't allow for infinite execution time, because the maximum // depth we'll reach is `expected_count`. // - // Worst case performance is `O(num_forks ^ expected_count)`. + // Worst case performance is `O(num_forks ^ expected_count)`, the same as populating the tree. // Although an exponential function, this is actually a constant that can only be altered via // sudo/governance, because: // 1. `num_forks` at a given level is at most `max_candidate_depth * max_validators_per_core` @@ -817,7 +819,7 @@ impl FragmentTree { // scaling scenario). For non-elastic-scaling, this is just 1. In practice, this should be a // small number (1-3), capped by the total number of available cores (a constant alterable // only via governance/sudo). - fn select_children_inner( + fn find_backable_chain_inner( &self, base_node: NodePointer, expected_count: u32, @@ -857,7 +859,7 @@ impl FragmentTree { for (child_ptr, child_hash) in children { accumulator.push(child_hash); - let result = self.select_children_inner( + let result = self.find_backable_chain_inner( child_ptr, expected_count, remaining_count - 1, @@ -869,6 +871,9 @@ impl FragmentTree { // Short-circuit the search if we've found the right length. Otherwise, we'll // search for a max. + // Taking the first best selection doesn't introduce bias or become gameable, + // because `find_ancestor_path` uses a `HashSet` to track the ancestors, which + // makes the order in which ancestors are visited non-deterministic. if result.len() == expected_count as usize { return result } else if best_result.len() < result.len() { @@ -879,6 +884,93 @@ impl FragmentTree { best_result } + // Orders the ancestors into a viable path from root to the last one. + // Returns a pointer to the last node in the path. + // We assume that the ancestors form a chain (that the + // av-cores do not back parachain forks), None is returned otherwise.
+ // If we cannot use all ancestors, stop at the first found hole in the chain. This usually + // translates to a timed out candidate. + fn find_ancestor_path(&self, mut ancestors: Ancestors) -> Option<NodePointer> { + // The number of elements in the path we've processed so far. + let mut depth = 0; + let mut last_node = NodePointer::Root; + let mut next_node: Option<NodePointer> = Some(NodePointer::Root); + + while let Some(node) = next_node { + if depth > self.scope.max_depth { + return None; + } + + last_node = node; + + next_node = match node { + NodePointer::Root => { + let children = self + .nodes + .iter() + .enumerate() + .take_while(|n| n.1.parent == NodePointer::Root) + .map(|(index, node)| (NodePointer::Storage(index), node.candidate_hash)) + .collect::<Vec<_>>(); + + self.find_valid_child(&mut ancestors, children.iter()).ok()? + }, + NodePointer::Storage(ptr) => { + let children = self.nodes.get(ptr).and_then(|n| Some(n.children.iter())); + if let Some(children) = children { + self.find_valid_child(&mut ancestors, children).ok()? + } else { + None + } + }, + }; + + depth += 1; + } + + Some(last_node) + } + + // Find a node from the given iterator which is present in the ancestors + // collection. If there are multiple such nodes, return an error and log a warning. We don't + // accept forks in a parachain to be backed. The supplied ancestors should all form a chain. + // If there is no such node, return None. + fn find_valid_child<'a>( + &self, + ancestors: &'a mut Ancestors, + nodes: impl Iterator<Item = &'a (NodePointer, CandidateHash)> + 'a, + ) -> Result<Option<NodePointer>, ()> { + let mut possible_children = + nodes.filter_map(|(node_ptr, hash)| match ancestors.remove(&hash) { + true => Some(node_ptr), + false => None, + }); + + // We don't accept forks in a parachain to be backed. The supplied ancestors + // should all form a chain. + let next = possible_children.next(); + if let Some(second_child) = possible_children.next() { + if let (Some(NodePointer::Storage(first_child)), NodePointer::Storage(second_child)) = + (next, second_child) + { + gum::error!( + target: LOG_TARGET, + para_id = ?self.scope.para, + relay_parent = ?self.scope.relay_parent, + "Trying to find new backable candidates for a parachain for which we've backed a fork.\ + This is a bug and the runtime should not have allowed it.\n\ + Backed candidates with the same parent: {}, {}", + self.nodes[*first_child].candidate_hash, + self.nodes[*second_child].candidate_hash, + ); + } + + Err(()) + } else { + Ok(next.copied()) + } + } + fn populate_from_bases(&mut self, storage: &CandidateStorage, initial_bases: Vec<NodePointer>) { // Populate the tree breadth-first.
let mut last_sweep_start = None; @@ -1061,8 +1153,18 @@ mod tests { use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations; use polkadot_primitives::{BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData}; use polkadot_primitives_test_helpers as test_helpers; + use rstest::rstest; use std::iter; + impl NodePointer { + fn unwrap_idx(self) -> usize { + match self { + NodePointer::Root => panic!("Unexpected root"), + NodePointer::Storage(index) => index, + } + } + } + fn make_constraints( min_relay_parent_number: BlockNumber, valid_watermarks: Vec, @@ -1546,6 +1648,373 @@ mod tests { assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); } + #[test] + fn test_find_ancestor_path_and_find_backable_chain_empty_tree() { + let para_id = ParaId::from(5u32); + let relay_parent = Hash::repeat_byte(1); + let required_parent: HeadData = vec![0xff].into(); + let max_depth = 10; + + // Empty tree + let storage = CandidateStorage::new(); + let base_constraints = make_constraints(0, vec![0], required_parent.clone()); + + let relay_parent_info = + RelayChainBlockInfo { number: 0, hash: relay_parent, storage_root: Hash::zero() }; + + let scope = Scope::with_ancestors( + para_id, + relay_parent_info, + base_constraints, + vec![], + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + assert_eq!(tree.candidates().collect::>().len(), 0); + assert_eq!(tree.nodes.len(), 0); + + assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root); + assert_eq!(tree.find_backable_chain(Ancestors::new(), 2, |_| true), vec![]); + // Invalid candidate. + let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()), Some(NodePointer::Root)); + assert_eq!(tree.find_backable_chain(ancestors, 2, |_| true), vec![]); + } + + #[rstest] + #[case(true, 13)] + #[case(false, 8)] + // The tree with no cycles looks like: + // Make a tree that looks like this (note that there's no cycle): + // +-(root)-+ + // | | + // +----0---+ 7 + // | | + // 1----+ 5 + // | | + // | | + // 2 6 + // | + // 3 + // | + // 4 + // + // The tree with cycles is the same as the first but has a cycle from 4 back to the state + // produced by 0 (It's bounded by the max_depth + 1). 
+ // +-(root)-+ + // | | + // +----0---+ 7 + // | | + // 1----+ 5 + // | | + // | | + // 2 6 + // | + // 3 + // | + // 4---+ + // | | + // 1 5 + // | + // 2 + // | + // 3 + fn test_find_ancestor_path_and_find_backable_chain( + #[case] has_cycle: bool, + #[case] expected_node_count: usize, + ) { + let para_id = ParaId::from(5u32); + let relay_parent = Hash::repeat_byte(1); + let required_parent: HeadData = vec![0xff].into(); + let max_depth = 7; + let relay_parent_number = 0; + let relay_parent_storage_root = Hash::repeat_byte(69); + + let mut candidates = vec![]; + + // Candidate 0 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + required_parent.clone(), + vec![0].into(), + 0, + )); + // Candidate 1 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![0].into(), + vec![1].into(), + 0, + )); + // Candidate 2 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![1].into(), + vec![2].into(), + 0, + )); + // Candidate 3 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![2].into(), + vec![3].into(), + 0, + )); + // Candidate 4 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![3].into(), + vec![4].into(), + 0, + )); + // Candidate 5 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![0].into(), + vec![5].into(), + 0, + )); + // Candidate 6 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![1].into(), + vec![6].into(), + 0, + )); + // Candidate 7 + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + required_parent.clone(), + vec![7].into(), + 0, + )); + + if has_cycle { + candidates[4] = make_committed_candidate( + para_id, + relay_parent, + 0, + vec![3].into(), + vec![0].into(), // put the cycle here back to the output state of 0. + 0, + ); + } + + let base_constraints = make_constraints(0, vec![0], required_parent.clone()); + let mut storage = CandidateStorage::new(); + + let relay_parent_info = RelayChainBlockInfo { + number: relay_parent_number, + hash: relay_parent, + storage_root: relay_parent_storage_root, + }; + + for (pvd, candidate) in candidates.iter() { + storage.add_candidate(candidate.clone(), pvd.clone()).unwrap(); + } + let candidates = + candidates.into_iter().map(|(_pvd, candidate)| candidate).collect::>(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_info, + base_constraints, + vec![], + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + assert_eq!(tree.candidates().collect::>().len(), candidates.len()); + assert_eq!(tree.nodes.len(), expected_node_count); + + // Do some common tests on both trees. + { + // No ancestors supplied. + assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root); + assert_eq!( + tree.find_backable_chain(Ancestors::new(), 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + // Ancestor which is not part of the tree. Will be ignored. + let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + // A chain fork. 
+ let ancestors: Ancestors = + [(candidates[0].hash()), (candidates[7].hash())].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()), None); + assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + + // Ancestors which are part of the tree but don't form a path. Will be ignored. + let ancestors: Ancestors = + [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); + assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // Valid ancestors. + let ancestors: Ancestors = [candidates[7].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[7].hash()); + assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + + let ancestors: Ancestors = + [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[2].hash()); + assert_eq!( + tree.find_backable_chain(ancestors.clone(), 2, |_| true), + [3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // Valid ancestors with candidates which have been omitted due to timeouts + let ancestors: Ancestors = + [candidates[0].hash(), candidates[2].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[0].hash()); + assert_eq!( + tree.find_backable_chain(ancestors, 3, |_| true), + [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + let ancestors: Ancestors = + [candidates[0].hash(), candidates[1].hash(), candidates[3].hash()] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[1].hash()); + if has_cycle { + assert_eq!( + tree.find_backable_chain(ancestors, 2, |_| true), + [2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + } else { + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + } + + let ancestors: Ancestors = + [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + assert_eq!(res, NodePointer::Root); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // Requested count is 0. + assert_eq!(tree.find_backable_chain(Ancestors::new(), 0, |_| true), vec![]); + + let ancestors: Ancestors = + [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] + .into_iter() + .collect(); + assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]); + + let ancestors: Ancestors = + [candidates[2].hash(), candidates[0].hash()].into_iter().collect(); + assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]); + } + + // Now do some tests only on the tree with cycles + if has_cycle { + // Exceeds the maximum tree depth. 0-1-2-3-4-1-2-3-4, when the tree stops at + // 0-1-2-3-4-1-2-3. 
+ let ancestors: Ancestors = [ + candidates[0].hash(), + candidates[1].hash(), + candidates[2].hash(), + candidates[3].hash(), + candidates[4].hash(), + ] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[4].hash()); + assert_eq!( + tree.find_backable_chain(ancestors, 4, |_| true), + [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // 0-1-2. + let ancestors: Ancestors = + [candidates[0].hash(), candidates[1].hash(), candidates[2].hash()] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[2].hash()); + assert_eq!( + tree.find_backable_chain(ancestors.clone(), 1, |_| true), + [3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + assert_eq!( + tree.find_backable_chain(ancestors, 5, |_| true), + [3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + + // 0-1 + let ancestors: Ancestors = + [candidates[0].hash(), candidates[1].hash()].into_iter().collect(); + let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); + let candidate = &tree.nodes[res.unwrap_idx()]; + assert_eq!(candidate.candidate_hash, candidates[1].hash()); + assert_eq!( + tree.find_backable_chain(ancestors, 6, |_| true), + [2, 3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>(), + ); + + // For 0-1-2-3-4-5, there's more than 1 way of finding this path in + // the tree. `None` should be returned. The runtime should not have accepted this. + let ancestors: Ancestors = [ + candidates[0].hash(), + candidates[1].hash(), + candidates[2].hash(), + candidates[3].hash(), + candidates[4].hash(), + candidates[5].hash(), + ] + .into_iter() + .collect(); + let res = tree.find_ancestor_path(ancestors.clone()); + assert_eq!(res, None); + assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + } + } + #[test] fn graceful_cycle_of_0() { let mut storage = CandidateStorage::new(); @@ -1602,13 +2071,17 @@ mod tests { for count in 1..10 { assert_eq!( - tree.select_children(&[], count, |_| true), + tree.find_backable_chain(Ancestors::new(), count, |_| true), iter::repeat(candidate_a_hash) .take(std::cmp::min(count as usize, max_depth + 1)) .collect::>() ); assert_eq!( - tree.select_children(&[candidate_a_hash], count - 1, |_| true), + tree.find_backable_chain( + [candidate_a_hash].into_iter().collect(), + count - 1, + |_| true + ), iter::repeat(candidate_a_hash) .take(std::cmp::min(count as usize - 1, max_depth)) .collect::>() @@ -1682,22 +2155,22 @@ mod tests { assert_eq!(tree.nodes[3].candidate_hash, candidate_b_hash); assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash); - assert_eq!(tree.select_children(&[], 1, |_| true), vec![candidate_a_hash],); + assert_eq!(tree.find_backable_chain(Ancestors::new(), 1, |_| true), vec![candidate_a_hash],); assert_eq!( - tree.select_children(&[], 2, |_| true), + tree.find_backable_chain(Ancestors::new(), 2, |_| true), vec![candidate_a_hash, candidate_b_hash], ); assert_eq!( - tree.select_children(&[], 3, |_| true), + tree.find_backable_chain(Ancestors::new(), 3, |_| true), vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], ); assert_eq!( - tree.select_children(&[candidate_a_hash], 2, |_| true), + tree.find_backable_chain([candidate_a_hash].into_iter().collect(), 2, |_| true), vec![candidate_b_hash, candidate_a_hash], 
); assert_eq!( - tree.select_children(&[], 6, |_| true), + tree.find_backable_chain(Ancestors::new(), 6, |_| true), vec![ candidate_a_hash, candidate_b_hash, @@ -1706,10 +2179,17 @@ mod tests { candidate_a_hash ], ); - assert_eq!( - tree.select_children(&[candidate_a_hash, candidate_b_hash], 6, |_| true), - vec![candidate_a_hash, candidate_b_hash, candidate_a_hash,], - ); + + for count in 3..7 { + assert_eq!( + tree.find_backable_chain( + [candidate_a_hash, candidate_b_hash].into_iter().collect(), + count, + |_| true + ), + vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], + ); + } } #[test] diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 5937a1c1fb9f..2b14e09b4fb4 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -35,7 +35,7 @@ use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ messages::{ - ChainApiMessage, FragmentTreeMembership, HypotheticalCandidate, + Ancestors, ChainApiMessage, FragmentTreeMembership, HypotheticalCandidate, HypotheticalFrontierRequest, IntroduceCandidateRequest, ProspectiveParachainsMessage, ProspectiveValidationDataRequest, RuntimeApiMessage, RuntimeApiRequest, }, @@ -150,16 +150,9 @@ async fn run_iteration( relay_parent, para, count, - required_path, + ancestors, tx, - ) => answer_get_backable_candidates( - &view, - relay_parent, - para, - count, - required_path, - tx, - ), + ) => answer_get_backable_candidates(&view, relay_parent, para, count, ancestors, tx), ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx) => answer_hypothetical_frontier_request(&view, request, tx), ProspectiveParachainsMessage::GetTreeMembership(para, candidate, tx) => @@ -565,7 +558,7 @@ fn answer_get_backable_candidates( relay_parent: Hash, para: ParaId, count: u32, - required_path: Vec, + ancestors: Ancestors, tx: oneshot::Sender>, ) { let data = match view.active_leaves.get(&relay_parent) { @@ -614,7 +607,7 @@ fn answer_get_backable_candidates( }; let backable_candidates: Vec<_> = tree - .select_children(&required_path, count, |candidate| storage.is_backed(candidate)) + .find_backable_chain(ancestors.clone(), count, |candidate| storage.is_backed(candidate)) .into_iter() .filter_map(|child_hash| { storage.relay_parent_by_candidate_hash(&child_hash).map_or_else( @@ -635,7 +628,7 @@ fn answer_get_backable_candidates( if backable_candidates.is_empty() { gum::trace!( target: LOG_TARGET, - ?required_path, + ?ancestors, para_id = ?para, %relay_parent, "Could not find any backable candidate", diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index 732736b101de..0beddbf1416a 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -407,7 +407,7 @@ async fn get_backable_candidates( virtual_overseer: &mut VirtualOverseer, leaf: &TestLeaf, para_id: ParaId, - required_path: Vec, + ancestors: Ancestors, count: u32, expected_result: Vec<(CandidateHash, Hash)>, ) { @@ -415,11 +415,7 @@ async fn get_backable_candidates( virtual_overseer .send(overseer::FromOrchestra::Communication { msg: ProspectiveParachainsMessage::GetBackableCandidates( - leaf.hash, - para_id, - count, - required_path, - tx, + leaf.hash, para_id, count, ancestors, tx, ), }) .await; @@ -903,7 +899,7 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 
1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![], ) @@ -912,12 +908,20 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), + 0, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::new(), 0, vec![], ) .await; - get_backable_candidates(&mut virtual_overseer, &leaf_a, 1.into(), vec![], 0, vec![]).await; // Second candidates. second_candidate(&mut virtual_overseer, candidate_a.clone()).await; @@ -928,7 +932,7 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![], ) @@ -939,12 +943,20 @@ fn check_backable_query_single_candidate() { back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; // Should not get any backable candidates for the other para. - get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 1, vec![]).await; get_backable_candidates( &mut virtual_overseer, &leaf_a, 2.into(), - vec![candidate_hash_a], + Ancestors::new(), + 1, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + vec![candidate_hash_a].into_iter().collect(), 1, vec![], ) @@ -955,7 +967,7 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![], + Ancestors::new(), 1, vec![(candidate_hash_a, leaf_a.hash)], ) @@ -964,20 +976,20 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![(candidate_hash_b, leaf_a.hash)], ) .await; - // Should not get anything at the wrong path. + // Wrong path get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b], + vec![candidate_hash_b].into_iter().collect(), 1, - vec![], + vec![(candidate_hash_a, leaf_a.hash)], ) .await; @@ -1075,15 +1087,29 @@ fn check_backable_query_multiple_candidates() { make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_i, 10); // Should not get any backable candidates for the other para. 
- get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 1, vec![]) - .await; - get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 5, vec![]) - .await; get_backable_candidates( &mut virtual_overseer, &leaf_a, 2.into(), - vec![candidate_hash_a], + Ancestors::new(), + 1, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + Ancestors::new(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + vec![candidate_hash_a].into_iter().collect(), 1, vec![], ) @@ -1097,7 +1123,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![], + Ancestors::new(), 1, vec![(candidate_hash_a, leaf_a.hash)], ) @@ -1106,7 +1132,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![], + Ancestors::new(), 4, vec![ (candidate_hash_a, leaf_a.hash), @@ -1124,7 +1150,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![(candidate_hash_b, leaf_a.hash)], ) @@ -1133,16 +1159,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], - 2, - vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_d, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 3, vec![ (candidate_hash_b, leaf_a.hash), @@ -1159,7 +1176,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), count, vec![ (candidate_hash_c, leaf_a.hash), @@ -1172,26 +1189,30 @@ fn check_backable_query_multiple_candidates() { } } - // required path of 2 + // required path of 2 and higher { get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_b], + vec![candidate_hash_a, candidate_hash_i, candidate_hash_h, candidate_hash_c] + .into_iter() + .collect(), 1, - vec![(candidate_hash_d, leaf_a.hash)], + vec![(candidate_hash_j, leaf_a.hash)], ) .await; + get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_c], + vec![candidate_hash_a, candidate_hash_b].into_iter().collect(), 1, - vec![(candidate_hash_h, leaf_a.hash)], + vec![(candidate_hash_d, leaf_a.hash)], ) .await; + // If the requested count exceeds the largest chain, return the longest // chain we can get. for count in 4..10 { @@ -1199,7 +1220,7 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_c], + vec![candidate_hash_a, candidate_hash_c].into_iter().collect(), count, vec![ (candidate_hash_h, leaf_a.hash), @@ -1213,317 +1234,127 @@ fn check_backable_query_multiple_candidates() { // No more candidates in any chain. 
{ - let required_paths = vec![ - vec![candidate_hash_a, candidate_hash_b, candidate_hash_e], - vec![ - candidate_hash_a, - candidate_hash_c, - candidate_hash_h, - candidate_hash_i, - candidate_hash_j, - ], - ]; - for path in required_paths { - for count in 1..4 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - path.clone(), - count, - vec![], - ) - .await; - } + for count in 1..4 { + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a, candidate_hash_b, candidate_hash_e] + .into_iter() + .collect(), + count, + vec![], + ) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![ + candidate_hash_a, + candidate_hash_c, + candidate_hash_h, + candidate_hash_i, + candidate_hash_j, + ] + .into_iter() + .collect(), + count, + vec![], + ) + .await; } } - // Should not get anything at the wrong path. + // Wrong paths. get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b], + vec![candidate_hash_b].into_iter().collect(), 1, - vec![], + vec![(candidate_hash_a, leaf_a.hash)], ) .await; get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b, candidate_hash_a], + vec![candidate_hash_b, candidate_hash_f].into_iter().collect(), 3, - vec![], + vec![ + (candidate_hash_a, leaf_a.hash), + (candidate_hash_b, leaf_a.hash), + (candidate_hash_d, leaf_a.hash), + ], ) .await; get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_b, candidate_hash_c], - 3, - vec![], + vec![candidate_hash_a, candidate_hash_h].into_iter().collect(), + 4, + vec![ + (candidate_hash_c, leaf_a.hash), + (candidate_hash_h, leaf_a.hash), + (candidate_hash_i, leaf_a.hash), + (candidate_hash_j, leaf_a.hash), + ], ) .await; - - virtual_overseer - }); - - assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); - // 10 candidates and 7 parents on para 1. - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (7, 10)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); - } - - // A tree with multiple roots. - // Parachain 1 looks like this: - // (imaginary root) - // | | - // +----B---+ A - // | | | | - // | | | C - // D E F | - // | H - // G | - // I - // | - // J - { - let test_state = TestState::default(); - let view = test_harness(|mut virtual_overseer| async move { - // Leaf A - let leaf_a = TestLeaf { - number: 100, - hash: Hash::from_low_u64_be(130), - para_data: vec![ - (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), - (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), - ], - }; - - // Activate leaves. 
- activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; - - // Candidate B - let (candidate_b, pvd_b) = make_candidate( - leaf_a.hash, - leaf_a.number, + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, 1.into(), - HeadData(vec![1, 2, 3]), - HeadData(vec![2]), - test_state.validation_code_hash, - ); - let candidate_hash_b = candidate_b.hash(); - introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; - second_candidate(&mut virtual_overseer, candidate_b.clone()).await; - back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; + vec![candidate_hash_e, candidate_hash_h].into_iter().collect(), + 2, + vec![(candidate_hash_a, leaf_a.hash), (candidate_hash_b, leaf_a.hash)], + ) + .await; - // Candidate A - let (candidate_a, pvd_a) = make_candidate( - leaf_a.hash, - leaf_a.number, + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, 1.into(), - HeadData(vec![1, 2, 3]), - HeadData(vec![1]), - test_state.validation_code_hash, - ); - let candidate_hash_a = candidate_a.hash(); - introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; - second_candidate(&mut virtual_overseer, candidate_a.clone()).await; - back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; - - let (candidate_c, candidate_hash_c) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_a, 3); - let (_candidate_d, candidate_hash_d) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 4); - let (_candidate_e, candidate_hash_e) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 5); - let (candidate_f, candidate_hash_f) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 6); - let (_candidate_g, candidate_hash_g) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_f, 7); - let (candidate_h, candidate_hash_h) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_c, 8); - let (candidate_i, candidate_hash_i) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_h, 9); - let (_candidate_j, candidate_hash_j) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_i, 10); + vec![candidate_hash_a, candidate_hash_c, candidate_hash_d].into_iter().collect(), + 2, + vec![(candidate_hash_h, leaf_a.hash), (candidate_hash_i, leaf_a.hash)], + ) + .await; - // Should not get any backable candidates for the other para. - get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 1, vec![]) - .await; - get_backable_candidates(&mut virtual_overseer, &leaf_a, 2.into(), vec![], 5, vec![]) - .await; + // Parachain fork. get_backable_candidates( &mut virtual_overseer, &leaf_a, - 2.into(), - vec![candidate_hash_a], + 1.into(), + vec![candidate_hash_a, candidate_hash_b, candidate_hash_c].into_iter().collect(), 1, vec![], ) .await; - // Test various scenarios with various counts. 
- - // empty required_path - { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![], - 1, - vec![(candidate_hash_b, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![], - 2, - vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_d, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![], - 4, - vec![ - (candidate_hash_a, leaf_a.hash), - (candidate_hash_c, leaf_a.hash), - (candidate_hash_h, leaf_a.hash), - (candidate_hash_i, leaf_a.hash), - ], - ) - .await; - } - - // required path of 1 - { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a], - 1, - vec![(candidate_hash_c, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_b], - 1, - vec![(candidate_hash_d, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a], - 2, - vec![(candidate_hash_c, leaf_a.hash), (candidate_hash_h, leaf_a.hash)], - ) - .await; - - // If the requested count exceeds the largest chain, return the longest - // chain we can get. - for count in 2..10 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_b], - count, - vec![(candidate_hash_f, leaf_a.hash), (candidate_hash_g, leaf_a.hash)], - ) - .await; - } - } - - // required path of 2 - { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_b, candidate_hash_f], - 1, - vec![(candidate_hash_g, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_c], - 1, - vec![(candidate_hash_h, leaf_a.hash)], - ) - .await; - // If the requested count exceeds the largest chain, return the longest - // chain we can get. - for count in 4..10 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_c], - count, - vec![ - (candidate_hash_h, leaf_a.hash), - (candidate_hash_i, leaf_a.hash), - (candidate_hash_j, leaf_a.hash), - ], - ) - .await; - } - } - - // No more candidates in any chain. - { - let required_paths = vec![ - vec![candidate_hash_b, candidate_hash_f, candidate_hash_g], - vec![candidate_hash_b, candidate_hash_e], - vec![candidate_hash_b, candidate_hash_d], - vec![ - candidate_hash_a, - candidate_hash_c, - candidate_hash_h, - candidate_hash_i, - candidate_hash_j, - ], - ]; - for path in required_paths { - for count in 1..4 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - path.clone(), - count, - vec![], - ) - .await; - } - } - } + // Non-existent candidate. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a, CandidateHash(Hash::from_low_u64_be(100))] + .into_iter() + .collect(), + 2, + vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_d, leaf_a.hash)], + ) + .await; - // Should not get anything at the wrong path. + // Requested count is zero. 
get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_d], - 1, + Ancestors::new(), + 0, vec![], ) .await; @@ -1531,8 +1362,8 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b, candidate_hash_a], - 3, + vec![candidate_hash_a].into_iter().collect(), + 0, vec![], ) .await; @@ -1540,8 +1371,8 @@ fn check_backable_query_multiple_candidates() { &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_c, candidate_hash_d], - 3, + vec![candidate_hash_a, candidate_hash_b].into_iter().collect(), + 0, vec![], ) .await; @@ -1853,8 +1684,8 @@ fn check_pvd_query() { assert_eq!(view.candidate_storage.len(), 2); } -// Test simultaneously activating and deactivating leaves, and simultaneously deactivating multiple -// leaves. +// Test simultaneously activating and deactivating leaves, and simultaneously deactivating +// multiple leaves. #[test] fn correctly_updates_leaves() { let test_state = TestState::default(); @@ -2048,7 +1879,7 @@ fn persists_pending_availability_candidate() { &mut virtual_overseer, &leaf_b, para_id, - vec![candidate_hash_a], + vec![candidate_hash_a].into_iter().collect(), 1, vec![(candidate_hash_b, leaf_b_hash)], ) @@ -2113,7 +1944,7 @@ fn backwards_compatible() { &mut virtual_overseer, &leaf_a, para_id, - vec![], + Ancestors::new(), 1, vec![(candidate_hash_a, candidate_relay_parent)], ) @@ -2135,7 +1966,15 @@ fn backwards_compatible() { ) .await; - get_backable_candidates(&mut virtual_overseer, &leaf_b, para_id, vec![], 1, vec![]).await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + para_id, + Ancestors::new(), + 1, + vec![], + ) + .await; virtual_overseer }); @@ -2162,13 +2001,13 @@ fn uses_ancestry_only_within_session() { .await; assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) - ) if parent == hash => { - tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len })).unwrap(); - } - ); + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx)) + ) if parent == hash => { + tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len + })).unwrap(); } + ); assert_matches!( virtual_overseer.recv().await, diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index 24cdfd6b57b3..2a09e2b5b2cc 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -20,9 +20,11 @@ polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } futures-timer = "3.0.2" fatality = "0.0.6" +schnellru = "0.2.1" [dev-dependencies] sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } sp-keystore = { path = "../../../../substrate/primitives/keystore" } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } +rstest = "0.18.2" diff --git a/polkadot/node/core/provisioner/src/error.rs b/polkadot/node/core/provisioner/src/error.rs index 376d69f276fc..aae3234c3cc4 100644 --- a/polkadot/node/core/provisioner/src/error.rs +++ b/polkadot/node/core/provisioner/src/error.rs @@ -44,14 +44,17 @@ pub enum Error { 
#[error("failed to get block number")] CanceledBlockNumber(#[source] oneshot::Canceled), + #[error("failed to get session index")] + CanceledSessionIndex(#[source] oneshot::Canceled), + #[error("failed to get backed candidates")] CanceledBackedCandidates(#[source] oneshot::Canceled), #[error("failed to get votes on dispute")] CanceledCandidateVotes(#[source] oneshot::Canceled), - #[error("failed to get backable candidate from prospective parachains")] - CanceledBackableCandidate(#[source] oneshot::Canceled), + #[error("failed to get backable candidates from prospective parachains")] + CanceledBackableCandidates(#[source] oneshot::Canceled), #[error(transparent)] ChainApi(#[from] ChainApiError), @@ -71,11 +74,6 @@ pub enum Error { #[error("failed to send return message with Inherents")] InherentDataReturnChannel, - #[error( - "backed candidate does not correspond to selected candidate; check logic in provisioner" - )] - BackedCandidateOrderingProblem, - #[fatal] #[error("Failed to spawn background task")] FailedToSpawnBackgroundTask, diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index a29cf72afb14..c9ed873d3c25 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -24,26 +24,29 @@ use futures::{ channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered, FutureExt, }; use futures_timer::Delay; +use schnellru::{ByLength, LruMap}; use polkadot_node_subsystem::{ jaeger, messages::{ - CandidateBackingMessage, ChainApiMessage, ProspectiveParachainsMessage, ProvisionableData, - ProvisionerInherentData, ProvisionerMessage, RuntimeApiRequest, + Ancestors, CandidateBackingMessage, ChainApiMessage, ProspectiveParachainsMessage, + ProvisionableData, ProvisionerInherentData, ProvisionerMessage, RuntimeApiRequest, }, overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_util::{ has_required_runtime, request_availability_cores, request_persisted_validation_data, - runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, + request_session_index_for_child, + runtime::{prospective_parachains_mode, request_node_features, ProspectiveParachainsMode}, TimeoutExt, }; use polkadot_primitives::{ - BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, CoreState, Hash, Id as ParaId, - OccupiedCoreAssumption, SignedAvailabilityBitfield, ValidatorIndex, + vstaging::{node_features::FeatureIndex, NodeFeatures}, + BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, CoreState, Hash, + Id as ParaId, OccupiedCoreAssumption, SessionIndex, SignedAvailabilityBitfield, ValidatorIndex, }; -use std::collections::{BTreeMap, HashMap}; +use std::collections::{BTreeMap, HashMap, HashSet}; mod disputes; mod error; @@ -77,11 +80,18 @@ impl ProvisionerSubsystem { } } +/// Per-session info we need for the provisioner subsystem. +pub struct PerSession { + prospective_parachains_mode: ProspectiveParachainsMode, + elastic_scaling_mvp: bool, +} + /// A per-relay-parent state for the provisioning subsystem. 
pub struct PerRelayParent { leaf: ActivatedLeaf, backed_candidates: Vec, prospective_parachains_mode: ProspectiveParachainsMode, + elastic_scaling_mvp: bool, signed_bitfields: Vec, is_inherent_ready: bool, awaiting_inherent: Vec>, @@ -89,13 +99,14 @@ pub struct PerRelayParent { } impl PerRelayParent { - fn new(leaf: ActivatedLeaf, prospective_parachains_mode: ProspectiveParachainsMode) -> Self { + fn new(leaf: ActivatedLeaf, per_session: &PerSession) -> Self { let span = PerLeafSpan::new(leaf.span.clone(), "provisioner"); Self { leaf, backed_candidates: Vec::new(), - prospective_parachains_mode, + prospective_parachains_mode: per_session.prospective_parachains_mode, + elastic_scaling_mvp: per_session.elastic_scaling_mvp, signed_bitfields: Vec::new(), is_inherent_ready: false, awaiting_inherent: Vec::new(), @@ -124,10 +135,17 @@ impl ProvisionerSubsystem { async fn run(mut ctx: Context, metrics: Metrics) -> FatalResult<()> { let mut inherent_delays = InherentDelays::new(); let mut per_relay_parent = HashMap::new(); + let mut per_session = LruMap::new(ByLength::new(2)); loop { - let result = - run_iteration(&mut ctx, &mut per_relay_parent, &mut inherent_delays, &metrics).await; + let result = run_iteration( + &mut ctx, + &mut per_relay_parent, + &mut per_session, + &mut inherent_delays, + &metrics, + ) + .await; match result { Ok(()) => break, @@ -142,6 +160,7 @@ async fn run(mut ctx: Context, metrics: Metrics) -> FatalResult<()> { async fn run_iteration( ctx: &mut Context, per_relay_parent: &mut HashMap, + per_session: &mut LruMap, inherent_delays: &mut InherentDelays, metrics: &Metrics, ) -> Result<(), Error> { @@ -151,7 +170,7 @@ async fn run_iteration( // Map the error to ensure that the subsystem exits when the overseer is gone. match from_overseer.map_err(Error::OverseerExited)? { FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => - handle_active_leaves_update(ctx.sender(), update, per_relay_parent, inherent_delays).await?, + handle_active_leaves_update(ctx.sender(), update, per_relay_parent, per_session, inherent_delays).await?, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), FromOrchestra::Communication { msg } => { @@ -183,6 +202,7 @@ async fn handle_active_leaves_update( sender: &mut impl overseer::ProvisionerSenderTrait, update: ActiveLeavesUpdate, per_relay_parent: &mut HashMap, + per_session: &mut LruMap, inherent_delays: &mut InherentDelays, ) -> Result<(), Error> { gum::trace!(target: LOG_TARGET, "Handle ActiveLeavesUpdate"); @@ -191,10 +211,31 @@ async fn handle_active_leaves_update( } if let Some(leaf) = update.activated { + let session_index = request_session_index_for_child(leaf.hash, sender) + .await + .await + .map_err(Error::CanceledSessionIndex)??; + if per_session.get(&session_index).is_none() { + let prospective_parachains_mode = + prospective_parachains_mode(sender, leaf.hash).await?; + let elastic_scaling_mvp = request_node_features(leaf.hash, session_index, sender) + .await? 
+ .unwrap_or(NodeFeatures::EMPTY) + .get(FeatureIndex::ElasticScalingMVP as usize) + .map(|b| *b) + .unwrap_or(false); + + per_session.insert( + session_index, + PerSession { prospective_parachains_mode, elastic_scaling_mvp }, + ); + } + + let session_info = per_session.get(&session_index).expect("Just inserted"); + gum::trace!(target: LOG_TARGET, leaf_hash=?leaf.hash, "Adding delay"); - let prospective_parachains_mode = prospective_parachains_mode(sender, leaf.hash).await?; let delay_fut = Delay::new(PRE_PROPOSE_TIMEOUT).map(move |_| leaf.hash).boxed(); - per_relay_parent.insert(leaf.hash, PerRelayParent::new(leaf, prospective_parachains_mode)); + per_relay_parent.insert(leaf.hash, PerRelayParent::new(leaf, session_info)); inherent_delays.push(delay_fut); } @@ -253,6 +294,7 @@ async fn send_inherent_data_bg( let signed_bitfields = per_relay_parent.signed_bitfields.clone(); let backed_candidates = per_relay_parent.backed_candidates.clone(); let mode = per_relay_parent.prospective_parachains_mode; + let elastic_scaling_mvp = per_relay_parent.elastic_scaling_mvp; let span = per_relay_parent.span.child("req-inherent-data"); let mut sender = ctx.sender().clone(); @@ -272,6 +314,7 @@ async fn send_inherent_data_bg( &signed_bitfields, &backed_candidates, mode, + elastic_scaling_mvp, return_senders, &mut sender, &metrics, @@ -383,6 +426,7 @@ async fn send_inherent_data( bitfields: &[SignedAvailabilityBitfield], candidates: &[CandidateReceipt], prospective_parachains_mode: ProspectiveParachainsMode, + elastic_scaling_mvp: bool, return_senders: Vec>, from_job: &mut impl overseer::ProvisionerSenderTrait, metrics: &Metrics, @@ -434,6 +478,7 @@ async fn send_inherent_data( &bitfields, candidates, prospective_parachains_mode, + elastic_scaling_mvp, leaf.hash, from_job, ) @@ -558,6 +603,8 @@ async fn select_candidate_hashes_from_tracked( let mut selected_candidates = Vec::with_capacity(candidates.len().min(availability_cores.len())); + let mut selected_parachains = + HashSet::with_capacity(candidates.len().min(availability_cores.len())); gum::debug!( target: LOG_TARGET, @@ -591,6 +638,12 @@ async fn select_candidate_hashes_from_tracked( CoreState::Free => continue, }; + if selected_parachains.contains(&scheduled_core.para_id) { + // We already picked a candidate for this parachain. Elastic scaling only works with + // prospective parachains mode. + continue + } + let validation_data = match request_persisted_validation_data( relay_parent, scheduled_core.para_id, @@ -624,6 +677,7 @@ async fn select_candidate_hashes_from_tracked( "Selected candidate receipt", ); + selected_parachains.insert(candidate.descriptor.para_id); selected_candidates.push((candidate_hash, candidate.descriptor.relay_parent)); } } @@ -637,70 +691,93 @@ async fn select_candidate_hashes_from_tracked( /// Should be called when prospective parachains are enabled. async fn request_backable_candidates( availability_cores: &[CoreState], + elastic_scaling_mvp: bool, bitfields: &[SignedAvailabilityBitfield], relay_parent: Hash, sender: &mut impl overseer::ProvisionerSenderTrait, ) -> Result, Error> { let block_number = get_block_number_under_construction(relay_parent, sender).await?; - let mut selected_candidates = Vec::with_capacity(availability_cores.len()); + // Record how many cores are scheduled for each paraid. Use a BTreeMap because + // we'll need to iterate through them. + let mut scheduled_cores: BTreeMap = BTreeMap::new(); + // The on-chain ancestors of a para present in availability-cores. 
+ let mut ancestors: HashMap = + HashMap::with_capacity(availability_cores.len()); for (core_idx, core) in availability_cores.iter().enumerate() { - let (para_id, required_path) = match core { + let core_idx = CoreIndex(core_idx as u32); + match core { CoreState::Scheduled(scheduled_core) => { - // The core is free, pick the first eligible candidate from - // the fragment tree. - (scheduled_core.para_id, Vec::new()) + *scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1; }, CoreState::Occupied(occupied_core) => { - if bitfields_indicate_availability(core_idx, bitfields, &occupied_core.availability) - { + let is_available = bitfields_indicate_availability( + core_idx.0 as usize, + bitfields, + &occupied_core.availability, + ); + + if is_available { + ancestors + .entry(occupied_core.para_id()) + .or_default() + .insert(occupied_core.candidate_hash); + if let Some(ref scheduled_core) = occupied_core.next_up_on_available { - // The candidate occupying the core is available, choose its - // child in the fragment tree. - // - // TODO: doesn't work for on-demand parachains. We lean hard on the - // assumption that cores are fixed to specific parachains within a session. - // https://github.com/paritytech/polkadot/issues/5492 - (scheduled_core.para_id, vec![occupied_core.candidate_hash]) - } else { - continue - } - } else { - if occupied_core.time_out_at != block_number { - continue + // Request a new backable candidate for the newly scheduled para id. + *scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1; } + } else if occupied_core.time_out_at <= block_number { + // Timed out before being available. + if let Some(ref scheduled_core) = occupied_core.next_up_on_time_out { // Candidate's availability timed out, practically same as scheduled. - (scheduled_core.para_id, Vec::new()) - } else { - continue + *scheduled_cores.entry(scheduled_core.para_id).or_insert(0) += 1; } + } else { + // Not timed out and not available. + ancestors + .entry(occupied_core.para_id()) + .or_default() + .insert(occupied_core.candidate_hash); } }, CoreState::Free => continue, }; + } - // We should be calling this once per para rather than per core. - // TODO: Will be fixed in https://github.com/paritytech/polkadot-sdk/pull/3233. - // For now, at least make sure we don't supply the same candidate multiple times in case a - // para has multiple cores scheduled. - let response = get_backable_candidate(relay_parent, para_id, required_path, sender).await?; - match response { - Some((hash, relay_parent)) => { - if !selected_candidates.iter().any(|bc| &(hash, relay_parent) == bc) { - selected_candidates.push((hash, relay_parent)) - } - }, - None => { - gum::debug!( - target: LOG_TARGET, - leaf_hash = ?relay_parent, - core = core_idx, - "No backable candidate returned by prospective parachains", - ); - }, + let mut selected_candidates: Vec<(CandidateHash, Hash)> = + Vec::with_capacity(availability_cores.len()); + + for (para_id, core_count) in scheduled_cores { + let para_ancestors = ancestors.remove(¶_id).unwrap_or_default(); + + // If elastic scaling MVP is disabled, only allow one candidate per parachain. 
+ if !elastic_scaling_mvp && core_count > 1 { + continue } + + let response = get_backable_candidates( + relay_parent, + para_id, + para_ancestors, + core_count as u32, + sender, + ) + .await?; + + if response.is_empty() { + gum::debug!( + target: LOG_TARGET, + leaf_hash = ?relay_parent, + ?para_id, + "No backable candidate returned by prospective parachains", + ); + continue + } + + selected_candidates.extend(response.into_iter().take(core_count)); } Ok(selected_candidates) } @@ -713,6 +790,7 @@ async fn select_candidates( bitfields: &[SignedAvailabilityBitfield], candidates: &[CandidateReceipt], prospective_parachains_mode: ProspectiveParachainsMode, + elastic_scaling_mvp: bool, relay_parent: Hash, sender: &mut impl overseer::ProvisionerSenderTrait, ) -> Result<Vec<BackedCandidate>, Error> { @@ -722,7 +800,14 @@ async fn select_candidates( let selected_candidates = match prospective_parachains_mode { ProspectiveParachainsMode::Enabled { .. } => - request_backable_candidates(availability_cores, bitfields, relay_parent, sender).await?, + request_backable_candidates( + availability_cores, + elastic_scaling_mvp, + bitfields, + relay_parent, + sender, + ) + .await?, ProspectiveParachainsMode::Disabled => select_candidate_hashes_from_tracked( availability_cores, @@ -745,24 +830,6 @@ async fn select_candidates( gum::trace!(target: LOG_TARGET, leaf_hash=?relay_parent, "Got {} backed candidates", candidates.len()); - // `selected_candidates` is generated in ascending order by core index, and - // `GetBackedCandidates` _should_ preserve that property, but let's just make sure. - // - // We can't easily map from `BackedCandidate` to `core_idx`, but we know that every selected - // candidate maps to either 0 or 1 backed candidate, and the hashes correspond. Therefore, by - // checking them in order, we can ensure that the backed candidates are also in order. - let mut backed_idx = 0; - for selected in selected_candidates { - if selected.0 == - candidates.get(backed_idx).ok_or(Error::BackedCandidateOrderingProblem)?.hash() - { - backed_idx += 1; - } - } - if candidates.len() != backed_idx { - Err(Error::BackedCandidateOrderingProblem)?; - } - - // keep only one candidate with validation code. let mut with_validation_code = false; candidates.retain(|c| { @@ -804,28 +871,27 @@ async fn get_block_number_under_construction( } } -/// Requests backable candidate from Prospective Parachains based on -/// the given path in the fragment tree. -async fn get_backable_candidate( +/// Requests backable candidates from Prospective Parachains based on +/// the given ancestors in the fragment tree. The ancestors may not be ordered. +async fn get_backable_candidates( relay_parent: Hash, para_id: ParaId, - required_path: Vec<CandidateHash>, + ancestors: Ancestors, + count: u32, sender: &mut impl overseer::ProvisionerSenderTrait, -) -> Result<Option<(CandidateHash, Hash)>, Error> { +) -> Result<Vec<(CandidateHash, Hash)>, Error> { let (tx, rx) = oneshot::channel(); sender .send_message(ProspectiveParachainsMessage::GetBackableCandidates( relay_parent, para_id, - 1, // core count hardcoded to 1, until elastic scaling is implemented and enabled. 
- required_path, + count, + ancestors, tx, )) .await; - rx.await - .map_err(Error::CanceledBackableCandidate) - .map(|res| res.get(0).copied()) + rx.await.map_err(Error::CanceledBackableCandidates) } /// The availability bitfield for a given core is the transpose diff --git a/polkadot/node/core/provisioner/src/tests.rs b/polkadot/node/core/provisioner/src/tests.rs index 87c0e7a65d35..bdb4f85f4009 100644 --- a/polkadot/node/core/provisioner/src/tests.rs +++ b/polkadot/node/core/provisioner/src/tests.rs @@ -22,6 +22,9 @@ use polkadot_primitives::{OccupiedCore, ScheduledCore}; const MOCK_GROUP_SIZE: usize = 5; pub fn occupied_core(para_id: u32) -> CoreState { + let mut candidate_descriptor = dummy_candidate_descriptor(dummy_hash()); + candidate_descriptor.para_id = para_id.into(); + CoreState::Occupied(OccupiedCore { group_responsible: para_id.into(), next_up_on_available: None, @@ -29,7 +32,7 @@ pub fn occupied_core(para_id: u32) -> CoreState { time_out_at: 200_u32, next_up_on_time_out: None, availability: bitvec![u8, bitvec::order::Lsb0; 0; 32], - candidate_descriptor: dummy_candidate_descriptor(dummy_hash()), + candidate_descriptor, candidate_hash: Default::default(), }) } @@ -254,10 +257,56 @@ mod select_candidates { use polkadot_primitives::{ BlockNumber, CandidateCommitments, CommittedCandidateReceipt, PersistedValidationData, }; + use rstest::rstest; const BLOCK_UNDER_PRODUCTION: BlockNumber = 128; - // For test purposes, we always return this set of availability cores: + fn dummy_candidate_template() -> CandidateReceipt { + let empty_hash = PersistedValidationData::::default().hash(); + + let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); + descriptor_template.persisted_validation_data_hash = empty_hash; + CandidateReceipt { + descriptor: descriptor_template, + commitments_hash: CandidateCommitments::default().hash(), + } + } + + fn make_candidates( + core_count: usize, + expected_backed_indices: Vec, + ) -> (Vec, Vec) { + let candidate_template = dummy_candidate_template(); + let candidates: Vec<_> = std::iter::repeat(candidate_template) + .take(core_count) + .enumerate() + .map(|(idx, mut candidate)| { + candidate.descriptor.para_id = idx.into(); + candidate + }) + .collect(); + + let expected_backed = expected_backed_indices + .iter() + .map(|&idx| candidates[idx].clone()) + .map(|c| { + BackedCandidate::new( + CommittedCandidateReceipt { + descriptor: c.descriptor.clone(), + commitments: Default::default(), + }, + Vec::new(), + default_bitvec(MOCK_GROUP_SIZE), + None, + ) + }) + .collect(); + let candidate_hashes = candidates.into_iter().map(|c| c.hash()).collect(); + + (candidate_hashes, expected_backed) + } + + // For testing only one core assigned to a parachain, we return this set of availability cores: // // [ // 0: Free, @@ -273,7 +322,7 @@ mod select_candidates { // 10: Occupied(both next_up set, not available, timeout), // 11: Occupied(next_up_on_available and available, but different successor para_id) // ] - fn mock_availability_cores() -> Vec { + fn mock_availability_cores_one_per_para() -> Vec { use std::ops::Not; use CoreState::{Free, Scheduled}; @@ -292,6 +341,7 @@ mod select_candidates { build_occupied_core(4, |core| { core.next_up_on_available = Some(scheduled_core(4)); core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(41)); }), // 5: Occupied(next_up_on_time_out set but not timeout), build_occupied_core(5, |core| { @@ -307,12 +357,14 @@ mod select_candidates { 
build_occupied_core(7, |core| { core.next_up_on_time_out = Some(scheduled_core(7)); core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(71)); }), // 8: Occupied(both next_up set, available), build_occupied_core(8, |core| { core.next_up_on_available = Some(scheduled_core(8)); core.next_up_on_time_out = Some(scheduled_core(8)); core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(81)); }), // 9: Occupied(both next_up set, not available, no timeout), build_occupied_core(9, |core| { @@ -324,6 +376,7 @@ mod select_candidates { core.next_up_on_available = Some(scheduled_core(10)); core.next_up_on_time_out = Some(scheduled_core(10)); core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(101)); }), // 11: Occupied(next_up_on_available and available, but different successor para_id) build_occupied_core(11, |core| { @@ -333,20 +386,189 @@ mod select_candidates { ] } + // For test purposes with multiple possible cores assigned to a para, we always return this set + // of availability cores: + fn mock_availability_cores_multiple_per_para() -> Vec { + use std::ops::Not; + use CoreState::{Free, Scheduled}; + + vec![ + // 0: Free, + Free, + // 1: Scheduled(default), + Scheduled(scheduled_core(1)), + // 2: Occupied(no next_up set), + occupied_core(2), + // 3: Occupied(next_up_on_available set but not available), + build_occupied_core(3, |core| { + core.next_up_on_available = Some(scheduled_core(3)); + }), + // 4: Occupied(next_up_on_available set and available), + build_occupied_core(4, |core| { + core.next_up_on_available = Some(scheduled_core(4)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(41)); + }), + // 5: Occupied(next_up_on_time_out set but not timeout), + build_occupied_core(5, |core| { + core.next_up_on_time_out = Some(scheduled_core(5)); + }), + // 6: Occupied(next_up_on_time_out set and timeout but available), + build_occupied_core(6, |core| { + core.next_up_on_time_out = Some(scheduled_core(6)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.availability = core.availability.clone().not(); + }), + // 7: Occupied(next_up_on_time_out set and timeout and not available), + build_occupied_core(7, |core| { + core.next_up_on_time_out = Some(scheduled_core(7)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(71)); + }), + // 8: Occupied(both next_up set, available), + build_occupied_core(8, |core| { + core.next_up_on_available = Some(scheduled_core(8)); + core.next_up_on_time_out = Some(scheduled_core(8)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(81)); + }), + // 9: Occupied(both next_up set, not available, no timeout), + build_occupied_core(9, |core| { + core.next_up_on_available = Some(scheduled_core(9)); + core.next_up_on_time_out = Some(scheduled_core(9)); + }), + // 10: Occupied(both next_up set, not available, timeout), + build_occupied_core(10, |core| { + core.next_up_on_available = Some(scheduled_core(10)); + core.next_up_on_time_out = Some(scheduled_core(10)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(101)); + }), + // 11: Occupied(next_up_on_available and available, but different successor para_id) + build_occupied_core(11, |core| { + core.next_up_on_available = 
Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + }), + // 12-14: Occupied(next_up_on_available and available, same para_id). + build_occupied_core(12, |core| { + core.next_up_on_available = Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(121)); + }), + build_occupied_core(12, |core| { + core.next_up_on_available = Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(122)); + }), + build_occupied_core(12, |core| { + core.next_up_on_available = Some(scheduled_core(12)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(123)); + }), + // 15: Scheduled on same para_id as 12-14. + Scheduled(scheduled_core(12)), + // 16: Occupied(13, no next_up set, not available) + build_occupied_core(13, |core| { + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(131)); + }), + // 17: Occupied(13, no next_up set, available) + build_occupied_core(13, |core| { + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(132)); + }), + // 18: Occupied(13, next_up_on_available set to 13 but not available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(133)); + }), + // 19: Occupied(13, next_up_on_available set to 13 and available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(134)); + }), + // 20: Occupied(13, next_up_on_time_out set to 13 but not timeout) + build_occupied_core(13, |core| { + core.next_up_on_time_out = Some(scheduled_core(13)); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(135)); + }), + // 21: Occupied(13, next_up_on_available set to 14 and available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(14)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(136)); + }), + // 22: Occupied(13, next_up_on_available set to 14 but not available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(14)); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(137)); + }), + // 23: Occupied(13, both next_up set to 14, available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(14)); + core.next_up_on_time_out = Some(scheduled_core(14)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(138)); + }), + // 24: Occupied(13, both next_up set to 14, not available, timeout) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(14)); + core.next_up_on_time_out = Some(scheduled_core(14)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(1399)); + }), + // 25: Occupied(13, next_up_on_available and available, but successor para_id 15) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(15)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(139)); + }), + // 26: Occupied(15, 
next_up_on_available and available, but successor para_id 13) + build_occupied_core(15, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(151)); + }), + // 27: Occupied(15, both next_up, both available and timed out) + build_occupied_core(15, |core| { + core.next_up_on_available = Some(scheduled_core(15)); + core.availability = core.availability.clone().not(); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(152)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + }), + // 28: Occupied(13, both next_up set to 13, not available) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.next_up_on_time_out = Some(scheduled_core(13)); + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(1398)); + }), + // 29: Occupied(13, both next_up set to 13, not available, timeout) + build_occupied_core(13, |core| { + core.next_up_on_available = Some(scheduled_core(13)); + core.next_up_on_time_out = Some(scheduled_core(13)); + core.time_out_at = BLOCK_UNDER_PRODUCTION; + core.candidate_hash = CandidateHash(Hash::from_low_u64_be(1397)); + }), + ] + } + async fn mock_overseer( mut receiver: mpsc::UnboundedReceiver, - expected: Vec, + mock_availability_cores: Vec, + mut expected: Vec, + mut expected_ancestors: HashMap, Ancestors>, prospective_parachains_mode: ProspectiveParachainsMode, ) { use ChainApiMessage::BlockNumber; use RuntimeApiMessage::Request; + let mut backed_iter = expected.clone().into_iter(); + + expected.sort_by_key(|c| c.candidate().descriptor.para_id); let mut candidates_iter = expected .iter() .map(|candidate| (candidate.hash(), candidate.descriptor().relay_parent)); - let mut backed_iter = expected.clone().into_iter(); - while let Some(from_job) = receiver.next().await { match from_job { AllMessages::ChainApi(BlockNumber(_relay_parent, tx)) => @@ -356,7 +578,7 @@ mod select_candidates { PersistedValidationDataReq(_para_id, _assumption, tx), )) => tx.send(Ok(Some(Default::default()))).unwrap(), AllMessages::RuntimeApi(Request(_parent_hash, AvailabilityCores(tx))) => - tx.send(Ok(mock_availability_cores())).unwrap(), + tx.send(Ok(mock_availability_cores.clone())).unwrap(), AllMessages::CandidateBacking(CandidateBackingMessage::GetBackedCandidates( hashes, sender, @@ -373,35 +595,71 @@ mod select_candidates { let _ = sender.send(response); }, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetBackableCandidates(_, _, count, _, tx), - ) => { - assert_eq!(count, 1); - - match prospective_parachains_mode { - ProspectiveParachainsMode::Enabled { .. } => { - let _ = - tx.send(candidates_iter.next().map_or_else(Vec::new, |c| vec![c])); - }, - ProspectiveParachainsMode::Disabled => - panic!("unexpected prospective parachains request"), - } + ProspectiveParachainsMessage::GetBackableCandidates( + _, + _para_id, + count, + actual_ancestors, + tx, + ), + ) => match prospective_parachains_mode { + ProspectiveParachainsMode::Enabled { .. 
} => { + assert!(count > 0); + let candidates = + (&mut candidates_iter).take(count as usize).collect::>(); + assert_eq!(candidates.len(), count as usize); + + if !expected_ancestors.is_empty() { + if let Some(expected_required_ancestors) = expected_ancestors.remove( + &(candidates + .clone() + .into_iter() + .take(actual_ancestors.len()) + .map(|(c_hash, _)| c_hash) + .collect::>()), + ) { + assert_eq!(expected_required_ancestors, actual_ancestors); + } else { + assert_eq!(actual_ancestors.len(), 0); + } + } + + let _ = tx.send(candidates); + }, + ProspectiveParachainsMode::Disabled => + panic!("unexpected prospective parachains request"), }, _ => panic!("Unexpected message: {:?}", from_job), } } + + if let ProspectiveParachainsMode::Enabled { .. } = prospective_parachains_mode { + assert_eq!(candidates_iter.next(), None); + } + assert_eq!(expected_ancestors.len(), 0); } - #[test] - fn can_succeed() { + #[rstest] + #[case(ProspectiveParachainsMode::Disabled)] + #[case(ProspectiveParachainsMode::Enabled {max_candidate_depth: 0, allowed_ancestry_len: 0})] + fn can_succeed(#[case] prospective_parachains_mode: ProspectiveParachainsMode) { test_harness( - |r| mock_overseer(r, Vec::new(), ProspectiveParachainsMode::Disabled), + |r| { + mock_overseer( + r, + Vec::new(), + Vec::new(), + HashMap::new(), + prospective_parachains_mode, + ) + }, |mut tx: TestSubsystemSender| async move { - let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; select_candidates( &[], &[], &[], prospective_parachains_mode, + false, Default::default(), &mut tx, ) @@ -411,22 +669,22 @@ mod select_candidates { ) } - // this tests that only the appropriate candidates get selected. - // To accomplish this, we supply a candidate list containing one candidate per possible core; - // the candidate selection algorithm must filter them to the appropriate set - #[test] - fn selects_correct_candidates() { - let mock_cores = mock_availability_cores(); - - let empty_hash = PersistedValidationData::::default().hash(); - - let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); - descriptor_template.persisted_validation_data_hash = empty_hash; - let candidate_template = CandidateReceipt { - descriptor: descriptor_template, - commitments_hash: CandidateCommitments::default().hash(), - }; - + // Test candidate selection when prospective parachains mode is disabled. + // This tests that only the appropriate candidates get selected when prospective parachains mode + // is disabled. To accomplish this, we supply a candidate list containing one candidate per + // possible core; the candidate selection algorithm must filter them to the appropriate set + #[rstest] + // why those particular indices? see the comments on mock_availability_cores_*() functions. + #[case(mock_availability_cores_one_per_para(), vec![1, 4, 7, 8, 10], true)] + #[case(mock_availability_cores_one_per_para(), vec![1, 4, 7, 8, 10], false)] + #[case(mock_availability_cores_multiple_per_para(), vec![1, 4, 7, 8, 10, 12, 13, 14, 15], true)] + #[case(mock_availability_cores_multiple_per_para(), vec![1, 4, 7, 8, 10, 12, 13, 14, 15], false)] + fn test_in_subsystem_selection( + #[case] mock_cores: Vec, + #[case] expected_candidates: Vec, + #[case] elastic_scaling_mvp: bool, + ) { + let candidate_template = dummy_candidate_template(); let candidates: Vec<_> = std::iter::repeat(candidate_template) .take(mock_cores.len()) .enumerate() @@ -453,9 +711,8 @@ mod select_candidates { }) .collect(); - // why those particular indices? 
see the comments on mock_availability_cores() let expected_candidates: Vec<_> = - [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); + expected_candidates.into_iter().map(|idx| candidates[idx].clone()).collect(); let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; let expected_backed = expected_candidates @@ -473,14 +730,24 @@ mod select_candidates { }) .collect(); + let mock_cores_clone = mock_cores.clone(); test_harness( - |r| mock_overseer(r, expected_backed, prospective_parachains_mode), + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_backed, + HashMap::new(), + prospective_parachains_mode, + ) + }, |mut tx: TestSubsystemSender| async move { - let result = select_candidates( + let result: Vec = select_candidates( &mock_cores, &[], &candidates, prospective_parachains_mode, + elastic_scaling_mvp, Default::default(), &mut tx, ) @@ -498,20 +765,24 @@ mod select_candidates { ) } - #[test] - fn selects_max_one_code_upgrade() { - let mock_cores = mock_availability_cores(); + #[rstest] + #[case(ProspectiveParachainsMode::Disabled)] + #[case(ProspectiveParachainsMode::Enabled {max_candidate_depth: 0, allowed_ancestry_len: 0})] + fn selects_max_one_code_upgrade( + #[case] prospective_parachains_mode: ProspectiveParachainsMode, + ) { + let mock_cores = mock_availability_cores_one_per_para(); let empty_hash = PersistedValidationData::::default().hash(); // why those particular indices? see the comments on mock_availability_cores() - // the first candidate with code is included out of [1, 4, 7, 8, 10]. - let cores = [1, 4, 7, 8, 10]; + // the first candidate with code is included out of [1, 4, 7, 8, 10, 12]. + let cores = [1, 4, 7, 8, 10, 12]; let cores_with_code = [1, 4, 8]; - let expected_cores = [1, 7, 10]; + let expected_cores = [1, 7, 10, 12]; - let committed_receipts: Vec<_> = (0..mock_cores.len()) + let committed_receipts: Vec<_> = (0..=mock_cores.len()) .map(|i| { let mut descriptor = dummy_candidate_descriptor(dummy_hash()); descriptor.para_id = i.into(); @@ -552,23 +823,32 @@ mod select_candidates { let expected_backed_filtered: Vec<_> = expected_cores.iter().map(|&idx| candidates[idx].clone()).collect(); - let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; + let mock_cores_clone = mock_cores.clone(); test_harness( - |r| mock_overseer(r, expected_backed, prospective_parachains_mode), + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_backed, + HashMap::new(), + prospective_parachains_mode, + ) + }, |mut tx: TestSubsystemSender| async move { let result = select_candidates( &mock_cores, &[], &candidates, prospective_parachains_mode, + false, Default::default(), &mut tx, ) .await .unwrap(); - assert_eq!(result.len(), 3); + assert_eq!(result.len(), 4); result.into_iter().for_each(|c| { assert!( @@ -581,66 +861,214 @@ mod select_candidates { ) } - #[test] - fn request_from_prospective_parachains() { - let mock_cores = mock_availability_cores(); - let empty_hash = PersistedValidationData::::default().hash(); + #[rstest] + #[case(true)] + #[case(false)] + fn request_from_prospective_parachains_one_core_per_para(#[case] elastic_scaling_mvp: bool) { + let mock_cores = mock_availability_cores_one_per_para(); - let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); - descriptor_template.persisted_validation_data_hash = empty_hash; - let candidate_template = CandidateReceipt { - descriptor: descriptor_template, - commitments_hash: CandidateCommitments::default().hash(), - }; + // why those particular 
indices? see the comments on mock_availability_cores() + let expected_candidates: Vec<_> = vec![1, 4, 7, 8, 10, 12]; + let (candidates, expected_candidates) = + make_candidates(mock_cores.len() + 1, expected_candidates); - let candidates: Vec<_> = std::iter::repeat(candidate_template) - .take(mock_cores.len()) - .enumerate() - .map(|(idx, mut candidate)| { - candidate.descriptor.para_id = idx.into(); - candidate - }) - .collect(); + // Expect prospective parachains subsystem requests. + let prospective_parachains_mode = + ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 }; + + let mut required_ancestors: HashMap, Ancestors> = HashMap::new(); + required_ancestors.insert( + vec![candidates[4]], + vec![CandidateHash(Hash::from_low_u64_be(41))].into_iter().collect(), + ); + required_ancestors.insert( + vec![candidates[8]], + vec![CandidateHash(Hash::from_low_u64_be(81))].into_iter().collect(), + ); + + let mock_cores_clone = mock_cores.clone(); + let expected_candidates_clone = expected_candidates.clone(); + test_harness( + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_candidates_clone, + required_ancestors, + prospective_parachains_mode, + ) + }, + |mut tx: TestSubsystemSender| async move { + let result = select_candidates( + &mock_cores, + &[], + &[], + prospective_parachains_mode, + elastic_scaling_mvp, + Default::default(), + &mut tx, + ) + .await + .unwrap(); + + assert_eq!(result.len(), expected_candidates.len()); + result.into_iter().for_each(|c| { + assert!( + expected_candidates + .iter() + .any(|c2| c.candidate().corresponds_to(&c2.receipt())), + "Failed to find candidate: {:?}", + c, + ) + }); + }, + ) + } + + #[test] + fn request_from_prospective_parachains_multiple_cores_per_para_elastic_scaling_mvp() { + let mock_cores = mock_availability_cores_multiple_per_para(); // why those particular indices? see the comments on mock_availability_cores() let expected_candidates: Vec<_> = - [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); + vec![1, 4, 7, 8, 10, 12, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15]; // Expect prospective parachains subsystem requests. 
let prospective_parachains_mode = ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 }; - let expected_backed = expected_candidates - .iter() - .map(|c| { - BackedCandidate::new( - CommittedCandidateReceipt { - descriptor: c.descriptor.clone(), - commitments: Default::default(), - }, - Vec::new(), - default_bitvec(MOCK_GROUP_SIZE), - None, + let (candidates, expected_candidates) = + make_candidates(mock_cores.len(), expected_candidates); + + let mut required_ancestors: HashMap, Ancestors> = HashMap::new(); + required_ancestors.insert( + vec![candidates[4]], + vec![CandidateHash(Hash::from_low_u64_be(41))].into_iter().collect(), + ); + required_ancestors.insert( + vec![candidates[8]], + vec![CandidateHash(Hash::from_low_u64_be(81))].into_iter().collect(), + ); + required_ancestors.insert( + [12, 12, 12].iter().map(|&idx| candidates[idx]).collect::>(), + vec![ + CandidateHash(Hash::from_low_u64_be(121)), + CandidateHash(Hash::from_low_u64_be(122)), + CandidateHash(Hash::from_low_u64_be(123)), + ] + .into_iter() + .collect(), + ); + required_ancestors.insert( + [13, 13, 13].iter().map(|&idx| candidates[idx]).collect::>(), + (131..=139) + .map(|num| CandidateHash(Hash::from_low_u64_be(num))) + .chain(std::iter::once(CandidateHash(Hash::from_low_u64_be(1398)))) + .collect(), + ); + + required_ancestors.insert( + [15, 15].iter().map(|&idx| candidates[idx]).collect::>(), + vec![ + CandidateHash(Hash::from_low_u64_be(151)), + CandidateHash(Hash::from_low_u64_be(152)), + ] + .into_iter() + .collect(), + ); + + let mock_cores_clone = mock_cores.clone(); + let expected_candidates_clone = expected_candidates.clone(); + test_harness( + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_candidates, + required_ancestors, + prospective_parachains_mode, ) - }) - .collect(); + }, + |mut tx: TestSubsystemSender| async move { + let result = select_candidates( + &mock_cores, + &[], + &[], + prospective_parachains_mode, + true, + Default::default(), + &mut tx, + ) + .await + .unwrap(); + assert_eq!(result.len(), expected_candidates_clone.len()); + result.into_iter().for_each(|c| { + assert!( + expected_candidates_clone + .iter() + .any(|c2| c.candidate().corresponds_to(&c2.receipt())), + "Failed to find candidate: {:?}", + c, + ) + }); + }, + ) + } + + #[test] + fn request_from_prospective_parachains_multiple_cores_per_para_elastic_scaling_mvp_disabled() { + let mock_cores = mock_availability_cores_multiple_per_para(); + + // why those particular indices? see the comments on mock_availability_cores() + let expected_candidates: Vec<_> = vec![1, 4, 7, 8, 10]; + // Expect prospective parachains subsystem requests. 
+ let prospective_parachains_mode = + ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 }; + + let (candidates, expected_candidates) = + make_candidates(mock_cores.len(), expected_candidates); + + let mut required_ancestors: HashMap, Ancestors> = HashMap::new(); + required_ancestors.insert( + vec![candidates[4]], + vec![CandidateHash(Hash::from_low_u64_be(41))].into_iter().collect(), + ); + required_ancestors.insert( + vec![candidates[8]], + vec![CandidateHash(Hash::from_low_u64_be(81))].into_iter().collect(), + ); + + let mock_cores_clone = mock_cores.clone(); + let expected_candidates_clone = expected_candidates.clone(); test_harness( - |r| mock_overseer(r, expected_backed, prospective_parachains_mode), + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_candidates, + required_ancestors, + prospective_parachains_mode, + ) + }, |mut tx: TestSubsystemSender| async move { let result = select_candidates( &mock_cores, &[], &[], prospective_parachains_mode, + false, Default::default(), &mut tx, ) .await .unwrap(); + assert_eq!(result.len(), expected_candidates_clone.len()); result.into_iter().for_each(|c| { assert!( - expected_candidates.iter().any(|c2| c.candidate().corresponds_to(c2)), + expected_candidates_clone + .iter() + .any(|c2| c.candidate().corresponds_to(&c2.receipt())), "Failed to find candidate: {:?}", c, ) @@ -651,18 +1079,11 @@ mod select_candidates { #[test] fn request_receipts_based_on_relay_parent() { - let mock_cores = mock_availability_cores(); - let empty_hash = PersistedValidationData::::default().hash(); - - let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); - descriptor_template.persisted_validation_data_hash = empty_hash; - let candidate_template = CandidateReceipt { - descriptor: descriptor_template, - commitments_hash: CandidateCommitments::default().hash(), - }; + let mock_cores = mock_availability_cores_one_per_para(); + let candidate_template = dummy_candidate_template(); let candidates: Vec<_> = std::iter::repeat(candidate_template) - .take(mock_cores.len()) + .take(mock_cores.len() + 1) .enumerate() .map(|(idx, mut candidate)| { candidate.descriptor.para_id = idx.into(); @@ -673,7 +1094,7 @@ mod select_candidates { // why those particular indices? see the comments on mock_availability_cores() let expected_candidates: Vec<_> = - [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); + [1, 4, 7, 8, 10, 12].iter().map(|&idx| candidates[idx].clone()).collect(); // Expect prospective parachains subsystem requests. 
let prospective_parachains_mode = ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 }; @@ -693,14 +1114,24 @@ mod select_candidates { }) .collect(); + let mock_cores_clone = mock_cores.clone(); test_harness( - |r| mock_overseer(r, expected_backed, prospective_parachains_mode), + |r| { + mock_overseer( + r, + mock_cores_clone, + expected_backed, + HashMap::new(), + prospective_parachains_mode, + ) + }, |mut tx: TestSubsystemSender| async move { let result = select_candidates( &mock_cores, &[], &[], prospective_parachains_mode, + false, Default::default(), &mut tx, ) diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 549e43a671d6..f11291fd0ea8 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -1112,6 +1112,9 @@ pub struct ProspectiveValidationDataRequest { /// is present in and the depths of that tree the candidate is present in. pub type FragmentTreeMembership = Vec<(Hash, Vec)>; +/// A collection of ancestor candidates of a parachain. +pub type Ancestors = HashSet; + /// Messages sent to the Prospective Parachains subsystem. #[derive(Debug)] pub enum ProspectiveParachainsMessage { @@ -1128,15 +1131,18 @@ pub enum ProspectiveParachainsMessage { /// has been backed. This requires that the candidate was successfully introduced in /// the past. CandidateBacked(ParaId, CandidateHash), - /// Get N backable candidate hashes along with their relay parents for the given parachain, - /// under the given relay-parent hash, which is a descendant of the given candidate hashes. + /// Try getting N backable candidate hashes along with their relay parents for the given + /// parachain, under the given relay-parent hash, which is a descendant of the given ancestors. + /// Timed out ancestors should not be included in the collection. /// N should represent the number of scheduled cores of this ParaId. - /// Returns `None` on the channel if no such candidate exists. + /// A timed out ancestor frees the cores of all of its descendants, so if there's a hole in the + /// supplied ancestor path, we'll get candidates that backfill those timed out slots first. It + /// may also return less/no candidates, if there aren't enough backable candidates recorded. GetBackableCandidates( Hash, ParaId, u32, - Vec, + Ancestors, oneshot::Sender>, ), /// Get the hypothetical frontier membership of candidates with the given properties diff --git a/prdoc/pr_3233.prdoc b/prdoc/pr_3233.prdoc new file mode 100644 index 000000000000..ed4e8cce31f7 --- /dev/null +++ b/prdoc/pr_3233.prdoc @@ -0,0 +1,12 @@ +title: "provisioner: allow multiple cores assigned to the same para" + +topic: Node + +doc: + - audience: Node Dev + description: | + Enable supplying multiple backable candidates to the paras_inherent pallet for the same paraid. + +crates: + - name: polkadot-node-core-prospective-parachains + - name: polkadot-node-core-provisioner
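
Illustrative note (not part of the diff): the provisioner hunks above first group availability cores per `ParaId` into a count of assignments plus a set of pending-availability `ancestors`, and only then issue one `GetBackableCandidates` request per para. A minimal standalone sketch of that grouping step, using simplified stand-in types (this `CoreState` collapses `next_up_on_available`/`next_up_on_time_out` into a single hypothetical `next_up` field and is not the real primitive):

use std::collections::{HashMap, HashSet};

type ParaId = u32;
type CandidateHash = u64;
type Ancestors = HashSet<CandidateHash>;

enum CoreState {
    Free,
    Scheduled { para_id: ParaId },
    Occupied {
        para_id: ParaId,
        candidate_hash: CandidateHash,
        is_available: bool,
        timed_out: bool,
        // Stand-in for next_up_on_available / next_up_on_time_out.
        next_up: Option<ParaId>,
    },
}

/// Group cores per para: how many backable candidates each para may supply,
/// plus the pending-availability candidates that become its `ancestors`.
fn group_cores(cores: &[CoreState]) -> (HashMap<ParaId, usize>, HashMap<ParaId, Ancestors>) {
    let mut scheduled_cores: HashMap<ParaId, usize> = HashMap::new();
    let mut ancestors: HashMap<ParaId, Ancestors> = HashMap::new();

    for core in cores {
        match core {
            CoreState::Free => {},
            CoreState::Scheduled { para_id } => *scheduled_cores.entry(*para_id).or_insert(0) += 1,
            CoreState::Occupied { para_id, candidate_hash, is_available, timed_out, next_up } => {
                if *is_available {
                    // Available occupant: remember it as an ancestor and count the
                    // core for whichever para is next up on it.
                    ancestors.entry(*para_id).or_default().insert(*candidate_hash);
                    if let Some(next) = next_up {
                        *scheduled_cores.entry(*next).or_insert(0) += 1;
                    }
                } else if *timed_out {
                    // Timed out before becoming available: practically the same as a
                    // freshly scheduled core; no ancestor is recorded.
                    if let Some(next) = next_up {
                        *scheduled_cores.entry(*next).or_insert(0) += 1;
                    }
                } else {
                    // Still pending availability: record as an ancestor only.
                    ancestors.entry(*para_id).or_default().insert(*candidate_hash);
                }
            },
        }
    }

    (scheduled_cores, ancestors)
}

In the actual change, a disabled elastic-scaling MVP additionally skips any para whose core count exceeds one, and each remaining (count, ancestors) pair is passed to a single `get_backable_candidates` call for that para.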
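
Illustrative note (not part of the diff): the provisioner tests now rely on `rstest` to run one test body over several inputs (core sets, expected indices, the elastic-scaling flag). For readers unfamiliar with the crate, a minimal self-contained sketch of the pattern:

use rstest::rstest;

// Each #[case] attribute expands into its own test, so both values of the flag
// are exercised independently, mirroring how the tests above run with and
// without the elastic scaling MVP.
#[rstest]
#[case(true)]
#[case(false)]
fn runs_once_per_case(#[case] elastic_scaling_mvp: bool) {
    assert!(elastic_scaling_mvp || !elastic_scaling_mvp);
}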
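
Illustrative note (not part of the diff): the new `Ancestors` alias replaces the ordered `required_path` with an unordered `HashSet`, and the updated `GetBackableCandidates` doc states the contract: the subsystem orders the ancestors into a path itself, back-fills holes left by timed-out ancestors, and may return fewer candidates than requested. The following is a toy model of that contract over a plain parent-to-children map; it is an assumption-laden sketch, not the subsystem's real fragment-tree algorithm.

use std::collections::{HashMap, HashSet};

type CandidateHash = u64;
type Ancestors = HashSet<CandidateHash>;

/// Toy candidate "tree": parent hash (None = root) mapped to its children.
struct ToyTree {
    children: HashMap<Option<CandidateHash>, Vec<CandidateHash>>,
}

impl ToyTree {
    fn find_backable_chain(&self, mut ancestors: Ancestors, count: usize) -> Vec<CandidateHash> {
        // 1. Order the unordered ancestors into a path from the root, walking down
        //    while exactly one child of the current node is a supplied ancestor.
        let mut cursor: Option<CandidateHash> = None;
        loop {
            let matching: Vec<CandidateHash> = self
                .children
                .get(&cursor)
                .map(|kids| kids.iter().copied().filter(|k| ancestors.contains(k)).collect())
                .unwrap_or_default();
            match matching.as_slice() {
                // Unambiguous next ancestor: keep walking.
                [next] => {
                    ancestors.remove(next);
                    cursor = Some(*next);
                },
                // No matching child: stop here. Leftover ancestors (holes, e.g. from
                // timed-out candidates) are dropped and the results back-fill from
                // this point.
                [] => break,
                // Ambiguous ancestry (a fork among the supplied ancestors): nothing
                // is returned.
                _ => return Vec::new(),
            }
        }

        // 2. Extend the path with up to `count` further candidates (first child at
        //    each level, for simplicity).
        let mut backable = Vec::new();
        while backable.len() < count {
            match self.children.get(&cursor).and_then(|kids| kids.first()) {
                Some(&child) => {
                    backable.push(child);
                    cursor = Some(child);
                },
                None => break,
            }
        }
        backable
    }
}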