From a59c120ce8d63ae6e0ba0a365d98ea9142a6b770 Mon Sep 17 00:00:00 2001 From: Bradley Olson <34992650+BradleyOlson64@users.noreply.github.com> Date: Fri, 11 Nov 2022 11:24:40 -0800 Subject: [PATCH 1/8] Brad implementers guide revisions 2 (#6239) * Add disputes subsystems fix * Updated dispute approval vote import reasoning * Improved wording of my changes * Resolving issues brought up in comments --- .../src/node/disputes/README.md | 2 +- .../src/node/disputes/dispute-coordinator.md | 60 ++++++++++--------- 2 files changed, 33 insertions(+), 29 deletions(-) diff --git a/roadmap/implementers-guide/src/node/disputes/README.md b/roadmap/implementers-guide/src/node/disputes/README.md index 7b28b908fbbe..2db891aa216f 100644 --- a/roadmap/implementers-guide/src/node/disputes/README.md +++ b/roadmap/implementers-guide/src/node/disputes/README.md @@ -5,7 +5,7 @@ This section is for the node-side subsystems that lead to participation in dispu * Participation. Participate in active disputes. When a node becomes aware of a dispute, it should recover the data for the disputed block, check the validity of the parablock, and issue a statement regarding the validity of the parablock. * Distribution. Validators should notify each other of active disputes and relevant statements over the network. * Submission. When authoring a block, a validator should inspect the state of the parent block and provide any information about disputes that the chain needs as part of the `ParaInherent`. This should initialize new disputes on-chain as necessary. - * Fork-choice and Finality. When observing a block issuing a `DisputeRollback` digest in the header, that branch of the relay chain should be abandoned all the way back to the indicated block. When voting on chains in GRANDPA, no chains that contain blocks that are or have been disputed should be voted on. + * Fork-choice and Finality. 
When observing a block issuing a `DisputeRollback` digest in the header, that branch of the relay chain should be abandoned all the way back to the indicated block. When voting on chains in GRANDPA, no chains that contain blocks with active disputes or disputes that concluded invalid should be voted on. ## Components diff --git a/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md b/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md index 29a3c5ffa082..c04e15b1a0e9 100644 --- a/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md +++ b/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md @@ -9,8 +9,8 @@ In particular the dispute-coordinator is responsible for: - Ensuring that the node is able to raise a dispute in case an invalid candidate is found during approval checking. -- Ensuring malicious approval votes will be recorded, so nodes can get slashed - properly. +- Ensuring lazy approval votes (votes given without running the parachain + validation function) will be recorded, so lazy nodes can get slashed properly. - Coordinating actual participation in a dispute, ensuring that the node participates in any justified dispute in a way that ensures resolution of disputes on the network even in the case of many disputes raised (flood/DoS @@ -76,24 +76,27 @@ inefficient process. (Quadratic complexity adds up, with 35 votes in total per c Approval votes are very relevant nonetheless as we are going to see in the next section. -## Ensuring Malicious Approval Votes Will Be Recorded +## Ensuring Lazy Approval Votes Will Be Recorded ### Ensuring Recording While there is no need to record approval votes in the dispute coordinator preemptively, we do need to make sure they are recorded when a dispute actually happens. This is because only votes recorded by the dispute -coordinator will be considered for slashing. 
While the backing group always gets -slashed, a serious attack attempt will likely also consist of malicious approval -checkers which will cast approval votes, although the candidate is invalid. If -we did not import those votes, those nodes would likely cast an `invalid` explicit -vote as part of the dispute in addition to their approval vote and thus avoid a -slash. With the 2/3rd honest assumption it seems unrealistic that malicious -actors will keep sending approval votes once they became aware of a raised -dispute. Hence the most crucial approval votes to import are the early ones -(tranche 0), to take into account network latencies and such we still want to -import approval votes at a later point in time as well (in particular we need to -make sure the dispute can conclude, but more on that later). +coordinator will be considered for slashing. It is sufficient for our +threat model that malicious backers are slashed as opposed to both backers and +approval checkers. However, we still must import approval votes from the approvals +process into the disputes process to ensure that lazy approval checkers +actually run the parachain validation function. Slashing lazy approval checkers is necessary, else we risk a useless approvals process where every approval +checker blindly votes valid for every candidate. If we did not import approval +votes, lazy nodes would likely cast a properly checked explicit vote as part +of the dispute in addition to their blind approval vote and thus avoid a slash. +With the 2/3rd honest assumption it seems unrealistic that lazy approval voters +will keep sending unchecked approval votes once they became aware of a raised +dispute. Hence the most crucial approval votes to import are the early ones +(tranche 0), to take into account network latencies and such we still want to +import approval votes at a later point in time as well (in particular we need +to make sure the dispute can conclude, but more on that later). 
As mentioned already previously, importing votes is most efficient when batched. At the same time approval voting and disputes are running concurrently so @@ -173,7 +176,7 @@ There are two potential caveats with this though: voting. By distributing our own approval vote we make sure the dispute can conclude regardless how the race ended (we either participate explicitly anyway or we sent our already present approval vote). By importing all - approval votes we make it possible to slash malicious approval voters, even + approval votes we make it possible to slash lazy approval voters, even if they also cast an invalid explicit vote. Conclusion: As long as we make sure, if our own approval vote gets imported @@ -199,11 +202,12 @@ time participation is faster than approval, a node would do double work. ### Ensuring Chain Import While in the previous section we discussed means for nodes to ensure relevant -votes are recorded so attackers get slashed properly, it is crucial to also -discuss the actual chain import. Only if we guarantee that recorded votes will -also get imported on chain (on all potential chains really) we will succeed in -executing slashes. Again approval votes prove to be our weak spot here, but also -backing votes might get missed. +votes are recorded so lazy approval checkers get slashed properly, it is crucial +to also discuss the actual chain import. Only if we guarantee that recorded votes +will also get imported on chain (on all potential chains really) we will succeed +in executing slashes. Particularly we need to make sure backing votes end up on +chain consistently. In contrast recording and slashing lazy approval voters only +needs to be likely, not certain. Dispute distribution will make sure all explicit dispute votes get distributed among nodes which includes current block producers (current authority set) which @@ -223,14 +227,14 @@ production in the current set - they might only exist on an already abandoned fork. 
This means a block producer that just joined the set, might not have seen any of them. -For approvals it is even more tricky: Approval voting together with finalization -is a completely off-chain process therefore those protocols don't care about -block production at all. Approval votes only have a guarantee of being -propagated between the nodes that are responsible for finalizing the concerned -blocks. This implies that on an era change the current authority set, will not -necessarily get informed about any approval votes for the previous era. Hence -even if all validators of the previous era successfully recorded all approval -votes in the dispute coordinator, they won't get a chance to put them on chain, +For approvals it is even more tricky and less necessary: Approval voting together +with finalization is a completely off-chain process therefore those protocols +don't care about block production at all. Approval votes only have a guarantee of +being propagated between the nodes that are responsible for finalizing the +concerned blocks. This implies that on an era change the current authority set, +will not necessarily get informed about any approval votes for the previous era. +Hence even if all validators of the previous era successfully recorded all approval +votes in the dispute coordinator, they won't get a chance to put them on chain, hence they won't be considered for slashing. 
It is important to note, that the essential properties of the system still hold: From 133d4051eff81e7e630a57d2f66bf9913896d931 Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Fri, 11 Nov 2022 23:47:29 +0200 Subject: [PATCH 2/8] Update disputes prioritisation in `dispute-coordinator` (#6130) * Scraper processes CandidateBacked events * Change definition of best-effort * Fix `dispute-coordinator` tests * Unit test for dispute filtering * Clarification comment * Add tests * Fix logic If a dispute is not backed, not included and not confirmed we don't participate but we do import votes. * Add metrics for refrained participations * Revert "Add tests" This reverts commit 7b8391a087922ced942cde9cd2b50ff3f633efc0. * Revert "Unit test for dispute filtering" This reverts commit 92ba5fe678214ab360306313a33c781338e600a0. * fix dispute-coordinator tests * Fix scraping * new tests * Small fixes in guide * Apply suggestions from code review Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> * Fix some comments and remove a pointless test * Code review feedback * Clarification comment in tests * Some tests * Reference counted `CandidateHash` in scraper * Proper handling for Backed and Included candidates in scraper Backed candidates which are not included should be kept for a predetermined window of finalized blocks. E.g. if a candidate is backed but not included in block 2, and the window size is 2, the same candidate should be cleaned after block 4 is finalized. Add reference counting for candidates in scraper. A candidate can be added on multiple block heights so we have to make sure we don't clean it prematurely from the scraper. Add tests. 
* Update comments in tests * Guide update * Fix cleanup logic for `backed_candidates_by_block_number` * Simplify cleanup * Make spellcheck happy * Update tests * Extract candidate backing logic in separate struct * Code review feedback * Treat backed and included candidates in the same fashion * Update some comments * Small improvements in test * spell check * Fix some more comments * clean -> prune * Code review feedback * Reword comment * spelling Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> --- .../dispute-coordinator/src/initialized.rs | 36 +- node/core/dispute-coordinator/src/lib.rs | 2 +- node/core/dispute-coordinator/src/metrics.rs | 16 + .../src/scraping/candidates.rs | 148 +++++++ .../dispute-coordinator/src/scraping/mod.rs | 120 +++--- .../dispute-coordinator/src/scraping/tests.rs | 250 ++++++++++- node/core/dispute-coordinator/src/tests.rs | 391 ++++++++++++++++-- .../src/node/disputes/dispute-coordinator.md | 9 +- 8 files changed, 873 insertions(+), 99 deletions(-) create mode 100644 node/core/dispute-coordinator/src/scraping/candidates.rs diff --git a/node/core/dispute-coordinator/src/initialized.rs b/node/core/dispute-coordinator/src/initialized.rs index 9229f61f3a78..901a6d863ed2 100644 --- a/node/core/dispute-coordinator/src/initialized.rs +++ b/node/core/dispute-coordinator/src/initialized.rs @@ -869,12 +869,21 @@ impl Initialized { } } + let has_own_vote = new_state.has_own_vote(); + let is_disputed = new_state.is_disputed(); + let has_controlled_indices = !env.controlled_indices().is_empty(); + let is_backed = self.scraper.is_candidate_backed(&candidate_hash); + let is_confirmed = new_state.is_confirmed(); + // We participate only in disputes which are included, backed or confirmed + let allow_participation = is_included || is_backed || is_confirmed; + // Participate in dispute if we did not cast a vote before and actually have keys to cast a - // local vote: - if !new_state.has_own_vote() && - new_state.is_disputed() 
&& - !env.controlled_indices().is_empty() - { + // local vote. Disputes should fall in one of the categories below, otherwise we will refrain + // from participation: + // - `is_included` lands in prioritised queue + // - `is_confirmed` | `is_backed` lands in best effort queue + // We don't participate in disputes on finalized candidates. + if !has_own_vote && is_disputed && has_controlled_indices && allow_participation { let priority = ParticipationPriority::with_priority_if(is_included); gum::trace!( target: LOG_TARGET, @@ -896,6 +905,23 @@ impl Initialized { ) .await; log_error(r)?; + } else { + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?is_confirmed, + ?has_own_vote, + ?is_disputed, + ?has_controlled_indices, + ?allow_participation, + ?is_included, + ?is_backed, + "Will not queue participation for candidate" + ); + + if !allow_participation { + self.metrics.on_refrained_participation(); + } } // Also send any already existing approval vote on new disputes: diff --git a/node/core/dispute-coordinator/src/lib.rs b/node/core/dispute-coordinator/src/lib.rs index 03abd8f59d60..09d6c621b999 100644 --- a/node/core/dispute-coordinator/src/lib.rs +++ b/node/core/dispute-coordinator/src/lib.rs @@ -286,7 +286,7 @@ impl DisputeCoordinatorSubsystem { let mut participation_requests = Vec::new(); let mut unconfirmed_disputes: UnconfirmedDisputes = UnconfirmedDisputes::new(); - let (mut scraper, votes) = ChainScraper::new(ctx.sender(), initial_head).await?; + let (scraper, votes) = ChainScraper::new(ctx.sender(), initial_head).await?; for ((session, ref candidate_hash), status) in active_disputes { let votes: CandidateVotes = match overlay_db.load_candidate_votes(session, candidate_hash) { diff --git a/node/core/dispute-coordinator/src/metrics.rs b/node/core/dispute-coordinator/src/metrics.rs index 1fbe7e49e8b8..70cd49ac49d1 100644 --- a/node/core/dispute-coordinator/src/metrics.rs +++ b/node/core/dispute-coordinator/src/metrics.rs @@ -30,6 +30,8 @@ struct 
MetricsInner { queued_participations: prometheus::CounterVec, /// How long vote cleanup batches take. vote_cleanup_time: prometheus::Histogram, + /// Number of refrained participations. + refrained_participations: prometheus::Counter, } /// Candidate validation metrics. @@ -88,6 +90,12 @@ impl Metrics { pub(crate) fn time_vote_cleanup(&self) -> Option { self.0.as_ref().map(|metrics| metrics.vote_cleanup_time.start_timer()) } + + pub(crate) fn on_refrained_participation(&self) { + if let Some(metrics) = &self.0 { + metrics.refrained_participations.inc(); + } + } } impl metrics::Metrics for Metrics { @@ -147,6 +155,14 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + refrained_participations: prometheus::register( + prometheus::Counter::with_opts( + prometheus::Opts::new( + "polkadot_parachain_dispute_refrained_participations", + "Number of refrained participations. We refrain from participation if all of the following conditions are met: disputed candidate is not included, not backed and not confirmed.", + ))?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } diff --git a/node/core/dispute-coordinator/src/scraping/candidates.rs b/node/core/dispute-coordinator/src/scraping/candidates.rs new file mode 100644 index 000000000000..2fe797768cc2 --- /dev/null +++ b/node/core/dispute-coordinator/src/scraping/candidates.rs @@ -0,0 +1,148 @@ +use polkadot_primitives::v2::{BlockNumber, CandidateHash}; +use std::collections::{BTreeMap, HashMap, HashSet}; + +/// Keeps `CandidateHash` in reference counted way. +/// Each `insert` saves a value with `reference count == 1` or increases the reference +/// count if the value already exists. +/// Each `remove` decreases the reference count for the corresponding `CandidateHash`. +/// If the reference count reaches 0 - the value is removed. 
+struct RefCountedCandidates { + candidates: HashMap, +} + +impl RefCountedCandidates { + pub fn new() -> Self { + Self { candidates: HashMap::new() } + } + // If `CandidateHash` doesn't exist in the `HashMap` it is created and its reference + // count is set to 1. + // If `CandidateHash` already exists in the `HashMap` its reference count is increased. + pub fn insert(&mut self, candidate: CandidateHash) { + *self.candidates.entry(candidate).or_default() += 1; + } + + // If a `CandidateHash` with reference count equals to 1 is about to be removed - the + // candidate is dropped from the container too. + // If a `CandidateHash` with reference count bigger than 1 is about to be removed - the + // reference count is decreased and the candidate remains in the container. + pub fn remove(&mut self, candidate: &CandidateHash) { + match self.candidates.get_mut(candidate) { + Some(v) if *v > 1 => *v -= 1, + Some(v) => { + assert!(*v == 1); + self.candidates.remove(candidate); + }, + None => {}, + } + } + + pub fn contains(&self, candidate: &CandidateHash) -> bool { + self.candidates.contains_key(&candidate) + } +} + +#[cfg(test)] +mod ref_counted_candidates_tests { + use super::*; + use polkadot_primitives::v2::{BlakeTwo256, HashT}; + + #[test] + fn element_is_removed_when_refcount_reaches_zero() { + let mut container = RefCountedCandidates::new(); + + let zero = CandidateHash(BlakeTwo256::hash(&vec![0])); + let one = CandidateHash(BlakeTwo256::hash(&vec![1])); + // add two separate candidates + container.insert(zero); // refcount == 1 + container.insert(one); + + // and increase the reference count for the first + container.insert(zero); // refcount == 2 + + assert!(container.contains(&zero)); + assert!(container.contains(&one)); + + // remove once -> refcount == 1 + container.remove(&zero); + assert!(container.contains(&zero)); + assert!(container.contains(&one)); + + // remove once -> refcount == 0 + container.remove(&zero); + assert!(!container.contains(&zero)); + 
assert!(container.contains(&one)); + + // remove the other element + container.remove(&one); + assert!(!container.contains(&zero)); + assert!(!container.contains(&one)); + } +} + +/// Keeps track of scraped candidates. Supports `insert`, `remove_up_to_height` and `contains` +/// operations. +pub struct ScrapedCandidates { + /// Main data structure which keeps the candidates we know about. `contains` does lookups only here. + candidates: RefCountedCandidates, + /// Keeps track at which block number a candidate was inserted. Used in `remove_up_to_height`. + /// Without this tracking we won't be able to remove all candidates before block X. + candidates_by_block_number: BTreeMap>, +} + +impl ScrapedCandidates { + pub fn new() -> Self { + Self { + candidates: RefCountedCandidates::new(), + candidates_by_block_number: BTreeMap::new(), + } + } + + pub fn contains(&self, candidate_hash: &CandidateHash) -> bool { + self.candidates.contains(candidate_hash) + } + + // Removes all candidates up to a given height. The candidates at the block height are NOT removed. + pub fn remove_up_to_height(&mut self, height: &BlockNumber) { + let not_stale = self.candidates_by_block_number.split_off(&height); + let stale = std::mem::take(&mut self.candidates_by_block_number); + self.candidates_by_block_number = not_stale; + for candidates in stale.values() { + for c in candidates { + self.candidates.remove(c); + } + } + } + + pub fn insert(&mut self, block_number: BlockNumber, candidate_hash: CandidateHash) { + self.candidates.insert(candidate_hash); + self.candidates_by_block_number + .entry(block_number) + .or_default() + .insert(candidate_hash); + } + + // Used only for tests to verify the pruning doesn't leak data. 
+ #[cfg(test)] + pub fn candidates_by_block_number_is_empty(&self) -> bool { + self.candidates_by_block_number.is_empty() + } +} + +#[cfg(test)] +mod scraped_candidates_tests { + use super::*; + use polkadot_primitives::v2::{BlakeTwo256, HashT}; + + #[test] + fn stale_candidates_are_removed() { + let mut candidates = ScrapedCandidates::new(); + let target = CandidateHash(BlakeTwo256::hash(&vec![1, 2, 3])); + candidates.insert(1, target); + + assert!(candidates.contains(&target)); + + candidates.remove_up_to_height(&2); + assert!(!candidates.contains(&target)); + assert!(candidates.candidates_by_block_number_is_empty()); + } +} diff --git a/node/core/dispute-coordinator/src/scraping/mod.rs b/node/core/dispute-coordinator/src/scraping/mod.rs index 7d5d33e1ff4b..99a6e68cdfb5 100644 --- a/node/core/dispute-coordinator/src/scraping/mod.rs +++ b/node/core/dispute-coordinator/src/scraping/mod.rs @@ -14,10 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use std::{ - collections::{BTreeMap, HashSet}, - num::NonZeroUsize, -}; +use std::num::NonZeroUsize; use futures::channel::oneshot; use lru::LruCache; @@ -40,6 +37,8 @@ use crate::{ #[cfg(test)] mod tests; +mod candidates; + /// Number of hashes to keep in the LRU. /// /// @@ -58,18 +57,21 @@ const LRU_OBSERVED_BLOCKS_CAPACITY: NonZeroUsize = match NonZeroUsize::new(20) { /// /// Concretely: /// -/// - Monitors for inclusion events to keep track of candidates that have been included on chains. +/// - Monitors for `CandidateIncluded` events to keep track of candidates that have been +/// included on chains. +/// - Monitors for `CandidateBacked` events to keep track of all backed candidates. /// - Calls `FetchOnChainVotes` for each block to gather potentially missed votes from chain. /// /// With this information it provides a `CandidateComparator` and as a return value of /// `process_active_leaves_update` any scraped votes. 
+/// +/// Scraped candidates are available `CANDIDATE_LIFETIME_AFTER_FINALIZATION` more blocks +/// after finalization as a precaution not to prune them prematurely. pub struct ChainScraper { /// All candidates we have seen included, which not yet have been finalized. - included_candidates: HashSet, - /// including block -> `CandidateHash` - /// - /// We need this to clean up `included_candidates` on finalization. - candidates_by_block_number: BTreeMap>, + included_candidates: candidates::ScrapedCandidates, + /// All candidates we have seen backed + backed_candidates: candidates::ScrapedCandidates, /// Latest relay blocks observed by the provider. /// /// We assume that ancestors of cached blocks are already processed, i.e. we have saved @@ -85,6 +87,16 @@ impl ChainScraper { /// As long as we have `MAX_FINALITY_LAG` this makes sense as a value. pub(crate) const ANCESTRY_SIZE_LIMIT: u32 = MAX_FINALITY_LAG; + /// How many blocks after finalization a backed/included candidate should be kept. + /// We don't want to remove scraped candidates on finalization because we want to + /// be sure that disputes will conclude on abandoned forks. + /// Removing the candidate on finalization creates a possibility for an attacker to + /// avoid slashing. If a bad fork is abandoned too quickly because at the same time another + /// better one gets finalized the entries for the bad fork will be pruned and we + /// will never participate in a dispute for it. We want such disputes to conclude + /// in a timely manner so that the offenders are slashed. + pub(crate) const CANDIDATE_LIFETIME_AFTER_FINALIZATION: BlockNumber = 2; + /// Create a properly initialized `OrderingProvider`. /// /// Returns: `Self` and any scraped votes. 
@@ -96,8 +108,8 @@ impl ChainScraper { Sender: overseer::DisputeCoordinatorSenderTrait, { let mut s = Self { - included_candidates: HashSet::new(), - candidates_by_block_number: BTreeMap::new(), + included_candidates: candidates::ScrapedCandidates::new(), + backed_candidates: candidates::ScrapedCandidates::new(), last_observed_blocks: LruCache::new(LRU_OBSERVED_BLOCKS_CAPACITY), }; let update = @@ -107,10 +119,15 @@ impl ChainScraper { } /// Check whether we have seen a candidate included on any chain. - pub fn is_candidate_included(&mut self, candidate_hash: &CandidateHash) -> bool { + pub fn is_candidate_included(&self, candidate_hash: &CandidateHash) -> bool { self.included_candidates.contains(candidate_hash) } + /// Check whether the candidate is backed + pub fn is_candidate_backed(&self, candidate_hash: &CandidateHash) -> bool { + self.backed_candidates.contains(candidate_hash) + } + /// Query active leaves for any candidate `CandidateEvent::CandidateIncluded` events. /// /// and updates current heads, so we can query candidates for all non finalized blocks. @@ -120,7 +137,7 @@ impl ChainScraper { &mut self, sender: &mut Sender, update: &ActiveLeavesUpdate, - ) -> crate::error::Result> + ) -> Result> where Sender: overseer::DisputeCoordinatorSenderTrait, { @@ -158,21 +175,27 @@ impl ChainScraper { /// Prune finalized candidates. /// - /// Once a candidate lives in a relay chain block that's behind the finalized chain/got - /// finalized, we can treat it as low priority. 
- pub fn process_finalized_block(&mut self, finalized: &BlockNumber) { - let not_finalized = self.candidates_by_block_number.split_off(finalized); - let finalized = std::mem::take(&mut self.candidates_by_block_number); - self.candidates_by_block_number = not_finalized; - // Clean up finalized: - for finalized_candidate in finalized.into_values().flatten() { - self.included_candidates.remove(&finalized_candidate); + /// We keep each candidate for `CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks after finalization. + /// After that we treat it as low priority. + pub fn process_finalized_block(&mut self, finalized_block_number: &BlockNumber) { + // `CANDIDATE_LIFETIME_AFTER_FINALIZATION - 1` because `finalized_block_number` counts to the + // candidate lifetime. + match finalized_block_number.checked_sub(Self::CANDIDATE_LIFETIME_AFTER_FINALIZATION - 1) { + Some(key_to_prune) => { + self.backed_candidates.remove_up_to_height(&key_to_prune); + self.included_candidates.remove_up_to_height(&key_to_prune); + }, + None => { + // Nothing to prune. We are still in the beginning of the chain and there are not + // enough finalized blocks yet. + }, } + {} } /// Process candidate events of a block. /// - /// Keep track of all included candidates. + /// Keep track of all included and backed candidates. async fn process_candidate_events( &mut self, sender: &mut Sender, @@ -182,28 +205,33 @@ where Sender: overseer::DisputeCoordinatorSenderTrait, { - // Get included events: - let included = - get_candidate_events(sender, block_hash) - .await?
- .into_iter() - .filter_map(|ev| match ev { - CandidateEvent::CandidateIncluded(receipt, _, _, _) => Some(receipt), - _ => None, - }); - for receipt in included { - let candidate_hash = receipt.hash(); - gum::trace!( - target: LOG_TARGET, - ?candidate_hash, - ?block_number, - "Processing included event" - ); - self.included_candidates.insert(candidate_hash); - self.candidates_by_block_number - .entry(block_number) - .or_default() - .insert(candidate_hash); + // Get included and backed events: + for ev in get_candidate_events(sender, block_hash).await? { + match ev { + CandidateEvent::CandidateIncluded(receipt, _, _, _) => { + let candidate_hash = receipt.hash(); + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?block_number, + "Processing included event" + ); + self.included_candidates.insert(block_number, candidate_hash); + }, + CandidateEvent::CandidateBacked(receipt, _, _, _) => { + let candidate_hash = receipt.hash(); + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?block_number, + "Processing backed event" + ); + self.backed_candidates.insert(block_number, candidate_hash); + }, + _ => { + // skip the rest + }, + } } Ok(()) } diff --git a/node/core/dispute-coordinator/src/scraping/tests.rs b/node/core/dispute-coordinator/src/scraping/tests.rs index b6b5a1f633bf..6251891af5a3 100644 --- a/node/core/dispute-coordinator/src/scraping/tests.rs +++ b/node/core/dispute-coordinator/src/scraping/tests.rs @@ -73,7 +73,12 @@ impl TestState { assert_finalized_block_number_request(&mut ctx_handle, finalized_block_number).await; gum::trace!(target: LOG_TARGET, "After assert_finalized_block_number"); // No ancestors requests, as list would be empty. 
- assert_candidate_events_request(&mut ctx_handle, &chain).await; + assert_candidate_events_request( + &mut ctx_handle, + &chain, + get_backed_and_included_candidate_events, + ) + .await; assert_chain_vote_request(&mut ctx_handle, &chain).await; }; @@ -112,6 +117,10 @@ async fn process_active_leaves_update( .unwrap(); } +fn process_finalized_block(scraper: &mut ChainScraper, finalized: &BlockNumber) { + scraper.process_finalized_block(&finalized) +} + fn make_candidate_receipt(relay_parent: Hash) -> CandidateReceipt { let zeros = dummy_hash(); let descriptor = CandidateDescriptor { @@ -145,16 +154,66 @@ fn get_block_number_hash(n: BlockNumber) -> Hash { } /// Get a dummy event that corresponds to candidate inclusion for the given block number. -fn get_candidate_included_events(block_number: BlockNumber) -> Vec { - vec![CandidateEvent::CandidateIncluded( - make_candidate_receipt(get_block_number_hash(block_number)), +fn get_backed_and_included_candidate_events(block_number: BlockNumber) -> Vec { + let candidate_receipt = make_candidate_receipt(get_block_number_hash(block_number)); + vec![ + CandidateEvent::CandidateIncluded( + candidate_receipt.clone(), + HeadData::default(), + CoreIndex::from(0), + GroupIndex::from(0), + ), + CandidateEvent::CandidateBacked( + candidate_receipt, + HeadData::default(), + CoreIndex::from(0), + GroupIndex::from(0), + ), + ] +} + +fn get_backed_candidate_event(block_number: BlockNumber) -> Vec { + let candidate_receipt = make_candidate_receipt(get_block_number_hash(block_number)); + vec![CandidateEvent::CandidateBacked( + candidate_receipt, HeadData::default(), CoreIndex::from(0), GroupIndex::from(0), )] } +/// Hash for a 'magic' candidate. This is meant to be a special candidate used to verify special cases. +fn get_magic_candidate_hash() -> Hash { + BlakeTwo256::hash(&"abc".encode()) +} +/// Get a dummy event that corresponds to candidate inclusion for a hardcoded block number. 
+/// Used to simulate candidates included multiple times at different block heights. +fn get_backed_and_included_magic_candidate_events( + _block_number: BlockNumber, +) -> Vec { + let candidate_receipt = make_candidate_receipt(get_magic_candidate_hash()); + vec![ + CandidateEvent::CandidateIncluded( + candidate_receipt.clone(), + HeadData::default(), + CoreIndex::from(0), + GroupIndex::from(0), + ), + CandidateEvent::CandidateBacked( + candidate_receipt, + HeadData::default(), + CoreIndex::from(0), + GroupIndex::from(0), + ), + ] +} -async fn assert_candidate_events_request(virtual_overseer: &mut VirtualOverseer, chain: &[Hash]) { +async fn assert_candidate_events_request( + virtual_overseer: &mut VirtualOverseer, + chain: &[Hash], + event_generator: F, +) where + F: Fn(u32) -> Vec, +{ assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( @@ -163,7 +222,7 @@ async fn assert_candidate_events_request(virtual_overseer: &mut VirtualOverseer, )) => { let maybe_block_number = chain.iter().position(|h| *h == hash); let response = maybe_block_number - .map(|num| get_candidate_included_events(num as u32)) + .map(|num| event_generator(num as u32)) .unwrap_or_default(); tx.send(Ok(response)).unwrap(); } @@ -207,12 +266,15 @@ async fn assert_block_ancestors_request(virtual_overseer: &mut VirtualOverseer, ); } -async fn overseer_process_active_leaves_update( +async fn overseer_process_active_leaves_update( virtual_overseer: &mut VirtualOverseer, chain: &[Hash], finalized_block: BlockNumber, expected_ancestry_len: usize, -) { + event_generator: F, +) where + F: Fn(u32) -> Vec + Clone, +{ // Before walking through ancestors provider requests latest finalized block number. assert_finalized_block_number_request(virtual_overseer, finalized_block).await; // Expect block ancestors requests with respect to the ancestry step. 
@@ -221,7 +283,7 @@ async fn overseer_process_active_leaves_update( } // For each ancestry and the head return corresponding candidates inclusions. for _ in 0..expected_ancestry_len { - assert_candidate_events_request(virtual_overseer, chain).await; + assert_candidate_events_request(virtual_overseer, chain, event_generator.clone()).await; assert_chain_vote_request(virtual_overseer, chain).await; } } @@ -236,7 +298,9 @@ fn scraper_provides_included_state_when_initialized() { let TestState { mut chain, mut scraper, mut ctx } = state; assert!(!scraper.is_candidate_included(&candidate_2.hash())); + assert!(!scraper.is_candidate_backed(&candidate_2.hash())); assert!(scraper.is_candidate_included(&candidate_1.hash())); + assert!(scraper.is_candidate_backed(&candidate_1.hash())); // After next active leaves update we should see the candidate included. let next_update = next_leaf(&mut chain); @@ -248,11 +312,13 @@ fn scraper_provides_included_state_when_initialized() { &chain, finalized_block_number, expected_ancestry_len, + get_backed_and_included_candidate_events, ); join(process_active_leaves_update(ctx.sender(), &mut scraper, next_update), overseer_fut) .await; assert!(scraper.is_candidate_included(&candidate_2.hash())); + assert!(scraper.is_candidate_backed(&candidate_2.hash())); }); } @@ -274,6 +340,7 @@ fn scraper_requests_candidates_of_leaf_ancestors() { &chain, finalized_block_number, BLOCKS_TO_SKIP, + get_backed_and_included_candidate_events, ); join(process_active_leaves_update(ctx.sender(), &mut scraper, next_update), overseer_fut) .await; @@ -282,6 +349,7 @@ fn scraper_requests_candidates_of_leaf_ancestors() { for block_number in 1..next_block_number { let candidate = make_candidate_receipt(get_block_number_hash(block_number)); assert!(scraper.is_candidate_included(&candidate.hash())); + assert!(scraper.is_candidate_backed(&candidate.hash())); } }); } @@ -304,6 +372,7 @@ fn scraper_requests_candidates_of_non_cached_ancestors() { &chain, finalized_block_number, 
BLOCKS_TO_SKIP[0], + get_backed_and_included_candidate_events, ); join(process_active_leaves_update(ctx.sender(), &mut ordering, next_update), overseer_fut) .await; @@ -315,6 +384,7 @@ fn scraper_requests_candidates_of_non_cached_ancestors() { &chain, finalized_block_number, BLOCKS_TO_SKIP[1], + get_backed_and_included_candidate_events, ); join(process_active_leaves_update(ctx.sender(), &mut ordering, next_update), overseer_fut) .await; @@ -340,8 +410,170 @@ fn scraper_requests_candidates_of_non_finalized_ancestors() { &chain, finalized_block_number, BLOCKS_TO_SKIP - finalized_block_number as usize, // Expect the provider not to go past finalized block. + get_backed_and_included_candidate_events, ); join(process_active_leaves_update(ctx.sender(), &mut ordering, next_update), overseer_fut) .await; }); } + +#[test] +fn scraper_prunes_finalized_candidates() { + const TEST_TARGET_BLOCK_NUMBER: BlockNumber = 2; + + // How many blocks should we skip before sending a leaf update. + const BLOCKS_TO_SKIP: usize = 3; + + futures::executor::block_on(async { + let (state, mut virtual_overseer) = TestState::new().await; + + let TestState { mut chain, mut scraper, mut ctx } = state; + + // 1 because `TestState` starts at leaf 1. 
+ let next_update = (1..BLOCKS_TO_SKIP).map(|_| next_leaf(&mut chain)).last().unwrap(); + + let mut finalized_block_number = 1; + let expected_ancestry_len = BLOCKS_TO_SKIP - finalized_block_number as usize; + let overseer_fut = overseer_process_active_leaves_update( + &mut virtual_overseer, + &chain, + finalized_block_number, + expected_ancestry_len, + |block_num| { + if block_num == TEST_TARGET_BLOCK_NUMBER { + get_backed_and_included_candidate_events(block_num) + } else { + vec![] + } + }, + ); + join(process_active_leaves_update(ctx.sender(), &mut scraper, next_update), overseer_fut) + .await; + + let candidate = make_candidate_receipt(get_block_number_hash(TEST_TARGET_BLOCK_NUMBER)); + + // After `CANDIDATE_LIFETIME_AFTER_FINALIZATION` blocks the candidate should be removed + finalized_block_number = + TEST_TARGET_BLOCK_NUMBER + ChainScraper::CANDIDATE_LIFETIME_AFTER_FINALIZATION; + process_finalized_block(&mut scraper, &finalized_block_number); + + assert!(!scraper.is_candidate_backed(&candidate.hash())); + assert!(!scraper.is_candidate_included(&candidate.hash())); + }); +} + +#[test] +fn scraper_handles_backed_but_not_included_candidate() { + const TEST_TARGET_BLOCK_NUMBER: BlockNumber = 2; + + // How many blocks should we skip before sending a leaf update. 
+ const BLOCKS_TO_SKIP: usize = 3; + + futures::executor::block_on(async { + let (state, mut virtual_overseer) = TestState::new().await; + + let TestState { mut chain, mut scraper, mut ctx } = state; + + let next_update = (1..BLOCKS_TO_SKIP as BlockNumber) + .map(|_| next_leaf(&mut chain)) + .last() + .unwrap(); + + // Add `ActiveLeavesUpdate` containing `CandidateBacked` event for block `BLOCK_WITH_EVENTS` + let mut finalized_block_number = 1; + let expected_ancestry_len = BLOCKS_TO_SKIP - finalized_block_number as usize; + let overseer_fut = overseer_process_active_leaves_update( + &mut virtual_overseer, + &chain, + finalized_block_number, + expected_ancestry_len, + |block_num| { + if block_num == TEST_TARGET_BLOCK_NUMBER { + get_backed_candidate_event(block_num) + } else { + vec![] + } + }, + ); + join(process_active_leaves_update(ctx.sender(), &mut scraper, next_update), overseer_fut) + .await; + + // Finalize blocks to enforce pruning of scraped events + finalized_block_number += 1; + process_finalized_block(&mut scraper, &finalized_block_number); + + // `FIRST_TEST_BLOCK` is finalized, which is within `BACKED_CANDIDATE_LIFETIME_AFTER_FINALIZATION` window. + // The candidate should still be backed. + let candidate = make_candidate_receipt(get_block_number_hash(TEST_TARGET_BLOCK_NUMBER)); + assert!(!scraper.is_candidate_included(&candidate.hash())); + assert!(scraper.is_candidate_backed(&candidate.hash())); + + // Bump the finalized block outside `BACKED_CANDIDATE_LIFETIME_AFTER_FINALIZATION`. + // The candidate should be removed. 
+		assert!(
+			finalized_block_number <
+				TEST_TARGET_BLOCK_NUMBER + ChainScraper::CANDIDATE_LIFETIME_AFTER_FINALIZATION
+		);
+		finalized_block_number +=
+			TEST_TARGET_BLOCK_NUMBER + ChainScraper::CANDIDATE_LIFETIME_AFTER_FINALIZATION;
+		process_finalized_block(&mut scraper, &finalized_block_number);
+
+		assert!(!scraper.is_candidate_included(&candidate.hash()));
+		assert!(!scraper.is_candidate_backed(&candidate.hash()));
+	});
+}
+
+#[test]
+fn scraper_handles_the_same_candidate_included_in_two_different_block_heights() {
+	// Same candidate will be included in these two leaves
+	let test_targets = vec![2, 3];
+
+	// How many blocks should we skip before sending a leaf update.
+	const BLOCKS_TO_SKIP: usize = 3;
+
+	futures::executor::block_on(async {
+		let (state, mut virtual_overseer) = TestState::new().await;
+
+		let TestState { mut chain, mut scraper, mut ctx } = state;
+
+		// 1 because `TestState` starts at leaf 1.
+		let next_update = (1..BLOCKS_TO_SKIP).map(|_| next_leaf(&mut chain)).last().unwrap();
+
+		// Now we will add the same magic candidate at two different block heights.
+		// Check `get_backed_and_included_magic_candidate_events` implementation
+		let mut finalized_block_number = 1;
+		let expected_ancestry_len = BLOCKS_TO_SKIP - finalized_block_number as usize;
+		let overseer_fut = overseer_process_active_leaves_update(
+			&mut virtual_overseer,
+			&chain,
+			finalized_block_number,
+			expected_ancestry_len,
+			|block_num| {
+				if test_targets.contains(&block_num) {
+					get_backed_and_included_magic_candidate_events(block_num)
+				} else {
+					vec![]
+				}
+			},
+		);
+		join(process_active_leaves_update(ctx.sender(), &mut scraper, next_update), overseer_fut)
+			.await;
+
+		// Finalize blocks to enforce pruning of scraped events.
+		// The magic candidate was added twice, so it shouldn't be removed if we finalize two more blocks.
+ finalized_block_number = test_targets.first().expect("there are two block nums") + + ChainScraper::CANDIDATE_LIFETIME_AFTER_FINALIZATION; + process_finalized_block(&mut scraper, &finalized_block_number); + + let magic_candidate = make_candidate_receipt(get_magic_candidate_hash()); + assert!(scraper.is_candidate_backed(&magic_candidate.hash())); + assert!(scraper.is_candidate_included(&magic_candidate.hash())); + + // On the next finalization the magic candidate should be removed + finalized_block_number += 1; + process_finalized_block(&mut scraper, &finalized_block_number); + + assert!(!scraper.is_candidate_backed(&magic_candidate.hash())); + assert!(!scraper.is_candidate_included(&magic_candidate.hash())); + }); +} diff --git a/node/core/dispute-coordinator/src/tests.rs b/node/core/dispute-coordinator/src/tests.rs index a1ad315d2ea0..d7208c1a59eb 100644 --- a/node/core/dispute-coordinator/src/tests.rs +++ b/node/core/dispute-coordinator/src/tests.rs @@ -57,10 +57,10 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers::{make_subsystem_context, TestSubsystemContextHandle}; use polkadot_primitives::v2::{ - ApprovalVote, BlockNumber, CandidateCommitments, CandidateHash, CandidateReceipt, - DisputeStatement, GroupIndex, Hash, Header, IndexedVec, MultiDisputeStatementSet, - ScrapedOnChainVotes, SessionIndex, SessionInfo, SigningContext, ValidDisputeStatementKind, - ValidatorId, ValidatorIndex, ValidatorSignature, + ApprovalVote, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + CandidateReceipt, CoreIndex, DisputeStatement, GroupIndex, Hash, HeadData, Header, IndexedVec, + MultiDisputeStatementSet, ScrapedOnChainVotes, SessionIndex, SessionInfo, SigningContext, + ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, }; use crate::{ @@ -212,6 +212,7 @@ impl TestState { virtual_overseer: &mut VirtualOverseer, session: SessionIndex, block_number: BlockNumber, + candidate_events: Vec, ) { 
assert!(block_number > 0); @@ -239,8 +240,14 @@ impl TestState { ))) .await; - self.handle_sync_queries(virtual_overseer, block_hash, block_number, session) - .await; + self.handle_sync_queries( + virtual_overseer, + block_hash, + block_number, + session, + candidate_events, + ) + .await; } async fn handle_sync_queries( @@ -249,6 +256,7 @@ impl TestState { block_hash: Hash, block_number: BlockNumber, session: SessionIndex, + candidate_events: Vec, ) { // Order of messages is not fixed (different on initializing): #[derive(Debug)] @@ -352,7 +360,7 @@ impl TestState { _new_leaf, RuntimeApiRequest::CandidateEvents(tx), )) => { - tx.send(Ok(Vec::new())).unwrap(); + tx.send(Ok(candidate_events.clone())).unwrap(); } ); gum::trace!("After answering runtime api request"); @@ -401,8 +409,14 @@ impl TestState { ))) .await; - self.handle_sync_queries(virtual_overseer, *leaf, n as BlockNumber, session) - .await; + self.handle_sync_queries( + virtual_overseer, + *leaf, + n as BlockNumber, + session, + Vec::new(), + ) + .await; } } @@ -556,6 +570,26 @@ fn make_invalid_candidate_receipt() -> CandidateReceipt { dummy_candidate_receipt_bad_sig(Default::default(), Some(Default::default())) } +// Generate a `CandidateBacked` event from a `CandidateReceipt`. The rest is dummy data. +fn make_candidate_backed_event(candidate_receipt: CandidateReceipt) -> CandidateEvent { + CandidateEvent::CandidateBacked( + candidate_receipt, + HeadData(Vec::new()), + CoreIndex(0), + GroupIndex(0), + ) +} + +// Generate a `CandidateIncluded` event from a `CandidateReceipt`. The rest is dummy data. 
+fn make_candidate_included_event(candidate_receipt: CandidateReceipt) -> CandidateEvent { + CandidateEvent::CandidateIncluded( + candidate_receipt, + HeadData(Vec::new()), + CoreIndex(0), + GroupIndex(0), + ) +} + /// Handle request for approval votes: pub async fn handle_approval_vote_request( ctx_handle: &mut VirtualOverseer, @@ -587,7 +621,9 @@ fn too_many_unconfirmed_statements_are_considered_spam() { let candidate_receipt2 = make_invalid_candidate_receipt(); let candidate_hash2 = candidate_receipt2.hash(); - test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; let valid_vote1 = test_state .issue_backing_statement_with_index(ValidatorIndex(3), candidate_hash1, session) @@ -634,8 +670,9 @@ fn too_many_unconfirmed_statements_are_considered_spam() { handle_approval_vote_request(&mut virtual_overseer, &candidate_hash1, HashMap::new()) .await; - // Participation has to fail, otherwise the dispute will be confirmed. - participation_missing_availability(&mut virtual_overseer).await; + // Participation has to fail here, otherwise the dispute will be confirmed. However + // participation won't happen at all because the dispute is neither backed, not confirmed + // nor the candidate is included. Or in other words - we'll refrain from participation. 
{ let (tx, rx) = oneshot::channel(); @@ -718,7 +755,9 @@ fn approval_vote_import_works() { let candidate_receipt1 = make_valid_candidate_receipt(); let candidate_hash1 = candidate_receipt1.hash(); - test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; let valid_vote1 = test_state .issue_backing_statement_with_index(ValidatorIndex(3), candidate_hash1, session) @@ -762,8 +801,8 @@ fn approval_vote_import_works() { handle_approval_vote_request(&mut virtual_overseer, &candidate_hash1, approval_votes) .await; - // Participation has to fail, otherwise the dispute will be confirmed. - participation_missing_availability(&mut virtual_overseer).await; + // Participation won't happen here because the dispute is neither backed, not confirmed + // nor the candidate is included. Or in other words - we'll refrain from participation. { let (tx, rx) = oneshot::channel(); @@ -814,7 +853,17 @@ fn dispute_gets_confirmed_via_participation() { let candidate_receipt2 = make_invalid_candidate_receipt(); let candidate_hash2 = candidate_receipt2.hash(); - test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + 1, + vec![ + make_candidate_backed_event(candidate_receipt1.clone()), + make_candidate_backed_event(candidate_receipt2.clone()), + ], + ) + .await; let valid_vote1 = test_state .issue_explicit_statement_with_index( @@ -963,7 +1012,9 @@ fn dispute_gets_confirmed_at_byzantine_threshold() { let candidate_receipt2 = make_invalid_candidate_receipt(); let candidate_hash2 = candidate_receipt2.hash(); - test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; let valid_vote1 = test_state .issue_explicit_statement_with_index( @@ -1037,7 +1088,8 @@ fn 
dispute_gets_confirmed_at_byzantine_threshold() { handle_approval_vote_request(&mut virtual_overseer, &candidate_hash1, HashMap::new()) .await; - participation_missing_availability(&mut virtual_overseer).await; + // Participation won't happen here because the dispute is neither backed, not confirmed + // nor the candidate is included. Or in other words - we'll refrain from participation. { let (tx, rx) = oneshot::channel(); @@ -1124,7 +1176,9 @@ fn backing_statements_import_works_and_no_spam() { let candidate_receipt = make_valid_candidate_receipt(); let candidate_hash = candidate_receipt.hash(); - test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; let valid_vote1 = test_state .issue_backing_statement_with_index(ValidatorIndex(3), candidate_hash, session) @@ -1187,6 +1241,15 @@ fn backing_statements_import_works_and_no_spam() { .issue_backing_statement_with_index(ValidatorIndex(4), candidate_hash, session) .await; + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + 1, + vec![make_candidate_backed_event(candidate_receipt.clone())], + ) + .await; + let (pending_confirmation, confirmation_rx) = oneshot::channel(); // Backing vote import should not have accounted to spam slots, so this should succeed // as well: @@ -1228,7 +1291,14 @@ fn conflicting_votes_lead_to_dispute_participation() { let candidate_receipt = make_valid_candidate_receipt(); let candidate_hash = candidate_receipt.hash(); - test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + 1, + vec![make_candidate_backed_event(candidate_receipt.clone())], + ) + .await; let valid_vote = test_state .issue_explicit_statement_with_index( @@ -1353,7 +1423,14 @@ fn positive_votes_dont_trigger_participation() { let candidate_receipt = make_valid_candidate_receipt(); 
let candidate_hash = candidate_receipt.hash(); - test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + 1, + vec![make_candidate_backed_event(candidate_receipt.clone())], + ) + .await; let valid_vote = test_state .issue_explicit_statement_with_index( @@ -1466,7 +1543,9 @@ fn wrong_validator_index_is_ignored() { let candidate_receipt = make_valid_candidate_receipt(); let candidate_hash = candidate_receipt.hash(); - test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; let valid_vote = test_state .issue_explicit_statement_with_index( @@ -1544,7 +1623,14 @@ fn finality_votes_ignore_disputed_candidates() { let candidate_receipt = make_valid_candidate_receipt(); let candidate_hash = candidate_receipt.hash(); - test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + 1, + vec![make_candidate_backed_event(candidate_receipt.clone())], + ) + .await; let valid_vote = test_state .issue_explicit_statement_with_index( @@ -1654,7 +1740,14 @@ fn supermajority_valid_dispute_may_be_finalized() { let candidate_receipt = make_valid_candidate_receipt(); let candidate_hash = candidate_receipt.hash(); - test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + 1, + vec![make_candidate_backed_event(candidate_receipt.clone())], + ) + .await; let supermajority_threshold = polkadot_primitives::v2::supermajority_threshold(test_state.validators.len()); @@ -1794,7 +1887,14 @@ fn concluded_supermajority_for_non_active_after_time() { let candidate_receipt = make_valid_candidate_receipt(); let candidate_hash = candidate_receipt.hash(); - test_state.activate_leaf_at_session(&mut 
virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + 1, + vec![make_candidate_backed_event(candidate_receipt.clone())], + ) + .await; let supermajority_threshold = polkadot_primitives::v2::supermajority_threshold(test_state.validators.len()); @@ -1912,7 +2012,14 @@ fn concluded_supermajority_against_non_active_after_time() { let candidate_hash = candidate_receipt.hash(); - test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + 1, + vec![make_candidate_backed_event(candidate_receipt.clone())], + ) + .await; let supermajority_threshold = polkadot_primitives::v2::supermajority_threshold(test_state.validators.len()); @@ -2036,7 +2143,9 @@ fn resume_dispute_without_local_statement() { let candidate_receipt = make_valid_candidate_receipt(); let candidate_hash = candidate_receipt.hash(); - test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; let valid_vote = test_state .issue_explicit_statement_with_index( @@ -2074,7 +2183,8 @@ fn resume_dispute_without_local_statement() { .await; // Missing availability -> No local vote. - participation_missing_availability(&mut virtual_overseer).await; + // Participation won't happen here because the dispute is neither backed, not confirmed + // nor the candidate is included. Or in other words - we'll refrain from participation. 
assert_eq!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport)); @@ -2216,7 +2326,14 @@ fn resume_dispute_with_local_statement() { let candidate_receipt = make_valid_candidate_receipt(); let candidate_hash = candidate_receipt.hash(); - test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + 1, + vec![make_candidate_backed_event(candidate_receipt.clone())], + ) + .await; let local_valid_vote = test_state .issue_explicit_statement_with_index( @@ -2315,7 +2432,9 @@ fn resume_dispute_without_local_statement_or_local_key() { let candidate_receipt = make_valid_candidate_receipt(); let candidate_hash = candidate_receipt.hash(); - test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; let valid_vote = test_state .issue_explicit_statement_with_index( @@ -2408,7 +2527,9 @@ fn resume_dispute_with_local_statement_without_local_key() { let candidate_receipt = make_valid_candidate_receipt(); let candidate_hash = candidate_receipt.hash(); - test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; let local_valid_vote = test_state .issue_explicit_statement_with_index( @@ -2517,7 +2638,9 @@ fn issue_local_statement_does_cause_distribution_but_not_duplicate_participation let candidate_receipt = make_valid_candidate_receipt(); let candidate_hash = candidate_receipt.hash(); - test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; let other_vote = test_state .issue_explicit_statement_with_index( @@ -2590,7 +2713,9 @@ fn own_approval_vote_gets_distributed_on_dispute() { let candidate_receipt = 
make_valid_candidate_receipt(); let candidate_hash = candidate_receipt.hash(); - test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; let statement = test_state.issue_approval_vote_with_index( ValidatorIndex(0), @@ -2681,7 +2806,9 @@ fn negative_issue_local_statement_only_triggers_import() { let candidate_receipt = make_invalid_candidate_receipt(); let candidate_hash = candidate_receipt.hash(); - test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; virtual_overseer .send(FromOrchestra::Communication { @@ -2729,7 +2856,9 @@ fn redundant_votes_ignored() { let candidate_receipt = make_valid_candidate_receipt(); let candidate_hash = candidate_receipt.hash(); - test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; let valid_vote = test_state .issue_backing_statement_with_index(ValidatorIndex(1), candidate_hash, session) @@ -2787,3 +2916,195 @@ fn redundant_votes_ignored() { }) }); } + +#[test] +fn refrain_from_participation() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + let candidate_receipt = make_valid_candidate_receipt(); + let candidate_hash = candidate_receipt.hash(); + + // activate leaf - no backing/included event + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; + + // generate two votes + let valid_vote = test_state + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash, + session, + true, + ) + .await; + + let invalid_vote = test_state + .issue_explicit_statement_with_index( + ValidatorIndex(2), + 
candidate_hash, + session, + false, + ) + .await; + + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements: vec![ + (valid_vote, ValidatorIndex(1)), + (invalid_vote, ValidatorIndex(2)), + ], + pending_confirmation: None, + }, + }) + .await; + + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; + + { + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ActiveDisputes(tx), + }) + .await; + + assert_eq!(rx.await.unwrap().len(), 1); + + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::QueryCandidateVotes( + vec![(session, candidate_hash)], + tx, + ), + }) + .await; + + let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone(); + assert_eq!(votes.valid.len(), 1); + assert_eq!(votes.invalid.len(), 1); + } + + // activate leaf - no backing event + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + + // confirm that no participation request is made. + assert!(virtual_overseer.try_recv().await.is_none()); + + test_state + }) + }); +} + +/// We have got no `participation_for_backed_candidates` test because most of the other tests (e.g. +/// `dispute_gets_confirmed_via_participation`, `backing_statements_import_works_and_no_spam`) use +/// candidate backing event to trigger participation. If they pass - that case works. 
+#[test] +fn participation_for_included_candidates() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + let candidate_receipt = make_valid_candidate_receipt(); + let candidate_hash = candidate_receipt.hash(); + + // activate leaf - with candidate included event + test_state + .activate_leaf_at_session( + &mut virtual_overseer, + session, + 1, + vec![make_candidate_included_event(candidate_receipt.clone())], + ) + .await; + + // generate two votes + let valid_vote = test_state + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash, + session, + true, + ) + .await; + + let invalid_vote = test_state + .issue_explicit_statement_with_index( + ValidatorIndex(2), + candidate_hash, + session, + false, + ) + .await; + + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements: vec![ + (valid_vote, ValidatorIndex(1)), + (invalid_vote, ValidatorIndex(2)), + ], + pending_confirmation: None, + }, + }) + .await; + + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; + + participation_with_distribution( + &mut virtual_overseer, + &candidate_hash, + candidate_receipt.commitments_hash, + ) + .await; + + { + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ActiveDisputes(tx), + }) + .await; + + assert_eq!(rx.await.unwrap().len(), 1); + + // check if we have participated (casted a vote) + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::QueryCandidateVotes( + vec![(session, candidate_hash)], + tx, + ), + }) + .await; + + let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone(); + assert_eq!(votes.valid.len(), 2); 
// 2 => we have participated + assert_eq!(votes.invalid.len(), 1); + } + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + + test_state + }) + }); +} diff --git a/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md b/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md index c04e15b1a0e9..3d44e210db0e 100644 --- a/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md +++ b/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md @@ -142,7 +142,7 @@ There are two potential caveats with this though: dispute concludes in all cases? The answer is nuanced, but in general we cannot rely on it. The problem is first, that finalization and approval-voting is an off-chain process so there is no global consensus: As - soon as at least f+1 honest (f= n/3, where n is the number of + soon as at least f+1 honest (f=n/3, where n is the number of validators/nodes) nodes have seen the dispute conclude, finalization will take place and approval votes will be cleared. This would still be fine, if we had some guarantees that those honest nodes will be able to include those @@ -214,7 +214,7 @@ among nodes which includes current block producers (current authority set) which is an important property: If the dispute carries on across an era change, we need to ensure that the new validator set will learn about any disputes and their votes, so they can put that information on chain. Dispute-distribution -luckily has this property and sends votes to the current authority set always. +luckily has this property and always sends votes to the current authority set. The issue is, for dispute-distribution, nodes send only their own explicit (or in some cases their approval vote) in addition to some opposing vote. 
This guarantees that at least some backing or approval vote will be present at the @@ -327,7 +327,10 @@ participation at all on any _vote import_ if any of the following holds true: - We have seen the disputed candidate backed in some not yet finalized block on at least one fork of the chain. This ensures the candidate is at least not completely made up and there has been some effort already flown into that - candidate. + candidate. Generally speaking a dispute shouldn't be raised for a candidate + which is backed but is not yet included. Disputes are raised during approval + checking. We participate on such disputes as a precaution - maybe we haven't + seen the `CandidateIncluded` event yet? - The dispute is already confirmed: Meaning that 1/3+1 nodes already participated, as this suggests in our threat model that there was at least one honest node that already voted, so the dispute must be genuine. From 659297f2d449f59634af0ffb2b8a2aa0b33c7c7c Mon Sep 17 00:00:00 2001 From: Andronik Date: Sat, 12 Nov 2022 12:52:26 +0100 Subject: [PATCH 3/8] approval-voting: remove redundant validation check (#6266) * approval-voting: remove a redundant check * candidate-validation: remove unreachable check --- node/core/approval-voting/src/lib.rs | 20 +++----------------- node/core/candidate-validation/src/lib.rs | 5 ----- 2 files changed, 3 insertions(+), 22 deletions(-) diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index b96992df2c88..cfebd1065edc 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -2404,28 +2404,14 @@ async fn launch_approval( match val_rx.await { Err(_) => return ApprovalState::failed(validator_index, candidate_hash), - Ok(Ok(ValidationResult::Valid(commitments, _))) => { + Ok(Ok(ValidationResult::Valid(_, _))) => { // Validation checked out. Issue an approval command. If the underlying service is unreachable, // then there isn't anything we can do. 
gum::trace!(target: LOG_TARGET, ?candidate_hash, ?para_id, "Candidate Valid"); - let expected_commitments_hash = candidate.commitments_hash; - if commitments.hash() == expected_commitments_hash { - let _ = metrics_guard.take(); - return ApprovalState::approved(validator_index, candidate_hash) - } else { - // Commitments mismatch - issue a dispute. - issue_local_invalid_statement( - &mut sender, - session_index, - candidate_hash, - candidate.clone(), - ); - - metrics_guard.take().on_approval_invalid(); - return ApprovalState::failed(validator_index, candidate_hash) - } + let _ = metrics_guard.take(); + return ApprovalState::approved(validator_index, candidate_hash) }, Ok(Ok(ValidationResult::Invalid(reason))) => { gum::warn!( diff --git a/node/core/candidate-validation/src/lib.rs b/node/core/candidate-validation/src/lib.rs index a82a0feb78a0..7d9db4f3d794 100644 --- a/node/core/candidate-validation/src/lib.rs +++ b/node/core/candidate-validation/src/lib.rs @@ -467,11 +467,6 @@ where .await; if let Ok(ValidationResult::Valid(ref outputs, _)) = validation_result { - // If validation produces new commitments we consider the candidate invalid. 
- if candidate_receipt.commitments_hash != outputs.hash() { - return Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch)) - } - let (tx, rx) = oneshot::channel(); match runtime_api_request( sender, From 9a994265d0229d1f64a8039bc4601748c3d5140a Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Sat, 12 Nov 2022 15:14:35 +0100 Subject: [PATCH 4/8] remove fill_block (#6200) Co-authored-by: parity-processbot <> --- runtime/kusama/src/lib.rs | 17 ++++------------- runtime/polkadot/src/lib.rs | 17 ++++------------- 2 files changed, 8 insertions(+), 26 deletions(-) diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs index e399d7a0b35e..7ef451e52ea9 100644 --- a/runtime/kusama/src/lib.rs +++ b/runtime/kusama/src/lib.rs @@ -2046,7 +2046,7 @@ mod tests_fess { #[cfg(test)] mod multiplier_tests { use super::*; - use frame_support::{dispatch::GetDispatchInfo, traits::OnFinalize}; + use frame_support::{dispatch::DispatchInfo, traits::OnFinalize}; use runtime_common::{MinimumMultiplier, TargetBlockFullness}; use separator::Separatable; use sp_runtime::traits::Convert; @@ -2088,17 +2088,8 @@ mod multiplier_tests { let mut blocks = 0; let mut fees_paid = 0; - let call = frame_system::Call::::fill_block { - ratio: Perbill::from_rational( - block_weight.ref_time(), - BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap().ref_time(), - ), - }; - println!("calling {:?}", call); - let info = call.get_dispatch_info(); - // convert to outer call. 
- let call = RuntimeCall::System(call); - let len = call.using_encoded(|e| e.len()) as u32; + frame_system::Pallet::::set_block_consumed_resources(Weight::MAX, 0); + let info = DispatchInfo { weight: Weight::MAX, ..Default::default() }; let mut t: sp_io::TestExternalities = frame_system::GenesisConfig::default() .build_storage::() @@ -2112,7 +2103,7 @@ mod multiplier_tests { while multiplier <= Multiplier::from_u32(1) { t.execute_with(|| { // imagine this tx was called. - let fee = TransactionPayment::compute_fee(len, &info, 0); + let fee = TransactionPayment::compute_fee(0, &info, 0); fees_paid += fee; // this will update the multiplier. diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index 8beb551db376..25b142a13be1 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -2299,7 +2299,7 @@ mod test { #[cfg(test)] mod multiplier_tests { use super::*; - use frame_support::{dispatch::GetDispatchInfo, traits::OnFinalize}; + use frame_support::{dispatch::DispatchInfo, traits::OnFinalize}; use runtime_common::{MinimumMultiplier, TargetBlockFullness}; use separator::Separatable; use sp_runtime::traits::Convert; @@ -2341,17 +2341,8 @@ mod multiplier_tests { let mut blocks = 0; let mut fees_paid = 0; - let call = frame_system::Call::::fill_block { - ratio: Perbill::from_rational( - block_weight.ref_time(), - BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap().ref_time(), - ), - }; - println!("calling {:?}", call); - let info = call.get_dispatch_info(); - // convert to outer call. 
- let call = RuntimeCall::System(call); - let len = call.using_encoded(|e| e.len()) as u32; + frame_system::Pallet::::set_block_consumed_resources(Weight::MAX, 0); + let info = DispatchInfo { weight: Weight::MAX, ..Default::default() }; let mut t: sp_io::TestExternalities = frame_system::GenesisConfig::default() .build_storage::() @@ -2365,7 +2356,7 @@ mod multiplier_tests { while multiplier <= Multiplier::from_u32(1) { t.execute_with(|| { // imagine this tx was called. - let fee = TransactionPayment::compute_fee(len, &info, 0); + let fee = TransactionPayment::compute_fee(0, &info, 0); fees_paid += fee; // this will update the multiplier. From 1d4627558d29ada672897d481fd48d729ac0c36e Mon Sep 17 00:00:00 2001 From: Andronik Date: Sun, 13 Nov 2022 20:14:24 +0100 Subject: [PATCH 5/8] fix a compilation warning (#6279) Fixes #6277. --- Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index d6ff78d801e2..4b0e2047bf64 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -126,7 +126,6 @@ maintenance = { status = "actively-developed" } # This list is ordered alphabetically. [profile.dev.package] blake2 = { opt-level = 3 } -blake2-rfc = { opt-level = 3 } blake2b_simd = { opt-level = 3 } chacha20poly1305 = { opt-level = 3 } cranelift-codegen = { opt-level = 3 } From 89dc70f9cdeedf4312c2ea67c871d043a92e0148 Mon Sep 17 00:00:00 2001 From: eskimor Date: Mon, 14 Nov 2022 12:41:45 +0100 Subject: [PATCH 6/8] Only report concluded if there is an actual dispute. (#6270) * Only report concluded if there is an actual dispute. Hence no "non"-disputes will be added to disputes anymore. * Fix redundant check. * Test for no onesided disputes. 
Co-authored-by: eskimor --- node/core/dispute-coordinator/src/import.rs | 116 ++++++++---------- .../dispute-coordinator/src/initialized.rs | 65 +++++----- node/core/dispute-coordinator/src/tests.rs | 62 ++++++++++ node/primitives/src/disputes/message.rs | 3 + node/primitives/src/disputes/status.rs | 27 +++- 5 files changed, 166 insertions(+), 107 deletions(-) diff --git a/node/core/dispute-coordinator/src/import.rs b/node/core/dispute-coordinator/src/import.rs index 8eb3d173dcf7..c0f0d3d9e009 100644 --- a/node/core/dispute-coordinator/src/import.rs +++ b/node/core/dispute-coordinator/src/import.rs @@ -28,7 +28,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; -use polkadot_node_primitives::{CandidateVotes, SignedDisputeStatement}; +use polkadot_node_primitives::{CandidateVotes, DisputeStatus, SignedDisputeStatement, Timestamp}; use polkadot_node_subsystem_util::rolling_session_window::RollingSessionWindow; use polkadot_primitives::v2::{ CandidateReceipt, DisputeStatement, IndexedVec, SessionIndex, SessionInfo, @@ -154,22 +154,8 @@ pub struct CandidateVoteState { /// Information about own votes: own_vote: OwnVoteState, - /// Whether or not the dispute concluded invalid. - concluded_invalid: bool, - - /// Whether or not the dispute concluded valid. - /// - /// Note: Due to equivocations it is technically possible for a dispute to conclude both valid - /// and invalid. In that case the invalid result takes precedence. - concluded_valid: bool, - - /// There is an ongoing dispute and we reached f+1 votes -> the dispute is confirmed - /// - /// as at least one honest validator cast a vote for the candidate. - is_confirmed: bool, - - /// Whether or not we have an ongoing dispute. - is_disputed: bool, + /// Current dispute status, if there is any. 
+ dispute_status: Option, } impl CandidateVoteState { @@ -179,18 +165,11 @@ impl CandidateVoteState { pub fn new_from_receipt(candidate_receipt: CandidateReceipt) -> Self { let votes = CandidateVotes { candidate_receipt, valid: BTreeMap::new(), invalid: BTreeMap::new() }; - Self { - votes, - own_vote: OwnVoteState::NoVote, - concluded_invalid: false, - concluded_valid: false, - is_confirmed: false, - is_disputed: false, - } + Self { votes, own_vote: OwnVoteState::NoVote, dispute_status: None } } /// Create a new `CandidateVoteState` from already existing votes. - pub fn new<'a>(votes: CandidateVotes, env: &CandidateEnvironment<'a>) -> Self { + pub fn new<'a>(votes: CandidateVotes, env: &CandidateEnvironment<'a>, now: Timestamp) -> Self { let own_vote = OwnVoteState::new(&votes, env); let n_validators = env.validators().len(); @@ -198,16 +177,31 @@ impl CandidateVoteState { let supermajority_threshold = polkadot_primitives::v2::supermajority_threshold(n_validators); - let concluded_invalid = votes.invalid.len() >= supermajority_threshold; - let concluded_valid = votes.valid.len() >= supermajority_threshold; - // We have a dispute, if we have votes on both sides: let is_disputed = !votes.invalid.is_empty() && !votes.valid.is_empty(); - let byzantine_threshold = polkadot_primitives::v2::byzantine_threshold(n_validators); - let is_confirmed = votes.voted_indices().len() > byzantine_threshold && is_disputed; + let dispute_status = if is_disputed { + let mut status = DisputeStatus::active(); + let byzantine_threshold = polkadot_primitives::v2::byzantine_threshold(n_validators); + let is_confirmed = votes.voted_indices().len() > byzantine_threshold; + if is_confirmed { + status = status.confirm(); + }; + let concluded_for = votes.valid.len() >= supermajority_threshold; + if concluded_for { + status = status.conclude_for(now); + }; + + let concluded_against = votes.invalid.len() >= supermajority_threshold; + if concluded_against { + status = status.conclude_against(now); + 
}; + Some(status) + } else { + None + }; - Self { votes, own_vote, concluded_invalid, concluded_valid, is_confirmed, is_disputed } + Self { votes, own_vote, dispute_status } } /// Import fresh statements. @@ -217,6 +211,7 @@ impl CandidateVoteState { self, env: &CandidateEnvironment, statements: Vec<(SignedDisputeStatement, ValidatorIndex)>, + now: Timestamp, ) -> ImportResult { let (mut votes, old_state) = self.into_old_state(); @@ -294,7 +289,7 @@ impl CandidateVoteState { } } - let new_state = Self::new(votes, env); + let new_state = Self::new(votes, env, now); ImportResult { old_state, @@ -313,32 +308,15 @@ impl CandidateVoteState { /// Extract `CandidateVotes` for handling import of new statements. fn into_old_state(self) -> (CandidateVotes, CandidateVoteState<()>) { - let CandidateVoteState { - votes, - own_vote, - concluded_invalid, - concluded_valid, - is_confirmed, - is_disputed, - } = self; - ( - votes, - CandidateVoteState { - votes: (), - own_vote, - concluded_invalid, - concluded_valid, - is_confirmed, - is_disputed, - }, - ) + let CandidateVoteState { votes, own_vote, dispute_status } = self; + (votes, CandidateVoteState { votes: (), own_vote, dispute_status }) } } impl CandidateVoteState { /// Whether or not we have an ongoing dispute. pub fn is_disputed(&self) -> bool { - self.is_disputed + self.dispute_status.is_some() } /// Whether there is an ongoing confirmed dispute. @@ -346,7 +324,7 @@ impl CandidateVoteState { /// This checks whether there is a dispute ongoing and we have more than byzantine threshold /// votes. pub fn is_confirmed(&self) -> bool { - self.is_confirmed + self.dispute_status.map_or(false, |s| s.is_confirmed_concluded()) } /// This machine already cast some vote in that dispute/for that candidate. @@ -359,14 +337,19 @@ impl CandidateVoteState { self.own_vote.approval_votes() } - /// Whether or not this dispute has already enough valid votes to conclude. 
- pub fn is_concluded_valid(&self) -> bool { - self.concluded_valid + /// Whether or not there is a dispute and it has already enough valid votes to conclude. + pub fn has_concluded_for(&self) -> bool { + self.dispute_status.map_or(false, |s| s.has_concluded_for()) + } + + /// Whether or not there is a dispute and it has already enough invalid votes to conclude. + pub fn has_concluded_against(&self) -> bool { + self.dispute_status.map_or(false, |s| s.has_concluded_against()) } - /// Whether or not this dispute has already enough invalid votes to conclude. - pub fn is_concluded_invalid(&self) -> bool { - self.concluded_invalid + /// Get access to the dispute status, in case there is one. + pub fn dispute_status(&self) -> &Option { + &self.dispute_status } /// Access to underlying votes. @@ -451,18 +434,18 @@ impl ImportResult { } /// Whether or not any dispute just concluded valid due to the import. - pub fn is_freshly_concluded_valid(&self) -> bool { - !self.old_state().is_concluded_valid() && self.new_state().is_concluded_valid() + pub fn is_freshly_concluded_for(&self) -> bool { + !self.old_state().has_concluded_for() && self.new_state().has_concluded_for() } /// Whether or not any dispute just concluded invalid due to the import. - pub fn is_freshly_concluded_invalid(&self) -> bool { - !self.old_state().is_concluded_invalid() && self.new_state().is_concluded_invalid() + pub fn is_freshly_concluded_against(&self) -> bool { + !self.old_state().has_concluded_against() && self.new_state().has_concluded_against() } /// Whether or not any dispute just concluded either invalid or valid due to the import. pub fn is_freshly_concluded(&self) -> bool { - self.is_freshly_concluded_invalid() || self.is_freshly_concluded_valid() + self.is_freshly_concluded_against() || self.is_freshly_concluded_for() } /// Modify this `ImportResult`s, by importing additional approval votes. 
@@ -473,6 +456,7 @@ impl ImportResult { self, env: &CandidateEnvironment, approval_votes: HashMap, + now: Timestamp, ) -> Self { let Self { old_state, @@ -508,7 +492,7 @@ impl ImportResult { } } - let new_state = CandidateVoteState::new(votes, env); + let new_state = CandidateVoteState::new(votes, env, now); Self { old_state, diff --git a/node/core/dispute-coordinator/src/initialized.rs b/node/core/dispute-coordinator/src/initialized.rs index 901a6d863ed2..0df1a620826c 100644 --- a/node/core/dispute-coordinator/src/initialized.rs +++ b/node/core/dispute-coordinator/src/initialized.rs @@ -748,7 +748,7 @@ impl Initialized { .load_candidate_votes(session, &candidate_hash)? .map(CandidateVotes::from) { - Some(votes) => CandidateVoteState::new(votes, &env), + Some(votes) => CandidateVoteState::new(votes, &env, now), None => if let MaybeCandidateReceipt::Provides(candidate_receipt) = candidate_receipt { CandidateVoteState::new_from_receipt(candidate_receipt) @@ -766,7 +766,7 @@ impl Initialized { gum::trace!(target: LOG_TARGET, ?candidate_hash, ?session, "Loaded votes"); let import_result = { - let intermediate_result = old_state.import_statements(&env, statements); + let intermediate_result = old_state.import_statements(&env, statements, now); // Handle approval vote import: // @@ -803,7 +803,7 @@ impl Initialized { ); intermediate_result }, - Ok(votes) => intermediate_result.import_approval_votes(&env, votes), + Ok(votes) => intermediate_result.import_approval_votes(&env, votes, now), } } else { gum::trace!( @@ -977,43 +977,34 @@ impl Initialized { } // All good, update recent disputes if state has changed: - if import_result.dispute_state_changed() { - let mut recent_disputes = overlay_db.load_recent_disputes()?.unwrap_or_default(); + if let Some(new_status) = new_state.dispute_status() { + // Only bother with db access, if there was an actual change. 
+ if import_result.dispute_state_changed() { + let mut recent_disputes = overlay_db.load_recent_disputes()?.unwrap_or_default(); + + let status = + recent_disputes.entry((session, candidate_hash)).or_insert_with(|| { + gum::info!( + target: LOG_TARGET, + ?candidate_hash, + session, + "New dispute initiated for candidate.", + ); + DisputeStatus::active() + }); + + *status = *new_status; - let status = recent_disputes.entry((session, candidate_hash)).or_insert_with(|| { - gum::info!( + gum::trace!( target: LOG_TARGET, ?candidate_hash, - session, - "New dispute initiated for candidate.", + ?status, + has_concluded_for = ?new_state.has_concluded_for(), + has_concluded_against = ?new_state.has_concluded_against(), + "Writing recent disputes with updates for candidate" ); - DisputeStatus::active() - }); - - if new_state.is_confirmed() { - *status = status.confirm(); + overlay_db.write_recent_disputes(recent_disputes); } - - // Note: concluded-invalid overwrites concluded-valid, - // so we do this check first. Dispute state machine is - // non-commutative. 
- if new_state.is_concluded_valid() { - *status = status.concluded_for(now); - } - - if new_state.is_concluded_invalid() { - *status = status.concluded_against(now); - } - - gum::trace!( - target: LOG_TARGET, - ?candidate_hash, - ?status, - is_concluded_valid = ?new_state.is_concluded_valid(), - is_concluded_invalid = ?new_state.is_concluded_invalid(), - "Writing recent disputes with updates for candidate" - ); - overlay_db.write_recent_disputes(recent_disputes); } // Update metrics: @@ -1036,7 +1027,7 @@ impl Initialized { ); self.metrics.on_approval_votes(import_result.imported_approval_votes()); - if import_result.is_freshly_concluded_valid() { + if import_result.is_freshly_concluded_for() { gum::info!( target: LOG_TARGET, ?candidate_hash, @@ -1045,7 +1036,7 @@ impl Initialized { ); self.metrics.on_concluded_valid(); } - if import_result.is_freshly_concluded_invalid() { + if import_result.is_freshly_concluded_against() { gum::info!( target: LOG_TARGET, ?candidate_hash, diff --git a/node/core/dispute-coordinator/src/tests.rs b/node/core/dispute-coordinator/src/tests.rs index d7208c1a59eb..b2f779041a4c 100644 --- a/node/core/dispute-coordinator/src/tests.rs +++ b/node/core/dispute-coordinator/src/tests.rs @@ -2917,6 +2917,68 @@ fn redundant_votes_ignored() { }); } +#[test] +/// Make sure no disputes are recorded when there are no opposing votes, even if we reached supermajority. 
+fn no_onesided_disputes() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + let candidate_receipt = make_valid_candidate_receipt(); + let candidate_hash = candidate_receipt.hash(); + test_state + .activate_leaf_at_session(&mut virtual_overseer, session, 1, Vec::new()) + .await; + + let mut statements = Vec::new(); + for index in 1..10 { + statements.push(( + test_state + .issue_backing_statement_with_index( + ValidatorIndex(index), + candidate_hash, + session, + ) + .await, + ValidatorIndex(index), + )); + } + + let (pending_confirmation, confirmation_rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements, + pending_confirmation: Some(pending_confirmation), + }, + }) + .await; + assert_matches!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport)); + + // We should not have any active disputes now. + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ActiveDisputes(tx), + }) + .await; + + assert!(rx.await.unwrap().is_empty()); + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + + // No more messages expected: + assert!(virtual_overseer.try_recv().await.is_none()); + + test_state + }) + }); +} + #[test] fn refrain_from_participation() { test_harness(|mut test_state, mut virtual_overseer| { diff --git a/node/primitives/src/disputes/message.rs b/node/primitives/src/disputes/message.rs index 93d4e804f906..1a943f8dcee6 100644 --- a/node/primitives/src/disputes/message.rs +++ b/node/primitives/src/disputes/message.rs @@ -33,6 +33,9 @@ use polkadot_primitives::v2::{ /// /// And most likely has been constructed correctly. 
This is used with /// `DisputeDistributionMessage::SendDispute` for sending out votes. +/// +/// NOTE: This is sent over the wire, any changes are a change in protocol and need to be +/// versioned. #[derive(Debug, Clone)] pub struct DisputeMessage(UncheckedDisputeMessage); diff --git a/node/primitives/src/disputes/status.rs b/node/primitives/src/disputes/status.rs index c0ffb907423b..7cc26c1a1f7a 100644 --- a/node/primitives/src/disputes/status.rs +++ b/node/primitives/src/disputes/status.rs @@ -19,8 +19,12 @@ use parity_scale_codec::{Decode, Encode}; /// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS reboots. pub type Timestamp = u64; -/// The status of dispute. This is a state machine which can be altered by the -/// helper methods. +/// The status of dispute. +/// +/// As managed by the dispute coordinator. +/// +/// NOTE: This status is persisted to the database, any changes have to be versioned and a db +/// migration will be needed. #[derive(Debug, Clone, Copy, Encode, Decode, PartialEq)] pub enum DisputeStatus { /// The dispute is active and unconcluded. @@ -69,9 +73,24 @@ impl DisputeStatus { } } + /// Concluded valid? + pub fn has_concluded_for(&self) -> bool { + match self { + &DisputeStatus::ConcludedFor(_) => true, + _ => false, + } + } + /// Concluded invalid? + pub fn has_concluded_against(&self) -> bool { + match self { + &DisputeStatus::ConcludedAgainst(_) => true, + _ => false, + } + } + /// Transition the status to a new status after observing the dispute has concluded for the candidate. /// This may be a no-op if the status was already concluded. 
- pub fn concluded_for(self, now: Timestamp) -> DisputeStatus { + pub fn conclude_for(self, now: Timestamp) -> DisputeStatus { match self { DisputeStatus::Active | DisputeStatus::Confirmed => DisputeStatus::ConcludedFor(now), DisputeStatus::ConcludedFor(at) => DisputeStatus::ConcludedFor(std::cmp::min(at, now)), @@ -81,7 +100,7 @@ impl DisputeStatus { /// Transition the status to a new status after observing the dispute has concluded against the candidate. /// This may be a no-op if the status was already concluded. - pub fn concluded_against(self, now: Timestamp) -> DisputeStatus { + pub fn conclude_against(self, now: Timestamp) -> DisputeStatus { match self { DisputeStatus::Active | DisputeStatus::Confirmed => DisputeStatus::ConcludedAgainst(now), From 624ae5764086cc7edbe6e455e816b73765219906 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Mon, 14 Nov 2022 12:59:56 +0100 Subject: [PATCH 7/8] [ci] fix buildah image (#6281) --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 0dd640186bb8..877004694d79 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -152,7 +152,7 @@ default: .build-push-image: extends: - .kubernetes-env - image: quay.io/buildah/stable + image: quay.io/buildah/stable:v1.27 before_script: - test -s ./artifacts/VERSION || exit 1 - test -s ./artifacts/EXTRATAG || exit 1 From 353c57b3d34a58711f93926c1b2dc17b2642809e Mon Sep 17 00:00:00 2001 From: eskimor Date: Tue, 15 Nov 2022 10:04:24 +0100 Subject: [PATCH 8/8] Revert special casing of Kusama for grandpa rounds. 
(#6217) Co-authored-by: eskimor --- node/service/src/lib.rs | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index 18218a8aba8e..3e535f3bd1f5 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -1221,22 +1221,11 @@ where } } - // Reduce grandpa load on Kusama and test networks. This will slow down finality by - // approximately one slot duration, but will reduce load. We would like to see the impact on - // Kusama, see: https://github.com/paritytech/polkadot/issues/5464 - let gossip_duration = if chain_spec.is_versi() || - chain_spec.is_wococo() || - chain_spec.is_rococo() || - chain_spec.is_kusama() - { - Duration::from_millis(2000) - } else { - Duration::from_millis(1000) - }; - let config = grandpa::Config { // FIXME substrate#1578 make this available through chainspec - gossip_duration, + // Grandpa performance can be improved a bit by tuning this parameter, see: + // https://github.com/paritytech/polkadot/issues/5464 + gossip_duration: Duration::from_millis(1000), justification_period: 512, name: Some(name), observer_enabled: false,