From 59d9c0ae8b8401a5e23086b59cd4e3241f93b094 Mon Sep 17 00:00:00 2001 From: geemo Date: Fri, 14 Jul 2023 11:34:28 -0500 Subject: [PATCH 01/37] Add bron_kerbosch from the satalia and sigp implementation --- .../operation_pool/src/bron_kerbosch.rs | 167 ++++++++++++++++++ beacon_node/operation_pool/src/lib.rs | 1 + 2 files changed, 168 insertions(+) create mode 100644 beacon_node/operation_pool/src/bron_kerbosch.rs diff --git a/beacon_node/operation_pool/src/bron_kerbosch.rs b/beacon_node/operation_pool/src/bron_kerbosch.rs new file mode 100644 index 00000000000..8e903ac1b8e --- /dev/null +++ b/beacon_node/operation_pool/src/bron_kerbosch.rs @@ -0,0 +1,167 @@ +/// Entry point for the Bron-Kerbosh algorithm. Takes a vector of `vertices` of type +/// `T : Compatible`. Returns all the maximal cliques (as a matrix of indices) for the graph +/// `G = (V,E)` where `V` is `vertices` and `E` encodes the `is_compatible` relationship. + +pub fn bron_kerbosh bool>( + vertices: &Vec, + is_compatible: F, +) -> Vec> { + // build neighbourhoods and degeneracy ordering, also move to index-based reasoning + let neighbourhoods = compute_neigbourhoods(vertices, is_compatible); + let ordering = degeneracy_order(vertices.len(), &neighbourhoods); + + // create empty vector to store cliques + let mut cliques: Vec> = vec![]; + let mut publish_clique = |c| cliques.push(c); + + for i in 0..ordering.len() { + let vi = ordering[i]; + let p = (i + 1..ordering.len()) + .filter(|j| neighbourhoods[vi].contains(&ordering[*j])) + .map(|j| ordering[j]) + .collect(); + let r = vec![vi]; + let x = (0..i) + .filter(|j| neighbourhoods[vi].contains(&ordering[*j])) + .map(|j| ordering[j]) + .collect(); + bron_kerbosh_aux(r, p, x, &neighbourhoods, &mut publish_clique) + } + + cliques +} + +/// A function to the neighbourhoods for all nodes in the list. The neighbourhood `N(a)` of a +/// vertex `a` in `vertices` is the set of vertices `b` in `vertices` such that +/// `is_compatible(&a, &b) == true`. The function assumes that `is_compatible` is symmetric, +/// and returns a symmetric matrix (`Vec>`) of indices, where each index corresponds +/// to the relative vertex in `vertices`. +fn compute_neigbourhoods bool>( + vertices: &Vec, + is_compatible: F, +) -> Vec> { + let mut neighbourhoods = vec![]; + neighbourhoods.resize_with(vertices.len(), Vec::new); + for i in 0..vertices.len() - 1 { + for j in i + 1..vertices.len() { + if is_compatible(&vertices[i], &vertices[j]) { + neighbourhoods[i].push(j); + neighbourhoods[j].push(i); + } + } + } + neighbourhoods +} + +/// Produces a degeneracy ordering of a set of vertices. +fn degeneracy_order(num_vertices: usize, neighbourhoods: &[Vec]) -> Vec { + let mut v: Vec = (0..num_vertices).collect(); + let mut o = vec![]; + // move vertices from v to o in order of minimum degree + while !v.is_empty() { + let m: Option = (0..v.len()).min_by_key(|i| neighbourhoods[*i].len()); + if let Some(i) = m { + o.push(v[i]); + v.remove(i); + } else { + break; + } + } + o +} + +/// Auxiliary function to be used in the recursive call of the Bron-Kerbosh algorithm. 
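+///
+/// The recursion maintains the classic Bron-Kerbosch invariant: every vertex in
+/// `p` and `x` is adjacent to every vertex in `r`, so `r` is always a clique.
+/// When `p` and `x` are both empty, `r` can no longer be extended and is
+/// reported as maximal. Pivoting restricts branching to `p \ N(pivot)`, which is
+/// sound because any maximal clique extending `r` must contain either the pivot
+/// or one of its non-neighbours.
+///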
+/// Parameters +/// * `r` - a working clique that is being built +/// * `p` - a set of candidate vertices to be added to r +/// * `x` - a set of vertices that have been explored and shouldn't be added to r +/// * `neighbourhoods` - a data structure to hold the neighbourhoods of each vertex +/// * `publish_clique` - a callback function to call whenever a clique has been produced +fn bron_kerbosh_aux( + r: Vec, + p: Vec, + x: Vec, + neighbourhoods: &Vec>, + publish_clique: &mut F, +) where + F: FnMut(Vec), +{ + if p.is_empty() && x.is_empty() { + publish_clique(r); + return; + } + + // modified p (p \ neighbourhood(pivot)), modified x + let (mut ip, mut mp, mut mx) = (p.clone(), p.clone(), x.clone()); + let pivot = find_pivot(&p, &x, neighbourhoods); + ip.retain(|e| !neighbourhoods[pivot].contains(e)); + + // while !mp.is_empty() { + while !ip.is_empty() { + // v + let v = ip[0]; + + let n = &neighbourhoods[v]; + + let (mut nr, mut np, mut nx) = (r.clone(), mp.clone(), mx.clone()); + + // r U { v } + nr.push(v); + + // p intersect neighbourhood { v } + np.retain(|e| n.contains(e)); + + // x intersect neighbourhood { v } + nx.retain(|e| n.contains(e)); + + // recursive call + bron_kerbosh_aux(nr, np, nx, neighbourhoods, publish_clique); + + // p \ { v }, x U { v } + mp.remove(mp.iter().position(|x| *x == v).unwrap()); + ip = mp.clone(); + ip.retain(|e| !neighbourhoods[pivot].contains(e)); + mx.push(v); + } +} + +/// Identifies pivot for Bron-Kerbosh pivoting technique. +fn find_pivot(p: &[usize], x: &[usize], neighbourhoods: &[Vec]) -> usize { + let mut px = p.to_vec(); + px.append(&mut x.to_vec()); + *px.iter() + .min_by_key(|&e| { + let pp = p.to_vec(); + pp.iter().filter(|ee| neighbourhoods[*e].contains(ee)).count() + }) + .unwrap() +} + +#[cfg(test)] +mod tests { + use super::*; + #[test] + fn bron_kerbosch_small_test() { + let vertices: Vec = (0..7).collect(); + let edges = vec![ + (0, 1), + (0, 2), + (0, 3), + (1, 2), + (1, 3), + (2, 3), + (0, 4), + (4, 5), + (4, 6), + (1, 6), + (0, 6), + (4, 6), + ]; + + let is_compatible = |first: &usize, second: &usize| -> bool { + edges.contains(&(*first, *second)) || edges.contains(&(*first, *second)) + }; + + println!("{:?}", bron_kerbosh(&vertices, is_compatible)); + } +} diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 7e1ddb1fd2f..b4bd2fb6760 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -3,6 +3,7 @@ mod attestation_id; mod attestation_storage; mod attester_slashing; mod bls_to_execution_changes; +mod bron_kerbosch; mod max_cover; mod metrics; mod persistence; From 889da778d1f741db1520f18bd8791222db6e8ac8 Mon Sep 17 00:00:00 2001 From: geemo Date: Fri, 14 Jul 2023 11:39:07 -0500 Subject: [PATCH 02/37] I needed to rebase and fmt --- beacon_node/operation_pool/src/bron_kerbosch.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/beacon_node/operation_pool/src/bron_kerbosch.rs b/beacon_node/operation_pool/src/bron_kerbosch.rs index 8e903ac1b8e..2b5102567ff 100644 --- a/beacon_node/operation_pool/src/bron_kerbosch.rs +++ b/beacon_node/operation_pool/src/bron_kerbosch.rs @@ -132,7 +132,9 @@ fn find_pivot(p: &[usize], x: &[usize], neighbourhoods: &[Vec]) -> usize *px.iter() .min_by_key(|&e| { let pp = p.to_vec(); - pp.iter().filter(|ee| neighbourhoods[*e].contains(ee)).count() + pp.iter() + .filter(|ee| neighbourhoods[*e].contains(ee)) + .count() }) .unwrap() } From eb5d6d269b1b28378f9293da930c13817fee66d1 Mon Sep 17 00:00:00 2001 
From: geemo Date: Fri, 28 Jul 2023 10:57:27 -0600 Subject: [PATCH 03/37] op-pool maintains unaggregated_attestations and calculates bron kerbosh from a static graph --- .../operation_pool/src/attestation_storage.rs | 94 +++++++--- .../operation_pool/src/bron_kerbosch.rs | 11 +- beacon_node/operation_pool/src/lib.rs | 167 +++++++++++++++--- 3 files changed, 220 insertions(+), 52 deletions(-) diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index dac5e25b349..57de8d842eb 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -20,7 +20,7 @@ pub struct CompactAttestationData { pub target_root: Hash256, } -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub struct CompactIndexedAttestation { pub attesting_indices: Vec, pub aggregation_bits: BitList, @@ -48,7 +48,8 @@ pub struct AttestationMap { #[derive(Debug, Default, PartialEq)] pub struct AttestationDataMap { - attestations: HashMap>>, + pub aggregate_attestations: HashMap>>, + pub unaggregate_attestations: HashMap>>, } impl SplitAttestation { @@ -151,23 +152,29 @@ impl AttestationMap { indexed, } = SplitAttestation::new(attestation, attesting_indices); - let attestation_map = self.checkpoint_map.entry(checkpoint).or_default(); - let attestations = attestation_map.attestations.entry(data).or_default(); - - // Greedily aggregate the attestation with all existing attestations. - // NOTE: this is sub-optimal and in future we will remove this in favour of max-clique - // aggregation. - let mut aggregated = false; + let attestation_map = self + .checkpoint_map + .entry(checkpoint) + .or_default(); + let attestations = if indexed.attesting_indices.len() > 1 { + attestation_map + .aggregate_attestations + .entry(data) + .or_default() + } else { + attestation_map + .unaggregate_attestations + .entry(data) + .or_insert_with(Vec::new) + }; + let mut observed = false; for existing_attestation in attestations.iter_mut() { - if existing_attestation.signers_disjoint_from(&indexed) { - existing_attestation.aggregate(&indexed); - aggregated = true; - } else if *existing_attestation == indexed { - aggregated = true; + if *existing_attestation == indexed { + observed = true; } } - if !aggregated { + if !observed { attestations.push(indexed); } } @@ -195,6 +202,13 @@ impl AttestationMap { self.checkpoint_map .retain(|checkpoint_key, _| current_epoch <= checkpoint_key.target_epoch + 1); } + + pub fn get_attestation_map( + &self, + checkpoint_key: &CheckpointKey, + ) -> Option<&AttestationDataMap> { + self.checkpoint_map.get(checkpoint_key) + } /// Statistics about all attestations stored in the map. 
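    ///
    /// With the aggregate/unaggregate split, attestation data present in both
    /// maps is counted once in `num_attestation_data`, and
    /// `max_aggregates_per_data` is taken over the combined per-data count
    /// (see `AttestationDataMap::stats` below).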
pub fn stats(&self) -> AttestationStats { @@ -216,24 +230,58 @@ impl AttestationDataMap { &'a self, checkpoint_key: &'a CheckpointKey, ) -> impl Iterator> + 'a { - self.attestations.iter().flat_map(|(data, vec_indexed)| { - vec_indexed.iter().map(|indexed| AttestationRef { - checkpoint: checkpoint_key, - data, - indexed, - }) - }) + let aggregates = self + .aggregate_attestations + .iter() + .flat_map(|(data, vec_indexed)| { + vec_indexed.iter().map(|indexed| AttestationRef { + checkpoint: checkpoint_key, + data, + indexed, + }) + }); + + let unaggregates = self + .aggregate_attestations + .iter() + .flat_map(|(data, vec_indexed)| { + vec_indexed.iter().map(|indexed| AttestationRef { + checkpoint: checkpoint_key, + data, + indexed, + }) + }); + + aggregates.chain(unaggregates) } pub fn stats(&self) -> AttestationStats { let mut stats = AttestationStats::default(); + let mut data_to_num_attestations: HashMap<&CompactAttestationData, usize> = HashMap::new(); - for aggregates in self.attestations.values() { + for (data, aggregates) in self.aggregate_attestations.iter() { stats.num_attestations += aggregates.len(); stats.num_attestation_data += 1; stats.max_aggregates_per_data = std::cmp::max(stats.max_aggregates_per_data, aggregates.len()); + + data_to_num_attestations.insert(data, aggregates.len()); + } + + for (data, unaggregates) in self.unaggregate_attestations.iter() { + stats.num_attestations += unaggregates.len(); + if let Some(aggregates_num) = data_to_num_attestations.get(data) { + stats.max_aggregates_per_data = std::cmp::max( + stats.max_aggregates_per_data, + aggregates_num + unaggregates.len(), + ); + } else { + stats.num_attestation_data += 1; + stats.max_aggregates_per_data = + std::cmp::max(stats.max_aggregates_per_data, unaggregates.len()); + } } + stats } } diff --git a/beacon_node/operation_pool/src/bron_kerbosch.rs b/beacon_node/operation_pool/src/bron_kerbosch.rs index 2b5102567ff..65cd624b388 100644 --- a/beacon_node/operation_pool/src/bron_kerbosch.rs +++ b/beacon_node/operation_pool/src/bron_kerbosch.rs @@ -1,8 +1,7 @@ /// Entry point for the Bron-Kerbosh algorithm. Takes a vector of `vertices` of type /// `T : Compatible`. Returns all the maximal cliques (as a matrix of indices) for the graph /// `G = (V,E)` where `V` is `vertices` and `E` encodes the `is_compatible` relationship. 
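/// The number of maximal cliques is exponential in the worst case (the
/// Moon-Moser bound of 3^(n/3) for n vertices), but the attestation
/// compatibility graphs used here are small and sparse, and the degeneracy
/// ordering below keeps the branching narrow in practice.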
- -pub fn bron_kerbosh bool>( +pub fn bron_kerbosch bool>( vertices: &Vec, is_compatible: F, ) -> Vec> { @@ -25,7 +24,7 @@ pub fn bron_kerbosh bool>( .filter(|j| neighbourhoods[vi].contains(&ordering[*j])) .map(|j| ordering[j]) .collect(); - bron_kerbosh_aux(r, p, x, &neighbourhoods, &mut publish_clique) + bron_kerbosch_aux(r, p, x, &neighbourhoods, &mut publish_clique) } cliques @@ -77,7 +76,7 @@ fn degeneracy_order(num_vertices: usize, neighbourhoods: &[Vec]) -> Vec( +fn bron_kerbosch_aux( r: Vec, p: Vec, x: Vec, @@ -115,7 +114,7 @@ fn bron_kerbosh_aux( nx.retain(|e| n.contains(e)); // recursive call - bron_kerbosh_aux(nr, np, nx, neighbourhoods, publish_clique); + bron_kerbosch_aux(nr, np, nx, neighbourhoods, publish_clique); // p \ { v }, x U { v } mp.remove(mp.iter().position(|x| *x == v).unwrap()); @@ -164,6 +163,6 @@ mod tests { edges.contains(&(*first, *second)) || edges.contains(&(*first, *second)) }; - println!("{:?}", bron_kerbosh(&vertices, is_compatible)); + println!("{:?}", bron_kerbosch(&vertices, is_compatible)); } } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index b4bd2fb6760..244239a8a37 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -12,7 +12,9 @@ mod sync_aggregate_id; pub use crate::bls_to_execution_changes::ReceivedPreCapella; pub use attestation::{earliest_attestation_validators, AttMaxCover}; +use attestation_storage::{CompactIndexedAttestation, CompactAttestationData, AttestationDataMap}; pub use attestation_storage::{AttestationRef, SplitAttestation}; +use bron_kerbosch::bron_kerbosch; pub use max_cover::MaxCover; pub use persistence::{ PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14, @@ -239,6 +241,96 @@ impl OperationPool { AttMaxCover::new(att, state, reward_cache, total_active_balance, spec) }) } + + #[allow(clippy::too_many_arguments)] + fn get_clique_aggregate_attestations_for_epoch<'a>( + &'a self, + checkpoint_key: &'a CheckpointKey, + all_attestations: &'a AttestationMap, + state: &'a BeaconState, + mut validity_filter: impl FnMut(&AttestationRef<'a, T>) -> bool + Send, + num_valid: &mut i64, + spec: &'a ChainSpec, + ) -> Vec<(&CompactAttestationData, CompactIndexedAttestation)> { + let mut cliqued_atts: Vec<(&CompactAttestationData, CompactIndexedAttestation)> = vec![]; + if let Some(AttestationDataMap { aggregate_attestations, unaggregate_attestations }) + = all_attestations.get_attestation_map(checkpoint_key) { + for (data, aggregates) in aggregate_attestations { + if data.slot + spec.min_attestation_inclusion_delay <= state.slot() + && state.slot() <= data.slot + T::slots_per_epoch() { + let aggregates: Vec<&CompactIndexedAttestation> = aggregates + .iter() + .filter(|indexed| validity_filter(&AttestationRef { + checkpoint: checkpoint_key, + data: &data, + indexed + })) + .collect(); + *num_valid += aggregates.len() as i64; + + let cliques = bron_kerbosch(&aggregates, is_compatible); + + // This assumes that the values from bron_kerbosch are valid indices of + // aggregates. 
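+                // The assumption holds by construction: `bron_kerbosch` reasons
+                // over indices `0..aggregates.len()` internally, every returned
+                // clique is non-empty, and its members are pairwise compatible
+                // (disjoint attesting indices), so folding them with `aggregate`
+                // is safe.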
+ let mut clique_aggregates = cliques + .iter() + .map(|clique| { + let mut res_att = aggregates[clique[0]].clone(); + for ind in clique.iter().skip(1) { + res_att.aggregate(&aggregates[*ind]); + } + res_att + }); + + if let Some(unaggregate_attestations) = unaggregate_attestations.get(&data) { + for attestation in unaggregate_attestations + .iter() + .filter(|indexed| validity_filter(&AttestationRef { + checkpoint: checkpoint_key, + data: &data, + indexed + })) + { + *num_valid += 1; + for mut clique_aggregate in &mut clique_aggregates { + if !clique_aggregate.attesting_indices.contains(&attestation.attesting_indices[0]) { + clique_aggregate.aggregate(attestation); + } + } + } + } + + cliqued_atts.extend( + clique_aggregates + .map(|indexed| (data, indexed)) + ); + } + } + for (data, attestations) in unaggregate_attestations { + if data.slot + spec.min_attestation_inclusion_delay <= state.slot() + && state.slot() <= data.slot + T::slots_per_epoch() { + if !aggregate_attestations.contains_key(&data) { + let mut valid_attestations = attestations + .iter() + .filter(|indexed| validity_filter(&AttestationRef { + checkpoint: checkpoint_key, + data: &data, + indexed + })); + + if let Some(first) = valid_attestations.next() { + let mut agg = first.clone(); + for attestation in valid_attestations { + agg.aggregate(&attestation); + } + cliqued_atts.push((data, agg)); + } + } + } + } + } + cliqued_atts + } /// Get a list of attestations for inclusion in a block. /// @@ -272,28 +364,50 @@ impl OperationPool { let mut num_prev_valid = 0_i64; let mut num_curr_valid = 0_i64; - let prev_epoch_att = self - .get_valid_attestations_for_epoch( - &prev_epoch_key, - &*all_attestations, - state, - &reward_cache, - total_active_balance, - prev_epoch_validity_filter, - spec, + let prev_cliqued_atts = if prev_epoch_key != curr_epoch_key { + self.get_clique_aggregate_attestations_for_epoch( + &prev_epoch_key, + &*all_attestations, + state, + prev_epoch_validity_filter, + &mut num_prev_valid, + spec ) - .inspect(|_| num_prev_valid += 1); - let curr_epoch_att = self - .get_valid_attestations_for_epoch( - &curr_epoch_key, - &*all_attestations, - state, - &reward_cache, - total_active_balance, - curr_epoch_validity_filter, - spec, - ) - .inspect(|_| num_curr_valid += 1); + } else { + vec![] + }; + + let prev_epoch_cliqued_atts: Vec> = prev_cliqued_atts.iter() + .map(|(data, indexed)| AttestationRef { + checkpoint: &prev_epoch_key, + data, + indexed, + }) + .filter_map(|att| { + AttMaxCover::new(att, state, &reward_cache, total_active_balance, spec) + }) + .collect(); + + let curr_cliqued_atts = self.get_clique_aggregate_attestations_for_epoch( + &curr_epoch_key, + &*all_attestations, + state, + curr_epoch_validity_filter, + &mut num_curr_valid, + spec + ); + + let curr_epoch_cliqued_atts: Vec> = curr_cliqued_atts.iter() + .map(|(data, indexed)| AttestationRef { + checkpoint: &prev_epoch_key, + data, + indexed, + }) + .filter_map(|att| { + AttMaxCover::new(att, state, &reward_cache, total_active_balance, spec) + }) + .collect(); + let prev_epoch_limit = if let BeaconState::Base(base_state) = state { std::cmp::min( @@ -312,13 +426,13 @@ impl OperationPool { if prev_epoch_key == curr_epoch_key { vec![] } else { - maximum_cover(prev_epoch_att, prev_epoch_limit, "prev_epoch_attestations") + maximum_cover(prev_epoch_cliqued_atts, prev_epoch_limit, "prev_epoch_attestations") } }, move || { let _timer = metrics::start_timer(&metrics::ATTESTATION_CURR_EPOCH_PACKING_TIME); maximum_cover( - curr_epoch_att, + 
curr_epoch_cliqued_atts, T::MaxAttestations::to_usize(), "curr_epoch_attestations", ) @@ -708,6 +822,13 @@ impl OperationPool { } } +fn is_compatible(x: &&CompactIndexedAttestation, y: &&CompactIndexedAttestation) -> bool { + let x_attester_set: HashSet<_> = x.attesting_indices.iter().collect(); + let y_attester_set: HashSet<_> = y.attesting_indices.iter().collect(); + x_attester_set.is_disjoint(&y_attester_set) +} + + /// Filter up to a maximum number of operations out of an iterator. fn filter_limit_operations<'a, T: 'a, V: 'a, I, F, G>( operations: I, From 7bac0ddd820954e51d4a37bd4dc8c38b2b5c85eb Mon Sep 17 00:00:00 2001 From: geemo Date: Fri, 28 Jul 2023 13:02:35 -0600 Subject: [PATCH 04/37] add unagg attestations to op_pool during processing. fmt --- beacon_node/beacon_chain/src/beacon_chain.rs | 21 --- beacon_node/http_api/src/lib.rs | 14 ++ .../gossip_methods.rs | 10 ++ .../operation_pool/src/attestation_storage.rs | 7 +- beacon_node/operation_pool/src/lib.rs | 157 ++++++++++-------- 5 files changed, 112 insertions(+), 97 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 918d0bd29b7..86530823e6e 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4716,27 +4716,6 @@ impl BeaconChain { .op_pool .get_bls_to_execution_changes(&state, &self.spec); - // Iterate through the naive aggregation pool and ensure all the attestations from there - // are included in the operation pool. - let unagg_import_timer = - metrics::start_timer(&metrics::BLOCK_PRODUCTION_UNAGGREGATED_TIMES); - for attestation in self.naive_aggregation_pool.read().iter() { - let import = |attestation: &Attestation| { - let attesting_indices = get_attesting_indices_from_state(&state, attestation)?; - self.op_pool - .insert_attestation(attestation.clone(), attesting_indices) - }; - if let Err(e) = import(attestation) { - // Don't stop block production if there's an error, just create a log. - error!( - self.log, - "Attestation did not transfer to op pool"; - "reason" => ?e - ); - } - } - drop(unagg_import_timer); - // Override the beacon node's graffiti with graffiti from the validator, if present. 
let graffiti = match validator_graffiti { Some(graffiti) => graffiti, diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 05c678b250f..65af3e1d800 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1893,6 +1893,20 @@ pub fn serve( format!("Naive aggregation pool: {:?}", e), )); } + + if let Err(e) = chain.add_to_block_inclusion_pool(attestation) { + error!(log, + "Failure adding verified attestation to the operation pool"; + "error" => ?e, + "request_index" => index, + "committee_index" => committee_index, + "slot" => slot, + ); + failures.push(api_types::Failure::new( + index, + format!("Operation pool: {:?}", e), + )); + } } if num_already_known > 0 { diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 9fe64d159f2..362e896192c 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -376,6 +376,16 @@ impl NetworkBeaconProcessor { ) } + if let Err(e) = self.chain.add_to_block_inclusion_pool(verified_attestation) { + debug!( + self.log, + "Attestation invalid for op pool"; + "reason" => ?e, + "peer" => %peer_id, + "beacon_block_root" => ?beacon_block_root, + ); + } + metrics::inc_counter( &metrics::BEACON_PROCESSOR_UNAGGREGATED_ATTESTATION_IMPORTED_TOTAL, ); diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index 57de8d842eb..f546d317bdc 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -49,7 +49,8 @@ pub struct AttestationMap { #[derive(Debug, Default, PartialEq)] pub struct AttestationDataMap { pub aggregate_attestations: HashMap>>, - pub unaggregate_attestations: HashMap>>, + pub unaggregate_attestations: + HashMap>>, } impl SplitAttestation { @@ -202,12 +203,12 @@ impl AttestationMap { self.checkpoint_map .retain(|checkpoint_key, _| current_epoch <= checkpoint_key.target_epoch + 1); } - + pub fn get_attestation_map( &self, checkpoint_key: &CheckpointKey, ) -> Option<&AttestationDataMap> { - self.checkpoint_map.get(checkpoint_key) + self.checkpoint_map.get(checkpoint_key) } /// Statistics about all attestations stored in the map. 
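The ingestion paths above feed the storage split introduced in patch 03: aggregates (more than one attesting index) and unaggregates (exactly one) are kept apart, and exact duplicates are dropped instead of being greedily merged, deferring all aggregation to block production. A self-contained sketch of that routing rule, with plain stand-in types in place of `CompactAttestationData` and `CompactIndexedAttestation` (an assumption for illustration):

use std::collections::HashMap;

// Stand-ins for the real compact types (assumed shapes, illustration only).
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct Data(u64);
#[derive(Clone, Debug, PartialEq)]
struct Indexed {
    attesting_indices: Vec<u64>,
}

#[derive(Default)]
struct DataMap {
    aggregate_attestations: HashMap<Data, Vec<Indexed>>,
    unaggregate_attestations: HashMap<Data, Vec<Indexed>>,
}

impl DataMap {
    fn insert(&mut self, data: Data, indexed: Indexed) {
        // Route by attester count, as in `AttestationMap::insert` above.
        let attestations = if indexed.attesting_indices.len() > 1 {
            self.aggregate_attestations.entry(data).or_default()
        } else {
            self.unaggregate_attestations.entry(data).or_default()
        };
        // Only deduplicate; aggregation is deferred to block production.
        if !attestations.contains(&indexed) {
            attestations.push(indexed);
        }
    }
}

fn main() {
    let mut map = DataMap::default();
    map.insert(Data(0), Indexed { attesting_indices: vec![3] }); // unaggregate
    map.insert(Data(0), Indexed { attesting_indices: vec![1, 2] }); // aggregate
    map.insert(Data(0), Indexed { attesting_indices: vec![3] }); // duplicate, dropped
    assert_eq!(map.unaggregate_attestations[&Data(0)].len(), 1);
    assert_eq!(map.aggregate_attestations[&Data(0)].len(), 1);
}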
diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 244239a8a37..82ce13aca53 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -12,7 +12,7 @@ mod sync_aggregate_id; pub use crate::bls_to_execution_changes::ReceivedPreCapella; pub use attestation::{earliest_attestation_validators, AttMaxCover}; -use attestation_storage::{CompactIndexedAttestation, CompactAttestationData, AttestationDataMap}; +use attestation_storage::{AttestationDataMap, CompactAttestationData, CompactIndexedAttestation}; pub use attestation_storage::{AttestationRef, SplitAttestation}; use bron_kerbosch::bron_kerbosch; pub use max_cover::MaxCover; @@ -241,7 +241,7 @@ impl OperationPool { AttMaxCover::new(att, state, reward_cache, total_active_balance, spec) }) } - + #[allow(clippy::too_many_arguments)] fn get_clique_aggregate_attestations_for_epoch<'a>( &'a self, @@ -251,72 +251,76 @@ impl OperationPool { mut validity_filter: impl FnMut(&AttestationRef<'a, T>) -> bool + Send, num_valid: &mut i64, spec: &'a ChainSpec, - ) -> Vec<(&CompactAttestationData, CompactIndexedAttestation)> { + ) -> Vec<(&CompactAttestationData, CompactIndexedAttestation)> { let mut cliqued_atts: Vec<(&CompactAttestationData, CompactIndexedAttestation)> = vec![]; - if let Some(AttestationDataMap { aggregate_attestations, unaggregate_attestations }) - = all_attestations.get_attestation_map(checkpoint_key) { + if let Some(AttestationDataMap { + aggregate_attestations, + unaggregate_attestations, + }) = all_attestations.get_attestation_map(checkpoint_key) + { for (data, aggregates) in aggregate_attestations { if data.slot + spec.min_attestation_inclusion_delay <= state.slot() - && state.slot() <= data.slot + T::slots_per_epoch() { + && state.slot() <= data.slot + T::slots_per_epoch() + { let aggregates: Vec<&CompactIndexedAttestation> = aggregates .iter() - .filter(|indexed| validity_filter(&AttestationRef { + .filter(|indexed| { + validity_filter(&AttestationRef { checkpoint: checkpoint_key, data: &data, - indexed - })) + indexed, + }) + }) .collect(); *num_valid += aggregates.len() as i64; let cliques = bron_kerbosch(&aggregates, is_compatible); - + // This assumes that the values from bron_kerbosch are valid indices of // aggregates. 
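                // `is_compatible` (disjoint attesting indices) is symmetric, as
                // `compute_neigbourhoods` requires for the neighbourhood matrix
                // to be well formed.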
- let mut clique_aggregates = cliques - .iter() - .map(|clique| { - let mut res_att = aggregates[clique[0]].clone(); - for ind in clique.iter().skip(1) { - res_att.aggregate(&aggregates[*ind]); - } - res_att - }); + let mut clique_aggregates = cliques.iter().map(|clique| { + let mut res_att = aggregates[clique[0]].clone(); + for ind in clique.iter().skip(1) { + res_att.aggregate(&aggregates[*ind]); + } + res_att + }); if let Some(unaggregate_attestations) = unaggregate_attestations.get(&data) { - for attestation in unaggregate_attestations - .iter() - .filter(|indexed| validity_filter(&AttestationRef { + for attestation in unaggregate_attestations.iter().filter(|indexed| { + validity_filter(&AttestationRef { checkpoint: checkpoint_key, data: &data, - indexed - })) - { + indexed, + }) + }) { *num_valid += 1; for mut clique_aggregate in &mut clique_aggregates { - if !clique_aggregate.attesting_indices.contains(&attestation.attesting_indices[0]) { + if !clique_aggregate + .attesting_indices + .contains(&attestation.attesting_indices[0]) + { clique_aggregate.aggregate(attestation); } } } } - cliqued_atts.extend( - clique_aggregates - .map(|indexed| (data, indexed)) - ); + cliqued_atts.extend(clique_aggregates.map(|indexed| (data, indexed))); } } for (data, attestations) in unaggregate_attestations { if data.slot + spec.min_attestation_inclusion_delay <= state.slot() - && state.slot() <= data.slot + T::slots_per_epoch() { + && state.slot() <= data.slot + T::slots_per_epoch() + { if !aggregate_attestations.contains_key(&data) { - let mut valid_attestations = attestations - .iter() - .filter(|indexed| validity_filter(&AttestationRef { + let mut valid_attestations = attestations.iter().filter(|indexed| { + validity_filter(&AttestationRef { checkpoint: checkpoint_key, data: &data, - indexed - })); + indexed, + }) + }); if let Some(first) = valid_attestations.next() { let mut agg = first.clone(); @@ -364,50 +368,51 @@ impl OperationPool { let mut num_prev_valid = 0_i64; let mut num_curr_valid = 0_i64; - let prev_cliqued_atts = if prev_epoch_key != curr_epoch_key { + let prev_cliqued_atts = if prev_epoch_key != curr_epoch_key { self.get_clique_aggregate_attestations_for_epoch( - &prev_epoch_key, - &*all_attestations, - state, - prev_epoch_validity_filter, - &mut num_prev_valid, - spec + &prev_epoch_key, + &*all_attestations, + state, + prev_epoch_validity_filter, + &mut num_prev_valid, + spec, ) } else { vec![] }; - let prev_epoch_cliqued_atts: Vec> = prev_cliqued_atts.iter() - .map(|(data, indexed)| AttestationRef { - checkpoint: &prev_epoch_key, - data, - indexed, - }) - .filter_map(|att| { - AttMaxCover::new(att, state, &reward_cache, total_active_balance, spec) - }) - .collect(); + let prev_epoch_cliqued_atts: Vec> = prev_cliqued_atts + .iter() + .map(|(data, indexed)| AttestationRef { + checkpoint: &prev_epoch_key, + data, + indexed, + }) + .filter_map(|att| { + AttMaxCover::new(att, state, &reward_cache, total_active_balance, spec) + }) + .collect(); let curr_cliqued_atts = self.get_clique_aggregate_attestations_for_epoch( - &curr_epoch_key, - &*all_attestations, - state, - curr_epoch_validity_filter, - &mut num_curr_valid, - spec + &curr_epoch_key, + &*all_attestations, + state, + curr_epoch_validity_filter, + &mut num_curr_valid, + spec, ); - let curr_epoch_cliqued_atts: Vec> = curr_cliqued_atts.iter() - .map(|(data, indexed)| AttestationRef { - checkpoint: &prev_epoch_key, - data, - indexed, - }) - .filter_map(|att| { - AttMaxCover::new(att, state, &reward_cache, total_active_balance, spec) 
-            })
-            .collect();
-
+            })
+            .collect();

        let prev_epoch_limit = if let BeaconState::Base(base_state) = state {
            std::cmp::min(
@@ -312,13 +426,13 @@ impl OperationPool {
            if prev_epoch_key == curr_epoch_key {
                vec![]
            } else {
-                maximum_cover(prev_epoch_cliqued_atts, prev_epoch_limit, "prev_epoch_attestations")
+                maximum_cover(
+                    prev_epoch_cliqued_atts,
+                    prev_epoch_limit,
+                    "prev_epoch_attestations",
+                )
            }
        },
        move || {

From 3fa1dd5ce5780325af3d2e5d413718d4a78337d5 Mon Sep 17 00:00:00 2001
From: geemo
Date: Fri, 28 Jul 2023 13:17:47 -0600
Subject: [PATCH 05/37] added a couple of comments

---
 beacon_node/operation_pool/src/lib.rs | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs
index 82ce13aca53..39d0023c60a 100644
--- a/beacon_node/operation_pool/src/lib.rs
+++ b/beacon_node/operation_pool/src/lib.rs
@@ -242,6 +242,9 @@ impl OperationPool {
         })
     }

+    /// Return a vector of aggregate/unaggregate attestations which are maximal cliques
+    /// with respect to the graph with attestations as vertices and an edge encoding
+    /// compatibility for aggregation.
     #[allow(clippy::too_many_arguments)]
     fn get_clique_aggregate_attestations_for_epoch<'a>(
         &'a self,
@@ -274,10 +277,10 @@ impl OperationPool {
                 .collect();
             *num_valid += aggregates.len() as i64;

+            // derive cliques for current attestation data
             let cliques = bron_kerbosch(&aggregates, is_compatible);

-            // This assumes that the values from bron_kerbosch are valid indices of
-            // aggregates.
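+            // Worked example: aggregates with attesting indices {1,2}, {3,4}, {1,3}
+            // have a single compatibility edge ({1,2}-{3,4}), so the maximal cliques
+            // are {{1,2},{3,4}} and {{1,3}}, which aggregate to {1,2,3,4} and {1,3}.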
+ // aggregate each cliques corresponding attestaiions let mut clique_aggregates = cliques.iter().map(|clique| { let mut res_att = aggregates[clique[0]].clone(); for ind in clique.iter().skip(1) { @@ -286,6 +289,8 @@ impl OperationPool { res_att }); + // aggregate unaggregate attestations into the clique aggregates + // if compatible if let Some(unaggregate_attestations) = unaggregate_attestations.get(&data) { for attestation in unaggregate_attestations.iter().filter(|indexed| { validity_filter(&AttestationRef { @@ -309,6 +314,8 @@ impl OperationPool { cliqued_atts.extend(clique_aggregates.map(|indexed| (data, indexed))); } } + // include aggregated attestations from unaggregated attestations whose + // attestation data doesn't appear in aggregated_attestations for (data, attestations) in unaggregate_attestations { if data.slot + spec.min_attestation_inclusion_delay <= state.slot() && state.slot() <= data.slot + T::slots_per_epoch() @@ -368,7 +375,8 @@ impl OperationPool { let mut num_prev_valid = 0_i64; let mut num_curr_valid = 0_i64; - let prev_cliqued_atts = if prev_epoch_key != curr_epoch_key { + // If we're in the genesis epoch, just use the current epoch attestations. + let prev_epoch_cliqued_atts = if prev_epoch_key != curr_epoch_key { self.get_clique_aggregate_attestations_for_epoch( &prev_epoch_key, &*all_attestations, @@ -381,7 +389,7 @@ impl OperationPool { vec![] }; - let prev_epoch_cliqued_atts: Vec> = prev_cliqued_atts + let prev_epoch_cliqued_atts: Vec> = prev_epoch_cliqued_atts .iter() .map(|(data, indexed)| AttestationRef { checkpoint: &prev_epoch_key, @@ -393,7 +401,7 @@ impl OperationPool { }) .collect(); - let curr_cliqued_atts = self.get_clique_aggregate_attestations_for_epoch( + let curr_epoch_cliqued_atts = self.get_clique_aggregate_attestations_for_epoch( &curr_epoch_key, &*all_attestations, state, @@ -402,7 +410,7 @@ impl OperationPool { spec, ); - let curr_epoch_cliqued_atts: Vec> = curr_cliqued_atts + let curr_epoch_cliqued_atts: Vec> = curr_epoch_cliqued_atts .iter() .map(|(data, indexed)| AttestationRef { checkpoint: &prev_epoch_key, From 2aeb8c6fe466e5defc5ef36ffa3dbcc675c77522 Mon Sep 17 00:00:00 2001 From: geemo Date: Tue, 1 Aug 2023 13:01:39 -0600 Subject: [PATCH 06/37] delete unused --- beacon_node/beacon_chain/src/beacon_chain.rs | 1 - .../operation_pool/src/attestation_storage.rs | 11 --------- beacon_node/operation_pool/src/lib.rs | 24 ------------------- 3 files changed, 36 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 86530823e6e..cdad16b07bf 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -92,7 +92,6 @@ use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; use state_processing::{ - common::get_attesting_indices_from_state, per_block_processing, per_block_processing::{ errors::AttestationValidationError, get_expected_withdrawals, diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index f546d317bdc..e984515895a 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -180,17 +180,6 @@ impl AttestationMap { } } - /// Iterate all attestations matching the given `checkpoint_key`. 
- pub fn get_attestations<'a>( - &'a self, - checkpoint_key: &'a CheckpointKey, - ) -> impl Iterator> + 'a { - self.checkpoint_map - .get(checkpoint_key) - .into_iter() - .flat_map(|attestation_map| attestation_map.iter(checkpoint_key)) - } - /// Iterate all attestations in the map. pub fn iter(&self) -> impl Iterator> { self.checkpoint_map diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 39d0023c60a..60c404aa89c 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -218,30 +218,6 @@ impl OperationPool { self.attestations.read().stats() } - /// Return all valid attestations for the given epoch, for use in max cover. - #[allow(clippy::too_many_arguments)] - fn get_valid_attestations_for_epoch<'a>( - &'a self, - checkpoint_key: &'a CheckpointKey, - all_attestations: &'a AttestationMap, - state: &'a BeaconState, - reward_cache: &'a RewardCache, - total_active_balance: u64, - validity_filter: impl FnMut(&AttestationRef<'a, T>) -> bool + Send, - spec: &'a ChainSpec, - ) -> impl Iterator> + Send { - all_attestations - .get_attestations(checkpoint_key) - .filter(|att| { - att.data.slot + spec.min_attestation_inclusion_delay <= state.slot() - && state.slot() <= att.data.slot + T::slots_per_epoch() - }) - .filter(validity_filter) - .filter_map(move |att| { - AttMaxCover::new(att, state, reward_cache, total_active_balance, spec) - }) - } - /// Return a vector of aggregate/unaggregate attestations which are maximal cliques /// with resepct to the graph with attestations as vertices and an edge encoding /// compatibility for aggregation. From 22d0d355f60d1fe817c98effe96bd16459aa2b01 Mon Sep 17 00:00:00 2001 From: geemo Date: Tue, 15 Aug 2023 14:51:38 -0600 Subject: [PATCH 07/37] fix using wrong checkpoint key --- beacon_node/operation_pool/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 60c404aa89c..92380d7e6d3 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -389,7 +389,7 @@ impl OperationPool { let curr_epoch_cliqued_atts: Vec> = curr_epoch_cliqued_atts .iter() .map(|(data, indexed)| AttestationRef { - checkpoint: &prev_epoch_key, + checkpoint: &curr_epoch_key, data, indexed, }) From 673851a949bec9c745ed144e40e76ab239a35d9f Mon Sep 17 00:00:00 2001 From: geemo Date: Mon, 21 Aug 2023 22:52:06 -0600 Subject: [PATCH 08/37] avoid underflow on zero length vector --- .../operation_pool/src/bron_kerbosch.rs | 39 ++++++++++--------- 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/beacon_node/operation_pool/src/bron_kerbosch.rs b/beacon_node/operation_pool/src/bron_kerbosch.rs index 65cd624b388..6467ac9d004 100644 --- a/beacon_node/operation_pool/src/bron_kerbosch.rs +++ b/beacon_node/operation_pool/src/bron_kerbosch.rs @@ -5,26 +5,29 @@ pub fn bron_kerbosch bool>( vertices: &Vec, is_compatible: F, ) -> Vec> { - // build neighbourhoods and degeneracy ordering, also move to index-based reasoning - let neighbourhoods = compute_neigbourhoods(vertices, is_compatible); - let ordering = degeneracy_order(vertices.len(), &neighbourhoods); - // create empty vector to store cliques let mut cliques: Vec> = vec![]; - let mut publish_clique = |c| cliques.push(c); - - for i in 0..ordering.len() { - let vi = ordering[i]; - let p = (i + 1..ordering.len()) - .filter(|j| neighbourhoods[vi].contains(&ordering[*j])) - .map(|j| ordering[j]) - .collect(); - let r = 
vec![vi]; - let x = (0..i) - .filter(|j| neighbourhoods[vi].contains(&ordering[*j])) - .map(|j| ordering[j]) - .collect(); - bron_kerbosch_aux(r, p, x, &neighbourhoods, &mut publish_clique) + + if vertices.len() > 0 { + // build neighbourhoods and degeneracy ordering, also move to index-based reasoning + let neighbourhoods = compute_neigbourhoods(vertices, is_compatible); + let ordering = degeneracy_order(vertices.len(), &neighbourhoods); + + let mut publish_clique = |c| cliques.push(c); + + for i in 0..ordering.len() { + let vi = ordering[i]; + let p = (i + 1..ordering.len()) + .filter(|j| neighbourhoods[vi].contains(&ordering[*j])) + .map(|j| ordering[j]) + .collect(); + let r = vec![vi]; + let x = (0..i) + .filter(|j| neighbourhoods[vi].contains(&ordering[*j])) + .map(|j| ordering[j]) + .collect(); + bron_kerbosch_aux(r, p, x, &neighbourhoods, &mut publish_clique) + } } cliques From 3f0011bb86619705edaefeac8b1579c2ef5e7b40 Mon Sep 17 00:00:00 2001 From: geemo Date: Mon, 21 Aug 2023 23:40:05 -0600 Subject: [PATCH 09/37] This test is no longer the correct logic since we do not aggregate the attestations in the op pool until calculating the attestations for block inclusion --- beacon_node/operation_pool/src/lib.rs | 95 +-------------------------- 1 file changed, 2 insertions(+), 93 deletions(-) diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 92380d7e6d3..3c0a8f499a9 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1080,7 +1080,7 @@ mod release_tests { } } - assert_eq!(op_pool.num_attestations(), committees.len()); + assert_eq!(op_pool.num_attestations(), 128); // Before the min attestation inclusion delay, get_attestations shouldn't return anything. assert_eq!( @@ -1107,7 +1107,7 @@ mod release_tests { // Prune attestations shouldn't do anything at this point. op_pool.prune_attestations(state.current_epoch()); - assert_eq!(op_pool.num_attestations(), committees.len()); + assert_eq!(op_pool.num_attestations(), 128); // But once we advance to more than an epoch after the attestation, it should prune it // out of existence. @@ -1155,97 +1155,6 @@ mod release_tests { assert_eq!(op_pool.num_attestations(), committees.len()); } - /// Adding lots of attestations that only intersect pairwise should lead to two aggregate - /// attestations. - #[test] - fn attestation_pairwise_overlapping() { - let (harness, ref spec) = attestation_test_state::(1); - - let state = harness.get_current_state(); - - let op_pool = OperationPool::::new(); - - let slot = state.slot(); - let committees = state - .get_beacon_committees_at_slot(slot) - .unwrap() - .into_iter() - .map(BeaconCommittee::into_owned) - .collect::>(); - - let num_validators = - MainnetEthSpec::slots_per_epoch() as usize * spec.target_committee_size; - - let attestations = harness.make_attestations( - (0..num_validators).collect::>().as_slice(), - &state, - Hash256::zero(), - SignedBeaconBlockHash::from(Hash256::zero()), - slot, - ); - - let step_size = 2; - // Create attestations that overlap on `step_size` validators, like: - // {0,1,2,3}, {2,3,4,5}, {4,5,6,7}, ... 
- for (atts1, _) in attestations { - let atts2 = atts1.clone(); - let aggs1 = atts1 - .chunks_exact(step_size * 2) - .map(|chunk| { - let agg = chunk.into_iter().map(|(att, _)| att).fold::, - >, _>( - None, - |att, new_att| { - if let Some(mut a) = att { - a.aggregate(new_att); - Some(a) - } else { - Some(new_att.clone()) - } - }, - ); - agg.unwrap() - }) - .collect::>(); - let aggs2 = atts2 - .into_iter() - .skip(step_size) - .collect::>() - .as_slice() - .chunks_exact(step_size * 2) - .map(|chunk| { - let agg = chunk.into_iter().map(|(att, _)| att).fold::, - >, _>( - None, - |att, new_att| { - if let Some(mut a) = att { - a.aggregate(new_att); - Some(a) - } else { - Some(new_att.clone()) - } - }, - ); - agg.unwrap() - }) - .collect::>(); - - for att in aggs1.into_iter().chain(aggs2.into_iter()) { - let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); - op_pool.insert_attestation(att, attesting_indices).unwrap(); - } - } - - // The attestations should get aggregated into two attestations that comprise all - // validators. - let stats = op_pool.attestation_stats(); - assert_eq!(stats.num_attestation_data, committees.len()); - assert_eq!(stats.num_attestations, 2 * committees.len()); - assert_eq!(stats.max_aggregates_per_data, 2); - } - /// Create a bunch of attestations signed by a small number of validators, and another /// bunch signed by a larger number, such that there are at least `max_attestations` /// signed by the larger number. Then, check that `get_attestations` only returns the From 2d211be033b0bf69c496a6aa4f5ac220cfb2d2a9 Mon Sep 17 00:00:00 2001 From: geemo Date: Fri, 1 Sep 2023 00:29:56 -0600 Subject: [PATCH 10/37] remove collect --- beacon_node/operation_pool/src/lib.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 3c0a8f499a9..529f184cdfb 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -365,7 +365,7 @@ impl OperationPool { vec![] }; - let prev_epoch_cliqued_atts: Vec> = prev_epoch_cliqued_atts + let prev_epoch_cliqued_atts = prev_epoch_cliqued_atts .iter() .map(|(data, indexed)| AttestationRef { checkpoint: &prev_epoch_key, @@ -374,8 +374,7 @@ impl OperationPool { }) .filter_map(|att| { AttMaxCover::new(att, state, &reward_cache, total_active_balance, spec) - }) - .collect(); + }); let curr_epoch_cliqued_atts = self.get_clique_aggregate_attestations_for_epoch( &curr_epoch_key, @@ -386,7 +385,7 @@ impl OperationPool { spec, ); - let curr_epoch_cliqued_atts: Vec> = curr_epoch_cliqued_atts + let curr_epoch_cliqued_atts = curr_epoch_cliqued_atts .iter() .map(|(data, indexed)| AttestationRef { checkpoint: &curr_epoch_key, @@ -395,8 +394,7 @@ impl OperationPool { }) .filter_map(|att| { AttMaxCover::new(att, state, &reward_cache, total_active_balance, spec) - }) - .collect(); + }); let prev_epoch_limit = if let BeaconState::Base(base_state) = state { std::cmp::min( From 649c4bddc7b4f14886d819722c75ce462d210f2a Mon Sep 17 00:00:00 2001 From: geemo Date: Fri, 1 Sep 2023 20:46:32 -0600 Subject: [PATCH 11/37] using u64 instead of &u64 I think this might have been the issue but have not tested yet --- beacon_node/operation_pool/src/lib.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 529f184cdfb..97f76856f9f 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ 
b/beacon_node/operation_pool/src/lib.rs @@ -82,7 +82,7 @@ pub enum OpPoolError { #[derive(Default)] pub struct AttestationStats { - /// Total number of attestations for all committeees/indices/votes. + /// Total number of attestations for all committees/indices/votes. pub num_attestations: usize, /// Number of unique `AttestationData` attested to. pub num_attestation_data: usize, @@ -232,6 +232,7 @@ impl OperationPool { spec: &'a ChainSpec, ) -> Vec<(&CompactAttestationData, CompactIndexedAttestation)> { let mut cliqued_atts: Vec<(&CompactAttestationData, CompactIndexedAttestation)> = vec![]; + if let Some(AttestationDataMap { aggregate_attestations, unaggregate_attestations, @@ -817,8 +818,8 @@ fn is_compatible( x: &&CompactIndexedAttestation, y: &&CompactIndexedAttestation, ) -> bool { - let x_attester_set: HashSet<_> = x.attesting_indices.iter().collect(); - let y_attester_set: HashSet<_> = y.attesting_indices.iter().collect(); + let x_attester_set: HashSet = x.attesting_indices.iter().cloned().collect(); + let y_attester_set: HashSet = y.attesting_indices.iter().cloned().collect(); x_attester_set.is_disjoint(&y_attester_set) } From 4e81596dbd8e8a5fd8871c3cac80cc6fc5001b02 Mon Sep 17 00:00:00 2001 From: geemo Date: Thu, 28 Sep 2023 10:27:34 -0500 Subject: [PATCH 12/37] remove subset attestations --- beacon_node/operation_pool/src/lib.rs | 34 ++++++++++++++++++++++----- 1 file changed, 28 insertions(+), 6 deletions(-) diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 97f76856f9f..1a7a5b00c20 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -258,13 +258,35 @@ impl OperationPool { let cliques = bron_kerbosch(&aggregates, is_compatible); // aggregate each cliques corresponding attestaiions - let mut clique_aggregates = cliques.iter().map(|clique| { - let mut res_att = aggregates[clique[0]].clone(); - for ind in clique.iter().skip(1) { - res_att.aggregate(&aggregates[*ind]); + let mut clique_aggregates: Vec> = cliques + .iter() + .map(|clique| { + let mut res_att = aggregates[clique[0]].clone(); + for ind in clique.iter().skip(1) { + res_att.aggregate(&aggregates[*ind]); + } + res_att + }) + .collect(); + + let mut indices_to_remove = Vec::new(); + clique_aggregates + .sort_by(|a, b| a.attesting_indices.len().cmp(&b.attesting_indices.len())); + for (index, clique) in clique_aggregates.iter().enumerate() { + for bigger_clique in clique_aggregates.iter().skip(index + 1) { + if clique + .aggregation_bits + .is_subset(&bigger_clique.aggregation_bits) + { + indices_to_remove.push(index); + break; + } } - res_att - }); + } + for index in indices_to_remove.iter().rev() { + clique_aggregates.remove(*index); + } + let mut clique_aggregates = clique_aggregates.into_iter(); // aggregate unaggregate attestations into the clique aggregates // if compatible From f14b56fc099807f5e7c0eadbf86fb293c346cf5d Mon Sep 17 00:00:00 2001 From: geemo Date: Wed, 4 Oct 2023 11:07:21 -0500 Subject: [PATCH 13/37] sproul patch: fixes iterator issue --- beacon_node/operation_pool/src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 1a7a5b00c20..795b025a375 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -268,7 +268,6 @@ impl OperationPool { res_att }) .collect(); - let mut indices_to_remove = Vec::new(); clique_aggregates .sort_by(|a, b| 
a.attesting_indices.len().cmp(&b.attesting_indices.len())); @@ -283,10 +282,10 @@ impl OperationPool { } } } + for index in indices_to_remove.iter().rev() { clique_aggregates.remove(*index); } - let mut clique_aggregates = clique_aggregates.into_iter(); // aggregate unaggregate attestations into the clique aggregates // if compatible @@ -299,7 +298,7 @@ impl OperationPool { }) }) { *num_valid += 1; - for mut clique_aggregate in &mut clique_aggregates { + for clique_aggregate in &mut clique_aggregates { if !clique_aggregate .attesting_indices .contains(&attestation.attesting_indices[0]) @@ -310,7 +309,8 @@ impl OperationPool { } } - cliqued_atts.extend(clique_aggregates.map(|indexed| (data, indexed))); + cliqued_atts + .extend(clique_aggregates.into_iter().map(|indexed| (data, indexed))); } } // include aggregated attestations from unaggregated attestations whose From 77bbfcc25010e6e2b44a5f7aed0301bfb58c035f Mon Sep 17 00:00:00 2001 From: geemo Date: Fri, 6 Oct 2023 21:32:15 -0500 Subject: [PATCH 14/37] use swap_remove --- beacon_node/operation_pool/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 795b025a375..f2753e759fd 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -284,7 +284,7 @@ impl OperationPool { } for index in indices_to_remove.iter().rev() { - clique_aggregates.remove(*index); + clique_aggregates.swap_remove(*index); } // aggregate unaggregate attestations into the clique aggregates From b02301ad4b7e9810e613fb25cdc1c889a9dd191b Mon Sep 17 00:00:00 2001 From: geemo Date: Fri, 6 Oct 2023 21:55:55 -0500 Subject: [PATCH 15/37] sort unstable by --- beacon_node/operation_pool/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index f2753e759fd..e4600d8671a 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -270,7 +270,7 @@ impl OperationPool { .collect(); let mut indices_to_remove = Vec::new(); clique_aggregates - .sort_by(|a, b| a.attesting_indices.len().cmp(&b.attesting_indices.len())); + .sort_unstable_by(|a, b| a.attesting_indices.len().cmp(&b.attesting_indices.len())); for (index, clique) in clique_aggregates.iter().enumerate() { for bigger_clique in clique_aggregates.iter().skip(index + 1) { if clique From f77c5c5e8866bc88f462f8ce7705b28c7792bddd Mon Sep 17 00:00:00 2001 From: geemo Date: Fri, 6 Oct 2023 22:35:55 -0500 Subject: [PATCH 16/37] minor --- beacon_node/operation_pool/src/lib.rs | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index e4600d8671a..39327b78e6e 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -254,18 +254,14 @@ impl OperationPool { .collect(); *num_valid += aggregates.len() as i64; - // derive cliques for current attestation data - let cliques = bron_kerbosch(&aggregates, is_compatible); - // aggregate each cliques corresponding attestaiions - let mut clique_aggregates: Vec> = cliques + let mut clique_aggregates: Vec> = bron_kerbosch(&aggregates, is_compatible) .iter() .map(|clique| { - let mut res_att = aggregates[clique[0]].clone(); - for ind in clique.iter().skip(1) { - res_att.aggregate(&aggregates[*ind]); - } - res_att + clique.iter().skip(1).fold(aggregates[clique[0]].clone(), |mut acc, 
&ind| { + acc.aggregate(&aggregates[ind]); + acc + }) }) .collect(); let mut indices_to_remove = Vec::new(); From d2bd6afe7c3d5457cef2fdbbfde4764268f997d5 Mon Sep 17 00:00:00 2001 From: geemo Date: Fri, 13 Oct 2023 13:55:44 -0500 Subject: [PATCH 17/37] bron_kerbosch done in parallel iter without changing the validity closure type --- beacon_node/operation_pool/src/lib.rs | 223 ++++++++++++++------------ 1 file changed, 119 insertions(+), 104 deletions(-) diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 39327b78e6e..e4d0f158fc6 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -44,6 +44,7 @@ use types::{ Epoch, EthSpec, ProposerSlashing, SignedBeaconBlock, SignedBlsToExecutionChange, SignedVoluntaryExit, Slot, SyncAggregate, SyncCommitteeContribution, Validator, }; +use rayon::prelude::*; type SyncContributions = RwLock>>>; @@ -230,112 +231,120 @@ impl OperationPool { mut validity_filter: impl FnMut(&AttestationRef<'a, T>) -> bool + Send, num_valid: &mut i64, spec: &'a ChainSpec, - ) -> Vec<(&CompactAttestationData, CompactIndexedAttestation)> { - let mut cliqued_atts: Vec<(&CompactAttestationData, CompactIndexedAttestation)> = vec![]; - + ) -> Vec<(&CompactAttestationData, Vec>)> { if let Some(AttestationDataMap { - aggregate_attestations, - unaggregate_attestations, - }) = all_attestations.get_attestation_map(checkpoint_key) + aggregate_attestations, + unaggregate_attestations, + }) = all_attestations.get_attestation_map(checkpoint_key) { - for (data, aggregates) in aggregate_attestations { - if data.slot + spec.min_attestation_inclusion_delay <= state.slot() + let mut cliques_from_aggregates: Vec<_> = aggregate_attestations.into_iter().filter(|(data, _)| { + data.slot + spec.min_attestation_inclusion_delay <= state.slot() && state.slot() <= data.slot + T::slots_per_epoch() - { - let aggregates: Vec<&CompactIndexedAttestation> = aggregates - .iter() - .filter(|indexed| { - validity_filter(&AttestationRef { - checkpoint: checkpoint_key, - data: &data, - indexed, - }) - }) - .collect(); - *num_valid += aggregates.len() as i64; - - // aggregate each cliques corresponding attestaiions - let mut clique_aggregates: Vec> = bron_kerbosch(&aggregates, is_compatible) - .iter() - .map(|clique| { - clique.iter().skip(1).fold(aggregates[clique[0]].clone(), |mut acc, &ind| { - acc.aggregate(&aggregates[ind]); - acc - }) + }) + .map(|(data, aggregates)| { + let aggregates: Vec<&CompactIndexedAttestation> = aggregates + .iter() + .filter(|indexed| { + validity_filter(&AttestationRef { + checkpoint: checkpoint_key, + data: &data, + indexed, }) - .collect(); - let mut indices_to_remove = Vec::new(); - clique_aggregates - .sort_unstable_by(|a, b| a.attesting_indices.len().cmp(&b.attesting_indices.len())); - for (index, clique) in clique_aggregates.iter().enumerate() { - for bigger_clique in clique_aggregates.iter().skip(index + 1) { - if clique - .aggregation_bits + }) + .collect(); + *num_valid += aggregates.len() as i64; + (data, aggregates) + }) + .collect::>)>>() + .into_par_iter() + .map(|(data, aggregates)| { + // aggregate each cliques corresponding attestaiions + let mut clique_aggregates: Vec> = bron_kerbosch(&aggregates, is_compatible) + .iter() + .map(|clique| { + clique.iter().skip(1).fold(aggregates[clique[0]].clone(), |mut acc, &ind| { + acc.aggregate(&aggregates[ind]); + acc + }) + }) + .collect(); + let mut indices_to_remove = Vec::new(); + clique_aggregates + .sort_unstable_by(|a, b| 
a.attesting_indices.len().cmp(&b.attesting_indices.len())); + for (index, clique) in clique_aggregates.iter().enumerate() { + for bigger_clique in clique_aggregates.iter().skip(index + 1) { + if clique + .aggregation_bits .is_subset(&bigger_clique.aggregation_bits) - { - indices_to_remove.push(index); - break; - } - } - } - - for index in indices_to_remove.iter().rev() { - clique_aggregates.swap_remove(*index); + { + indices_to_remove.push(index); + break; + } } + } - // aggregate unaggregate attestations into the clique aggregates - // if compatible - if let Some(unaggregate_attestations) = unaggregate_attestations.get(&data) { - for attestation in unaggregate_attestations.iter().filter(|indexed| { - validity_filter(&AttestationRef { - checkpoint: checkpoint_key, - data: &data, - indexed, - }) - }) { - *num_valid += 1; - for clique_aggregate in &mut clique_aggregates { - if !clique_aggregate - .attesting_indices + for index in indices_to_remove.iter().rev() { + clique_aggregates.swap_remove(*index); + } + (data, clique_aggregates) + }) + .collect::>)>>() + .into_iter() + .map(|(data, mut clique_aggregates)| { + // aggregate unaggregate attestations into the clique aggregates + // if compatible + if let Some(unaggregate_attestations) = unaggregate_attestations.get(&data) { + for attestation in unaggregate_attestations.iter().filter(|indexed| { + validity_filter(&AttestationRef { + checkpoint: checkpoint_key, + data: &data, + indexed, + }) + }) { + *num_valid += 1; + for clique_aggregate in &mut clique_aggregates { + if !clique_aggregate + .attesting_indices .contains(&attestation.attesting_indices[0]) - { - clique_aggregate.aggregate(attestation); - } - } + { + clique_aggregate.aggregate(attestation); + } } } - - cliqued_atts - .extend(clique_aggregates.into_iter().map(|indexed| (data, indexed))); } - } + (data, clique_aggregates) + }) + .collect(); + // include aggregated attestations from unaggregated attestations whose // attestation data doesn't appear in aggregated_attestations - for (data, attestations) in unaggregate_attestations { - if data.slot + spec.min_attestation_inclusion_delay <= state.slot() - && state.slot() <= data.slot + T::slots_per_epoch() - { - if !aggregate_attestations.contains_key(&data) { - let mut valid_attestations = attestations.iter().filter(|indexed| { - validity_filter(&AttestationRef { - checkpoint: checkpoint_key, - data: &data, - indexed, - }) + unaggregate_attestations + .iter() + .filter(|(data, _)| { + data.slot + spec.min_attestation_inclusion_delay <= state.slot() + && state.slot() <= data.slot + T::slots_per_epoch() + && !aggregate_attestations.contains_key(&data) + }) + .for_each(|(data, unaggregates)| { + let mut valid_attestations = unaggregates.iter().filter(|indexed| { + validity_filter(&AttestationRef { + checkpoint: checkpoint_key, + data: &data, + indexed, + }) + }); + if let Some(att) = valid_attestations.next() { + let mut att = att.clone(); + valid_attestations.for_each(|valid_att| { + att.aggregate(valid_att) }); - - if let Some(first) = valid_attestations.next() { - let mut agg = first.clone(); - for attestation in valid_attestations { - agg.aggregate(&attestation); - } - cliqued_atts.push((data, agg)); - } + cliques_from_aggregates.push((data, vec![att])) } - } - } + }); + cliques_from_aggregates + } else { + vec![] } - cliqued_atts } /// Get a list of attestations for inclusion in a block. 
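Patch 17 moves the per-`AttestationData` clique computation onto rayon's global pool via `into_par_iter`, keeping the `FnMut` validity filter on the serial stages. The subset pruning it retains sorts cliques by ascending size so that any aggregate whose signers are wholly covered by a larger clique is dropped before the unaggregate pass. A self-contained sketch of the same rule, with `u64` bitmasks standing in for the real `BitList` aggregation bits (an assumption for illustration):

// Drop any clique whose signer set is a subset of a larger clique's.
// `u64` bitmasks stand in for the real `BitList` aggregation bits.
fn prune_subsets(mut cliques: Vec<u64>) -> Vec<u64> {
    // Ascending size, so any covering clique appears later in the vector.
    cliques.sort_unstable_by_key(|bits| bits.count_ones());
    let mut kept = Vec::with_capacity(cliques.len());
    for (i, &bits) in cliques.iter().enumerate() {
        if !cliques[i + 1..].iter().any(|&big| bits & big == bits) {
            kept.push(bits);
        }
    }
    kept
}

fn main() {
    // {0,1} is covered by {0,1,2} and is pruned; {3} and {0,1,2} survive.
    assert_eq!(prune_subsets(vec![0b0111, 0b0011, 0b1000]), vec![0b1000, 0b0111]);
}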
@@ -386,14 +395,17 @@ impl OperationPool { let prev_epoch_cliqued_atts = prev_epoch_cliqued_atts .iter() - .map(|(data, indexed)| AttestationRef { - checkpoint: &prev_epoch_key, - data, - indexed, + .map(|(data, atts)| { + atts.iter().map(|indexed| AttestationRef { + checkpoint: &prev_epoch_key, + data, + indexed, + }) + .filter_map(|att| { + AttMaxCover::new(att, state, &reward_cache, total_active_balance, spec) + }) }) - .filter_map(|att| { - AttMaxCover::new(att, state, &reward_cache, total_active_balance, spec) - }); + .flat_map(|att_max_cover| att_max_cover); let curr_epoch_cliqued_atts = self.get_clique_aggregate_attestations_for_epoch( &curr_epoch_key, @@ -406,14 +418,17 @@ impl OperationPool { let curr_epoch_cliqued_atts = curr_epoch_cliqued_atts .iter() - .map(|(data, indexed)| AttestationRef { - checkpoint: &curr_epoch_key, - data, - indexed, + .map(|(data, atts)| { + atts.iter().map(|indexed| AttestationRef { + checkpoint: &curr_epoch_key, + data, + indexed, + }) + .filter_map(|att| { + AttMaxCover::new(att, state, &reward_cache, total_active_balance, spec) + }) }) - .filter_map(|att| { - AttMaxCover::new(att, state, &reward_cache, total_active_balance, spec) - }); + .flat_map(|att_max_cover| att_max_cover); let prev_epoch_limit = if let BeaconState::Base(base_state) = state { std::cmp::min( From 5d2c4ba35d716adf9e41f15bfbcfd26fba958335 Mon Sep 17 00:00:00 2001 From: geemo Date: Thu, 19 Oct 2023 00:32:40 -0500 Subject: [PATCH 18/37] using keyed version of max cover --- beacon_node/operation_pool/src/attestation.rs | 7 ++- .../operation_pool/src/attestation_storage.rs | 2 +- .../operation_pool/src/attester_slashing.rs | 2 + beacon_node/operation_pool/src/max_cover.rs | 63 +++++++++++++------ 4 files changed, 52 insertions(+), 22 deletions(-) diff --git a/beacon_node/operation_pool/src/attestation.rs b/beacon_node/operation_pool/src/attestation.rs index 97c291aa855..14edc32fe0d 100644 --- a/beacon_node/operation_pool/src/attestation.rs +++ b/beacon_node/operation_pool/src/attestation.rs @@ -1,4 +1,4 @@ -use crate::attestation_storage::AttestationRef; +use crate::attestation_storage::{AttestationRef, CompactAttestationData}; use crate::max_cover::MaxCover; use crate::reward_cache::RewardCache; use state_processing::common::{ @@ -127,6 +127,7 @@ impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> { type Object = Attestation; type Intermediate = AttestationRef<'a, T>; type Set = HashMap; + type Key = CompactAttestationData; fn intermediate(&self) -> &AttestationRef<'a, T> { &self.att @@ -164,6 +165,10 @@ impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> { fn score(&self) -> usize { self.fresh_validators_rewards.values().sum::() as usize } + + fn key(&self) -> CompactAttestationData { + self.att.data.clone() + } } /// Extract the validators for which `attestation` would be their earliest in the epoch. 
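The `Key` associated type introduced above is what lets `maximum_cover` bucket candidate attestations by their `CompactAttestationData`: for attestations, `update_covering_set` only prunes rewards when two items share the winner's attestation data, so a greedy pick only invalidates scores inside one bucket, as the later "Fix bug in max cover and optimise" patch makes explicit by refreshing only `all_items.get_mut(&best_key)`. A minimal sketch of that bucketed selection, with toy `Item`, `key` and `score` names standing in for the real `MaxCover` machinery:

use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
struct Item {
    key: &'static str, // stands in for CompactAttestationData
    score: usize,      // stands in for the summed validator rewards
}

fn main() {
    let items = vec![
        Item { key: "data_a", score: 3 },
        Item { key: "data_a", score: 5 },
        Item { key: "data_b", score: 4 },
    ];

    // Group by key, as `maximum_cover` does via `MaxCover::key()`.
    let mut by_key: HashMap<&'static str, Vec<Item>> = HashMap::new();
    for item in items {
        by_key.entry(item.key).or_default().push(item);
    }

    // Each greedy round still scans every bucket for the best item...
    let best = by_key
        .values()
        .flat_map(|bucket| bucket.iter())
        .max_by_key(|item| item.score)
        .cloned()
        .unwrap();
    assert_eq!(best.score, 5);

    // ...but only the winning bucket needs its covering sets refreshed.
    if let Some(bucket) = by_key.get_mut(best.key) {
        bucket.retain(|item| *item != best);
    }
}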
diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index e984515895a..3ab3f251a97 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -12,7 +12,7 @@ pub struct CheckpointKey { pub target_epoch: Epoch, } -#[derive(Debug, PartialEq, Eq, Hash)] +#[derive(Debug, PartialEq, Eq, Hash, Clone)] pub struct CompactAttestationData { pub slot: Slot, pub index: u64, diff --git a/beacon_node/operation_pool/src/attester_slashing.rs b/beacon_node/operation_pool/src/attester_slashing.rs index f5916384d4b..1793425d94a 100644 --- a/beacon_node/operation_pool/src/attester_slashing.rs +++ b/beacon_node/operation_pool/src/attester_slashing.rs @@ -42,6 +42,7 @@ impl<'a, T: EthSpec> MaxCover for AttesterSlashingMaxCover<'a, T> { type Intermediate = AttesterSlashing; /// The type used to represent sets. type Set = HashMap; + type Key = (); fn intermediate(&self) -> &AttesterSlashing { self.slashing @@ -69,4 +70,5 @@ impl<'a, T: EthSpec> MaxCover for AttesterSlashingMaxCover<'a, T> { fn score(&self) -> usize { self.effective_balances.values().sum::() as usize } + fn key(&self) -> Self::Key {} } diff --git a/beacon_node/operation_pool/src/max_cover.rs b/beacon_node/operation_pool/src/max_cover.rs index 2e629f786b3..69e46976e6c 100644 --- a/beacon_node/operation_pool/src/max_cover.rs +++ b/beacon_node/operation_pool/src/max_cover.rs @@ -1,3 +1,5 @@ +use std::{collections::HashMap, hash::Hash}; + use crate::metrics; use itertools::Itertools; @@ -15,6 +17,8 @@ pub trait MaxCover: Clone { type Intermediate: Clone; /// The type used to represent sets. type Set: Clone; + /// The type used to represent keys. + type Key: Clone + PartialEq + Eq + Hash; /// Extract the intermediate object. fn intermediate(&self) -> &Self::Intermediate; @@ -28,6 +32,8 @@ pub trait MaxCover: Clone { fn update_covering_set(&mut self, max_obj: &Self::Intermediate, max_set: &Self::Set); /// The quality of this item's covering set, usually its cardinality. fn score(&self) -> usize; + /// Convert value to keyed value. + fn key(&self) -> Self::Key; } /// Helper struct to track which items of the input are still available for inclusion. @@ -56,11 +62,15 @@ where T: MaxCover, { // Construct an initial vec of all items, marked available. - let mut all_items: Vec<_> = items_iter + let mut all_items: HashMap>> = items_iter .into_iter() .map(MaxCoverItem::new) .filter(|x| x.item.score() != 0) - .collect(); + .map(|val| (val.item.key(), val)) + .fold(HashMap::new(), |mut acc, (key, val)| { + acc.entry(key).or_insert(Vec::new()).push(val); + acc + }); metrics::set_int_gauge( &metrics::MAX_COVER_NON_ZERO_ITEMS, @@ -72,29 +82,39 @@ where for _ in 0..limit { // Select the item with the maximum score. - let best = match all_items + match all_items .iter_mut() - .filter(|x| x.available && x.item.score() != 0) - .max_by_key(|x| x.item.score()) + .max_by_key(|(_, vals)| { + vals.iter() + .filter(|val| val.available && val.item.score() != 0) + .map(|val| val.item.score()).max() + }) { - Some(x) => { - x.available = false; - x.item.clone() + Some((_, vals)) => { + let best = vals.iter().enumerate().max_by_key(|(_, val)| val.item.score()).map(|(ind, _)| ind); + if let Some(best_ind) = best { + let best_item = vals[best_ind].item.clone(); + if best_item.score() == 0 { + return result; + } else { + // Update the covering sets of the other items, for the inclusion of the selected item. 
+ // Items covered by the selected item can't be re-covered. + vals[best_ind].available = false; + vals + .iter_mut() + .filter(|x| x.available && x.item.score() != 0) + .for_each(|x| { + x.item + .update_covering_set(best_item.intermediate(), best_item.covering_set()) + }); + result.push(best_item) + } + } else { + return result; + } } None => return result, }; - - // Update the covering sets of the other items, for the inclusion of the selected item. - // Items covered by the selected item can't be re-covered. - all_items - .iter_mut() - .filter(|x| x.available && x.item.score() != 0) - .for_each(|x| { - x.item - .update_covering_set(best.intermediate(), best.covering_set()) - }); - - result.push(best); } result @@ -128,6 +148,7 @@ mod test { type Object = Self; type Intermediate = Self; type Set = Self; + type Key = (); fn intermediate(&self) -> &Self { self @@ -149,6 +170,8 @@ mod test { fn score(&self) -> usize { self.len() } + + fn key(&self) -> () {} } fn example_system() -> Vec> { From f7404d47ba98b55e2316fb16153cf98b50d227bd Mon Sep 17 00:00:00 2001 From: geemo Date: Thu, 19 Oct 2023 01:34:16 -0500 Subject: [PATCH 19/37] fmt --- .../operation_pool/src/attestation_storage.rs | 5 +- beacon_node/operation_pool/src/lib.rs | 195 +++++++++--------- beacon_node/operation_pool/src/max_cover.rs | 29 +-- 3 files changed, 118 insertions(+), 111 deletions(-) diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index 3ab3f251a97..efcd6d37f57 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -153,10 +153,7 @@ impl AttestationMap { indexed, } = SplitAttestation::new(attestation, attesting_indices); - let attestation_map = self - .checkpoint_map - .entry(checkpoint) - .or_default(); + let attestation_map = self.checkpoint_map.entry(checkpoint).or_default(); let attestations = if indexed.attesting_indices.len() > 1 { attestation_map .aggregate_attestations diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index e4d0f158fc6..1d3b48edef6 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -30,6 +30,7 @@ use max_cover::maximum_cover; use parking_lot::{RwLock, RwLockWriteGuard}; use rand::seq::SliceRandom; use rand::thread_rng; +use rayon::prelude::*; use state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::per_block_processing::{ get_slashable_indices_modular, verify_exit, VerifySignatures, @@ -44,7 +45,6 @@ use types::{ Epoch, EthSpec, ProposerSlashing, SignedBeaconBlock, SignedBlsToExecutionChange, SignedVoluntaryExit, Slot, SyncAggregate, SyncCommitteeContribution, Validator, }; -use rayon::prelude::*; type SyncContributions = RwLock>>>; @@ -233,88 +233,95 @@ impl OperationPool { spec: &'a ChainSpec, ) -> Vec<(&CompactAttestationData, Vec>)> { if let Some(AttestationDataMap { - aggregate_attestations, - unaggregate_attestations, - }) = all_attestations.get_attestation_map(checkpoint_key) + aggregate_attestations, + unaggregate_attestations, + }) = all_attestations.get_attestation_map(checkpoint_key) { - let mut cliques_from_aggregates: Vec<_> = aggregate_attestations.into_iter().filter(|(data, _)| { - data.slot + spec.min_attestation_inclusion_delay <= state.slot() - && state.slot() <= data.slot + T::slots_per_epoch() - }) - .map(|(data, aggregates)| { - let aggregates: Vec<&CompactIndexedAttestation> = aggregates - 
.iter() - .filter(|indexed| { - validity_filter(&AttestationRef { - checkpoint: checkpoint_key, - data: &data, - indexed, + let mut cliques_from_aggregates: Vec<_> = aggregate_attestations + .into_iter() + .filter(|(data, _)| { + data.slot + spec.min_attestation_inclusion_delay <= state.slot() + && state.slot() <= data.slot + T::slots_per_epoch() + }) + .map(|(data, aggregates)| { + let aggregates: Vec<&CompactIndexedAttestation> = aggregates + .iter() + .filter(|indexed| { + validity_filter(&AttestationRef { + checkpoint: checkpoint_key, + data: &data, + indexed, + }) }) - }) - .collect(); - *num_valid += aggregates.len() as i64; - (data, aggregates) - }) - .collect::>)>>() - .into_par_iter() - .map(|(data, aggregates)| { - // aggregate each cliques corresponding attestaiions - let mut clique_aggregates: Vec> = bron_kerbosch(&aggregates, is_compatible) - .iter() - .map(|clique| { - clique.iter().skip(1).fold(aggregates[clique[0]].clone(), |mut acc, &ind| { - acc.aggregate(&aggregates[ind]); - acc - }) - }) - .collect(); - let mut indices_to_remove = Vec::new(); - clique_aggregates - .sort_unstable_by(|a, b| a.attesting_indices.len().cmp(&b.attesting_indices.len())); - for (index, clique) in clique_aggregates.iter().enumerate() { - for bigger_clique in clique_aggregates.iter().skip(index + 1) { - if clique - .aggregation_bits + .collect(); + *num_valid += aggregates.len() as i64; + (data, aggregates) + }) + .collect::>)>>() + .into_par_iter() + .map(|(data, aggregates)| { + // aggregate each cliques corresponding attestaiions + let mut clique_aggregates: Vec> = + bron_kerbosch(&aggregates, is_compatible) + .iter() + .map(|clique| { + clique.iter().skip(1).fold( + aggregates[clique[0]].clone(), + |mut acc, &ind| { + acc.aggregate(&aggregates[ind]); + acc + }, + ) + }) + .collect(); + let mut indices_to_remove = Vec::new(); + clique_aggregates.sort_unstable_by(|a, b| { + a.attesting_indices.len().cmp(&b.attesting_indices.len()) + }); + for (index, clique) in clique_aggregates.iter().enumerate() { + for bigger_clique in clique_aggregates.iter().skip(index + 1) { + if clique + .aggregation_bits .is_subset(&bigger_clique.aggregation_bits) - { - indices_to_remove.push(index); - break; - } + { + indices_to_remove.push(index); + break; + } + } } - } - for index in indices_to_remove.iter().rev() { - clique_aggregates.swap_remove(*index); - } - (data, clique_aggregates) - }) - .collect::>)>>() - .into_iter() - .map(|(data, mut clique_aggregates)| { - // aggregate unaggregate attestations into the clique aggregates - // if compatible - if let Some(unaggregate_attestations) = unaggregate_attestations.get(&data) { - for attestation in unaggregate_attestations.iter().filter(|indexed| { - validity_filter(&AttestationRef { - checkpoint: checkpoint_key, - data: &data, - indexed, - }) - }) { - *num_valid += 1; - for clique_aggregate in &mut clique_aggregates { - if !clique_aggregate - .attesting_indices + for index in indices_to_remove.iter().rev() { + clique_aggregates.swap_remove(*index); + } + (data, clique_aggregates) + }) + .collect::>)>>() + .into_iter() + .map(|(data, mut clique_aggregates)| { + // aggregate unaggregate attestations into the clique aggregates + // if compatible + if let Some(unaggregate_attestations) = unaggregate_attestations.get(&data) { + for attestation in unaggregate_attestations.iter().filter(|indexed| { + validity_filter(&AttestationRef { + checkpoint: checkpoint_key, + data: &data, + indexed, + }) + }) { + *num_valid += 1; + for clique_aggregate in &mut clique_aggregates { + 
if !clique_aggregate + .attesting_indices .contains(&attestation.attesting_indices[0]) - { - clique_aggregate.aggregate(attestation); - } + { + clique_aggregate.aggregate(attestation); + } + } } } - } - (data, clique_aggregates) - }) - .collect(); + (data, clique_aggregates) + }) + .collect(); // include aggregated attestations from unaggregated attestations whose // attestation data doesn't appear in aggregated_attestations @@ -335,9 +342,7 @@ impl OperationPool { }); if let Some(att) = valid_attestations.next() { let mut att = att.clone(); - valid_attestations.for_each(|valid_att| { - att.aggregate(valid_att) - }); + valid_attestations.for_each(|valid_att| att.aggregate(valid_att)); cliques_from_aggregates.push((data, vec![att])) } }); @@ -396,14 +401,15 @@ impl OperationPool { let prev_epoch_cliqued_atts = prev_epoch_cliqued_atts .iter() .map(|(data, atts)| { - atts.iter().map(|indexed| AttestationRef { - checkpoint: &prev_epoch_key, - data, - indexed, - }) - .filter_map(|att| { - AttMaxCover::new(att, state, &reward_cache, total_active_balance, spec) - }) + atts.iter() + .map(|indexed| AttestationRef { + checkpoint: &prev_epoch_key, + data, + indexed, + }) + .filter_map(|att| { + AttMaxCover::new(att, state, &reward_cache, total_active_balance, spec) + }) }) .flat_map(|att_max_cover| att_max_cover); @@ -419,14 +425,15 @@ impl OperationPool { let curr_epoch_cliqued_atts = curr_epoch_cliqued_atts .iter() .map(|(data, atts)| { - atts.iter().map(|indexed| AttestationRef { - checkpoint: &curr_epoch_key, - data, - indexed, - }) - .filter_map(|att| { - AttMaxCover::new(att, state, &reward_cache, total_active_balance, spec) - }) + atts.iter() + .map(|indexed| AttestationRef { + checkpoint: &curr_epoch_key, + data, + indexed, + }) + .filter_map(|att| { + AttMaxCover::new(att, state, &reward_cache, total_active_balance, spec) + }) }) .flat_map(|att_max_cover| att_max_cover); diff --git a/beacon_node/operation_pool/src/max_cover.rs b/beacon_node/operation_pool/src/max_cover.rs index 69e46976e6c..f958bd0ca8e 100644 --- a/beacon_node/operation_pool/src/max_cover.rs +++ b/beacon_node/operation_pool/src/max_cover.rs @@ -82,16 +82,18 @@ where for _ in 0..limit { // Select the item with the maximum score. - match all_items - .iter_mut() - .max_by_key(|(_, vals)| { - vals.iter() - .filter(|val| val.available && val.item.score() != 0) - .map(|val| val.item.score()).max() - }) - { + match all_items.iter_mut().max_by_key(|(_, vals)| { + vals.iter() + .filter(|val| val.available && val.item.score() != 0) + .map(|val| val.item.score()) + .max() + }) { Some((_, vals)) => { - let best = vals.iter().enumerate().max_by_key(|(_, val)| val.item.score()).map(|(ind, _)| ind); + let best = vals + .iter() + .enumerate() + .max_by_key(|(_, val)| val.item.score()) + .map(|(ind, _)| ind); if let Some(best_ind) = best { let best_item = vals[best_ind].item.clone(); if best_item.score() == 0 { @@ -100,12 +102,13 @@ where // Update the covering sets of the other items, for the inclusion of the selected item. // Items covered by the selected item can't be re-covered. 
vals[best_ind].available = false; - vals - .iter_mut() + vals.iter_mut() .filter(|x| x.available && x.item.score() != 0) .for_each(|x| { - x.item - .update_covering_set(best_item.intermediate(), best_item.covering_set()) + x.item.update_covering_set( + best_item.intermediate(), + best_item.covering_set(), + ) }); result.push(best_item) } From 9e781e1e6b57a05fe5242861bd8edbff93ac3b79 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 20 Oct 2023 16:56:23 +1100 Subject: [PATCH 20/37] Remove allocations from `find_pivot` --- beacon_node/operation_pool/src/bron_kerbosch.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/beacon_node/operation_pool/src/bron_kerbosch.rs b/beacon_node/operation_pool/src/bron_kerbosch.rs index 6467ac9d004..29f992c3f4f 100644 --- a/beacon_node/operation_pool/src/bron_kerbosch.rs +++ b/beacon_node/operation_pool/src/bron_kerbosch.rs @@ -98,7 +98,6 @@ fn bron_kerbosch_aux( let pivot = find_pivot(&p, &x, neighbourhoods); ip.retain(|e| !neighbourhoods[pivot].contains(e)); - // while !mp.is_empty() { while !ip.is_empty() { // v let v = ip[0]; @@ -129,12 +128,10 @@ fn bron_kerbosch_aux( /// Identifies pivot for Bron-Kerbosh pivoting technique. fn find_pivot(p: &[usize], x: &[usize], neighbourhoods: &[Vec]) -> usize { - let mut px = p.to_vec(); - px.append(&mut x.to_vec()); - *px.iter() + *p.iter() + .chain(x.iter()) .min_by_key(|&e| { - let pp = p.to_vec(); - pp.iter() + p.iter() .filter(|ee| neighbourhoods[*e].contains(ee)) .count() }) From 06aed57e08f41ab039125a99659aa54439119ae1 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 20 Oct 2023 17:19:15 +1100 Subject: [PATCH 21/37] Fix bug in max cover and optimise --- beacon_node/operation_pool/src/max_cover.rs | 57 +++++++++------------ 1 file changed, 24 insertions(+), 33 deletions(-) diff --git a/beacon_node/operation_pool/src/max_cover.rs b/beacon_node/operation_pool/src/max_cover.rs index f958bd0ca8e..293fffdf594 100644 --- a/beacon_node/operation_pool/src/max_cover.rs +++ b/beacon_node/operation_pool/src/max_cover.rs @@ -82,42 +82,33 @@ where for _ in 0..limit { // Select the item with the maximum score. - match all_items.iter_mut().max_by_key(|(_, vals)| { - vals.iter() - .filter(|val| val.available && val.item.score() != 0) - .map(|val| val.item.score()) - .max() - }) { - Some((_, vals)) => { - let best = vals - .iter() - .enumerate() - .max_by_key(|(_, val)| val.item.score()) - .map(|(ind, _)| ind); - if let Some(best_ind) = best { - let best_item = vals[best_ind].item.clone(); - if best_item.score() == 0 { - return result; - } else { - // Update the covering sets of the other items, for the inclusion of the selected item. - // Items covered by the selected item can't be re-covered. - vals[best_ind].available = false; - vals.iter_mut() - .filter(|x| x.available && x.item.score() != 0) - .for_each(|x| { - x.item.update_covering_set( - best_item.intermediate(), - best_item.covering_set(), - ) - }); - result.push(best_item) - } - } else { - return result; - } + let best = all_items + .iter_mut() + .flat_map(|(key, items)| items.iter_mut().map(move |x| (key, x))) + .filter(|(_, val)| val.available && val.item.score() != 0) + .max_by_key(|(_, val)| val.item.score()); + + let (best_key, best_item) = match best { + Some((key, val)) => { + val.available = false; + (key.clone(), val.item.clone()) } None => return result, }; + + // Update the covering sets of the other items, for the inclusion of the selected item. + // Items covered by the selected item can't be re-covered. 
+ all_items.get_mut(&best_key).map(|items| { + items + .iter_mut() + .filter(|x| x.available && x.item.score() != 0) + .for_each(|x| { + x.item + .update_covering_set(best_item.intermediate(), best_item.covering_set()) + }); + }); + + result.push(best_item); } result From b23449f13474774b20c75251a3aa4e05e1749e48 Mon Sep 17 00:00:00 2001 From: geemo Date: Fri, 20 Oct 2023 17:27:06 -0500 Subject: [PATCH 22/37] add quickcheck tests --- Cargo.lock | 1 + beacon_node/operation_pool/Cargo.toml | 1 + .../operation_pool/src/bron_kerbosch.rs | 61 +++++++++++++++++++ 3 files changed, 63 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 70b77406432..9d3c21d9427 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5384,6 +5384,7 @@ dependencies = [ "lighthouse_metrics", "maplit", "parking_lot 0.12.1", + "quickcheck", "rand", "rayon", "serde", diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index dd8cd9c49ed..1ea5f0efecc 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -24,3 +24,4 @@ rand = { workspace = true } beacon_chain = { workspace = true } tokio = { workspace = true } maplit = { workspace = true } +quickcheck = { workspace = true } diff --git a/beacon_node/operation_pool/src/bron_kerbosch.rs b/beacon_node/operation_pool/src/bron_kerbosch.rs index 29f992c3f4f..3559498220a 100644 --- a/beacon_node/operation_pool/src/bron_kerbosch.rs +++ b/beacon_node/operation_pool/src/bron_kerbosch.rs @@ -141,6 +141,8 @@ fn find_pivot(p: &[usize], x: &[usize], neighbourhoods: &[Vec]) -> usize #[cfg(test)] mod tests { use super::*; + use quickcheck::quickcheck; + use std::collections::HashSet; #[test] fn bron_kerbosch_small_test() { let vertices: Vec = (0..7).collect(); @@ -165,4 +167,63 @@ mod tests { println!("{:?}", bron_kerbosch(&vertices, is_compatible)); } + + quickcheck! 
{ + fn no_panic(vertices: Vec, adjacencies: HashSet<(usize, usize)>) -> bool { + let is_compatible = |i: &usize, j: &usize| adjacencies.contains(&(*i, *j)) || adjacencies.contains(&(*j, *i)); + bron_kerbosch(&vertices, is_compatible); + true + } + + fn at_least_one_clique_returned(vertices: Vec, adjacencies: HashSet<(usize, usize)>) -> bool { + if vertices.len() == 0 { + return true; + } + let is_compatible = |i: &usize, j: &usize| adjacencies.contains(&(*i, *j)) || adjacencies.contains(&(*j, *i)); + let res = bron_kerbosch(&vertices, is_compatible); + res.len() >= 1 + } + + fn all_claimed_cliques_are_cliques(vertices: Vec, adjacencies: HashSet<(usize, usize)>) -> bool { + let is_compatible = |i: &usize, j: &usize| adjacencies.contains(&(*i, *j)) || adjacencies.contains(&(*j, *i)); + let claimed_cliques = bron_kerbosch(&vertices, is_compatible); + for clique in claimed_cliques { + for (ind1, vertex) in clique.iter().enumerate() { + for (ind2, other_vertex) in clique.iter().enumerate() { + if ind1 == ind2 { + continue + } + if !is_compatible(vertex, other_vertex) { + return false; + } + } + } + } + true + } + + fn no_clique_is_a_subset_of_other_clique(vertices: Vec, adjacencies: HashSet<(usize, usize)>) -> bool { + let is_compatible = |i: &usize, j: &usize| adjacencies.contains(&(*i, *j)) || adjacencies.contains(&(*j, *i)); + let claimed_cliques = bron_kerbosch(&vertices, is_compatible); + let is_subset = |set1: Vec, set2: Vec| -> bool { + for vertex in set1 { + if !set2.contains(&vertex) { + return false; + } + } + true + }; + for (ind1, clique) in claimed_cliques.iter().enumerate() { + for (ind2, other_clique) in claimed_cliques.iter().enumerate() { + if ind1 == ind2 { + continue; + } + if is_subset(clique.clone(), other_clique.clone()) { + return false; + } + } + } + true + } + } } From 01d2a132ff5babbacd610078deaddf304ce6bef1 Mon Sep 17 00:00:00 2001 From: geemo Date: Sat, 21 Oct 2023 22:28:32 -0500 Subject: [PATCH 23/37] corrected test --- beacon_node/operation_pool/src/bron_kerbosch.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/beacon_node/operation_pool/src/bron_kerbosch.rs b/beacon_node/operation_pool/src/bron_kerbosch.rs index 3559498220a..81633c7c761 100644 --- a/beacon_node/operation_pool/src/bron_kerbosch.rs +++ b/beacon_node/operation_pool/src/bron_kerbosch.rs @@ -187,13 +187,14 @@ mod tests { fn all_claimed_cliques_are_cliques(vertices: Vec, adjacencies: HashSet<(usize, usize)>) -> bool { let is_compatible = |i: &usize, j: &usize| adjacencies.contains(&(*i, *j)) || adjacencies.contains(&(*j, *i)); let claimed_cliques = bron_kerbosch(&vertices, is_compatible); + println!("{:?}", claimed_cliques); for clique in claimed_cliques { for (ind1, vertex) in clique.iter().enumerate() { for (ind2, other_vertex) in clique.iter().enumerate() { if ind1 == ind2 { continue } - if !is_compatible(vertex, other_vertex) { + if !is_compatible(&vertices[*vertex], &vertices[*other_vertex]) { return false; } } From a4fa4cc114e00c9869458ba7e96761fb5f7f91ca Mon Sep 17 00:00:00 2001 From: geemo Date: Wed, 25 Oct 2023 15:06:18 -0500 Subject: [PATCH 24/37] added test and fixed other stuff. 
Test is failing --- .../operation_pool/src/attestation_storage.rs | 17 +-- beacon_node/operation_pool/src/lib.rs | 109 +++++++++++++++++- 2 files changed, 112 insertions(+), 14 deletions(-) diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index efcd6d37f57..fa01fd047fb 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -244,28 +244,23 @@ impl AttestationDataMap { pub fn stats(&self) -> AttestationStats { let mut stats = AttestationStats::default(); - let mut data_to_num_attestations: HashMap<&CompactAttestationData, usize> = HashMap::new(); + let mut max_aggregates_per_data = 0; - for (data, aggregates) in self.aggregate_attestations.iter() { + for (_, aggregates) in self.aggregate_attestations.iter() { stats.num_attestations += aggregates.len(); stats.num_attestation_data += 1; stats.max_aggregates_per_data = std::cmp::max(stats.max_aggregates_per_data, aggregates.len()); - data_to_num_attestations.insert(data, aggregates.len()); + if aggregates.len() > max_aggregates_per_data { + max_aggregates_per_data = aggregates.len(); + } } for (data, unaggregates) in self.unaggregate_attestations.iter() { stats.num_attestations += unaggregates.len(); - if let Some(aggregates_num) = data_to_num_attestations.get(data) { - stats.max_aggregates_per_data = std::cmp::max( - stats.max_aggregates_per_data, - aggregates_num + unaggregates.len(), - ); - } else { + if !self.aggregate_attestations.contains_key(data) { stats.num_attestation_data += 1; - stats.max_aggregates_per_data = - std::cmp::max(stats.max_aggregates_per_data, unaggregates.len()); } } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 1d3b48edef6..963414af756 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -858,9 +858,7 @@ fn is_compatible( x: &&CompactIndexedAttestation, y: &&CompactIndexedAttestation, ) -> bool { - let x_attester_set: HashSet = x.attesting_indices.iter().cloned().collect(); - let y_attester_set: HashSet = y.attesting_indices.iter().cloned().collect(); - x_attester_set.is_disjoint(&y_attester_set) + x.signers_disjoint_from(y) } /// Filter up to a maximum number of operations out of an iterator. @@ -929,6 +927,7 @@ mod release_tests { }; use lazy_static::lazy_static; use maplit::hashset; + use state_processing::state_advance::complete_state_advance; use state_processing::{common::get_attesting_indices_from_state, VerifyOperation}; use std::collections::BTreeSet; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; @@ -1194,6 +1193,110 @@ mod release_tests { assert_eq!(op_pool.num_attestations(), committees.len()); } + /// Adding lots of attestations that only intersect pairwise should lead to two aggregate + /// attestations. 
+ #[test] + fn attestation_pairwise_overlapping() { + let (harness, ref spec) = attestation_test_state::(1); + + let mut state = harness.get_current_state(); + + let op_pool = OperationPool::::new(); + + let slot = state.slot(); + let committees = state + .get_beacon_committees_at_slot(slot) + .unwrap() + .into_iter() + .map(BeaconCommittee::into_owned) + .collect::>(); + + let num_validators = 16 as usize * spec.target_committee_size; + + let attestations = harness.make_attestations( + (0..num_validators).collect::>().as_slice(), + &state, + Hash256::zero(), + SignedBeaconBlockHash::from(Hash256::zero()), + slot, + ); + + let step_size = 2; + // Create attestations that overlap on `step_size` validators, like: + // {0,1,2,3}, {2,3,4,5}, {4,5,6,7}, ... + for (atts1, _) in attestations { + let atts2 = atts1.clone(); + let aggs1 = atts1 + .chunks_exact(step_size * 2) + .map(|chunk| { + let agg = chunk.into_iter().map(|(att, _)| att).fold::, + >, _>( + None, + |att, new_att| { + if let Some(mut a) = att { + a.aggregate(new_att); + Some(a) + } else { + Some(new_att.clone()) + } + }, + ); + agg.unwrap() + }) + .collect::>(); + let aggs2 = atts2 + .into_iter() + .skip(step_size) + .collect::>() + .as_slice() + .chunks_exact(step_size * 2) + .map(|chunk| { + let agg = chunk.into_iter().map(|(att, _)| att).fold::, + >, _>( + None, + |att, new_att| { + if let Some(mut a) = att { + a.aggregate(new_att); + Some(a) + } else { + Some(new_att.clone()) + } + }, + ); + agg.unwrap() + }) + .collect::>(); + + for att in aggs1.into_iter().chain(aggs2.into_iter()) { + let attesting_indices = get_attesting_indices_from_state(&state, &att).unwrap(); + op_pool.insert_attestation(att, attesting_indices).unwrap(); + } + } + + let mut num_valid = 0; + let (prev, curr) = CheckpointKey::keys_for_state(&state); + let all_attestations = op_pool.attestations.read(); + + complete_state_advance(&mut state, None, Slot::new(1), spec).unwrap(); + let aggregated_attestations = op_pool.get_clique_aggregate_attestations_for_epoch( + &curr, + &all_attestations, + &state, + |_| true, + &mut num_valid, + spec + ); + + // The attestations should get aggregated into two attestations that comprise all + // validators. + let stats = op_pool.attestation_stats(); + assert_eq!(stats.num_attestation_data, committees.len()); + // assert_eq!(stats.num_attestations, 2 * committees.len()); + assert_eq!(aggregated_attestations[0].1.len(), 2); + } + /// Create a bunch of attestations signed by a small number of validators, and another /// bunch signed by a larger number, such that there are at least `max_attestations` /// signed by the larger number. 
Then, check that `get_attestations` only returns the From 2dba1275cd912bed8cb9ff95d085a2e48c6275ff Mon Sep 17 00:00:00 2001 From: geemo Date: Sun, 29 Oct 2023 02:45:06 -0500 Subject: [PATCH 25/37] HashTrieSet --- Cargo.lock | 19 +++++ Cargo.toml | 1 + beacon_node/operation_pool/Cargo.toml | 1 + .../operation_pool/src/bron_kerbosch.rs | 70 +++++++++---------- beacon_node/operation_pool/src/lib.rs | 2 +- 5 files changed, 57 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9d3c21d9427..da72ccbb5df 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -215,6 +215,15 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +[[package]] +name = "archery" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab7d8a6d00b222909638a01ddcc8c533219e9d5bfada1613afae43481f2fc699" +dependencies = [ + "static_assertions", +] + [[package]] name = "arrayref" version = "0.3.7" @@ -5387,6 +5396,7 @@ dependencies = [ "quickcheck", "rand", "rayon", + "rpds", "serde", "state_processing", "store", @@ -6421,6 +6431,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "rpds" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99334e9410cf4d16241bb88b27bc282e140327a4c4759be76f8a96e6d0cd0f35" +dependencies = [ + "archery", +] + [[package]] name = "rtnetlink" version = "0.10.1" diff --git a/Cargo.toml b/Cargo.toml index 9adb913ff5e..7a29e197c50 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -139,6 +139,7 @@ rayon = "1.7" regex = "1" reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "stream", "rustls-tls", "native-tls-vendored"] } ring = "0.16" +rpds = "1.0.1" rusqlite = { version = "0.28", features = ["bundled"] } serde = { version = "1", features = ["derive"] } serde_json = "1" diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index 1ea5f0efecc..2932dfafd57 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -19,6 +19,7 @@ serde = { workspace = true } store = { workspace = true } bitvec = { workspace = true } rand = { workspace = true } +rpds = { workspace = true } [dev-dependencies] beacon_chain = { workspace = true } diff --git a/beacon_node/operation_pool/src/bron_kerbosch.rs b/beacon_node/operation_pool/src/bron_kerbosch.rs index 81633c7c761..217a9e025fd 100644 --- a/beacon_node/operation_pool/src/bron_kerbosch.rs +++ b/beacon_node/operation_pool/src/bron_kerbosch.rs @@ -1,12 +1,13 @@ +use rpds::HashTrieSet; /// Entry point for the Bron-Kerbosh algorithm. Takes a vector of `vertices` of type /// `T : Compatible`. Returns all the maximal cliques (as a matrix of indices) for the graph /// `G = (V,E)` where `V` is `vertices` and `E` encodes the `is_compatible` relationship. 
pub fn bron_kerbosch bool>( vertices: &Vec, is_compatible: F, -) -> Vec> { +) -> Vec> { // create empty vector to store cliques - let mut cliques: Vec> = vec![]; + let mut cliques: Vec> = vec![]; if vertices.len() > 0 { // build neighbourhoods and degeneracy ordering, also move to index-based reasoning @@ -21,7 +22,7 @@ pub fn bron_kerbosch bool>( .filter(|j| neighbourhoods[vi].contains(&ordering[*j])) .map(|j| ordering[j]) .collect(); - let r = vec![vi]; + let r = HashTrieSet::default().insert(vi); let x = (0..i) .filter(|j| neighbourhoods[vi].contains(&ordering[*j])) .map(|j| ordering[j]) @@ -80,54 +81,40 @@ fn degeneracy_order(num_vertices: usize, neighbourhoods: &[Vec]) -> Vec( - r: Vec, - p: Vec, - x: Vec, + r: HashTrieSet, + mut p: HashTrieSet, + mut x: HashTrieSet, neighbourhoods: &Vec>, publish_clique: &mut F, ) where - F: FnMut(Vec), + F: FnMut(HashTrieSet), { if p.is_empty() && x.is_empty() { publish_clique(r); return; } - // modified p (p \ neighbourhood(pivot)), modified x - let (mut ip, mut mp, mut mx) = (p.clone(), p.clone(), x.clone()); let pivot = find_pivot(&p, &x, neighbourhoods); - ip.retain(|e| !neighbourhoods[pivot].contains(e)); - - while !ip.is_empty() { - // v - let v = ip[0]; - - let n = &neighbourhoods[v]; - - let (mut nr, mut np, mut nx) = (r.clone(), mp.clone(), mx.clone()); + let pivot_neighbours: HashTrieSet = neighbourhoods[pivot].iter().cloned().collect(); + + let ip = p.iter().cloned().filter(|e| !pivot_neighbours.contains(e)).collect::>(); - // r U { v } - nr.push(v); + for v in ip.iter() { + let n_set: HashTrieSet = neighbourhoods[*v].iter().cloned().collect(); - // p intersect neighbourhood { v } - np.retain(|e| n.contains(e)); + let nr = r.insert(*v); + let np = p.iter().cloned().filter(|e| n_set.contains(e)).collect::>(); + let nx = x.iter().cloned().filter(|e| n_set.contains(e)).collect::>(); - // x intersect neighbourhood { v } - nx.retain(|e| n.contains(e)); - - // recursive call bron_kerbosch_aux(nr, np, nx, neighbourhoods, publish_clique); - // p \ { v }, x U { v } - mp.remove(mp.iter().position(|x| *x == v).unwrap()); - ip = mp.clone(); - ip.retain(|e| !neighbourhoods[pivot].contains(e)); - mx.push(v); + p = p.remove(v); + x = x.insert(*v); } } /// Identifies pivot for Bron-Kerbosh pivoting technique. 
-fn find_pivot(p: &[usize], x: &[usize], neighbourhoods: &[Vec]) -> usize { +fn find_pivot(p: &HashTrieSet, x: &HashTrieSet, neighbourhoods: &[Vec]) -> usize { *p.iter() .chain(x.iter()) .min_by_key(|&e| { @@ -184,10 +171,23 @@ mod tests { res.len() >= 1 } + fn no_clique_is_empty(vertices: Vec, adjacencies: HashSet<(usize, usize)>) -> bool { + if vertices.len() == 0 { + return true; + } + let is_compatible = |i: &usize, j: &usize| adjacencies.contains(&(*i, *j)) || adjacencies.contains(&(*j, *i)); + let res = bron_kerbosch(&vertices, is_compatible); + for clique in res.iter() { + if clique.is_empty() { + return false; + } + } + true + } + fn all_claimed_cliques_are_cliques(vertices: Vec, adjacencies: HashSet<(usize, usize)>) -> bool { let is_compatible = |i: &usize, j: &usize| adjacencies.contains(&(*i, *j)) || adjacencies.contains(&(*j, *i)); let claimed_cliques = bron_kerbosch(&vertices, is_compatible); - println!("{:?}", claimed_cliques); for clique in claimed_cliques { for (ind1, vertex) in clique.iter().enumerate() { for (ind2, other_vertex) in clique.iter().enumerate() { @@ -206,8 +206,8 @@ mod tests { fn no_clique_is_a_subset_of_other_clique(vertices: Vec, adjacencies: HashSet<(usize, usize)>) -> bool { let is_compatible = |i: &usize, j: &usize| adjacencies.contains(&(*i, *j)) || adjacencies.contains(&(*j, *i)); let claimed_cliques = bron_kerbosch(&vertices, is_compatible); - let is_subset = |set1: Vec, set2: Vec| -> bool { - for vertex in set1 { + let is_subset = |set1: HashTrieSet, set2: HashTrieSet| -> bool { + for vertex in set1.iter() { if !set2.contains(&vertex) { return false; } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 963414af756..f6a1bf96fbd 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -266,7 +266,7 @@ impl OperationPool { .iter() .map(|clique| { clique.iter().skip(1).fold( - aggregates[clique[0]].clone(), + aggregates[*clique.iter().next().unwrap()].clone(), |mut acc, &ind| { acc.aggregate(&aggregates[ind]); acc From b93979a1c755f3a4cb2e1ba50f4132af6a8fc91d Mon Sep 17 00:00:00 2001 From: geemo Date: Mon, 30 Oct 2023 12:20:39 -0500 Subject: [PATCH 26/37] Better use of HashTrieSet --- .../operation_pool/src/bron_kerbosch.rs | 19 ++++++++++++++++--- beacon_node/operation_pool/src/lib.rs | 2 +- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/beacon_node/operation_pool/src/bron_kerbosch.rs b/beacon_node/operation_pool/src/bron_kerbosch.rs index 217a9e025fd..c648a7e1e39 100644 --- a/beacon_node/operation_pool/src/bron_kerbosch.rs +++ b/beacon_node/operation_pool/src/bron_kerbosch.rs @@ -97,14 +97,14 @@ fn bron_kerbosch_aux( let pivot = find_pivot(&p, &x, neighbourhoods); let pivot_neighbours: HashTrieSet = neighbourhoods[pivot].iter().cloned().collect(); - let ip = p.iter().cloned().filter(|e| !pivot_neighbours.contains(e)).collect::>(); + let ip = hash_set_filter(&p, |e| pivot_neighbours.contains(e)); for v in ip.iter() { let n_set: HashTrieSet = neighbourhoods[*v].iter().cloned().collect(); let nr = r.insert(*v); - let np = p.iter().cloned().filter(|e| n_set.contains(e)).collect::>(); - let nx = x.iter().cloned().filter(|e| n_set.contains(e)).collect::>(); + let np = hash_set_filter(&p, |e| !n_set.contains(e)); + let nx = hash_set_filter(&x, |e| !n_set.contains(e)); bron_kerbosch_aux(nr, np, nx, neighbourhoods, publish_clique); @@ -125,6 +125,19 @@ fn find_pivot(p: &HashTrieSet, x: &HashTrieSet, neighbourhoods: &[ .unwrap() } +fn hash_set_filter
<P>(set: &HashTrieSet<usize>, predicate: P) -> HashTrieSet<usize>
+where
+    P: Fn(&usize) -> bool
+{
+    let mut new_set = set.clone();
+    for e in set.iter() {
+        if predicate(e) {
+            new_set.remove_mut(e);
+        }
+    }
+    new_set
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs
index f6a1bf96fbd..5be7d5b918e 100644
--- a/beacon_node/operation_pool/src/lib.rs
+++ b/beacon_node/operation_pool/src/lib.rs
@@ -1211,7 +1211,7 @@ mod release_tests {
             .map(BeaconCommittee::into_owned)
             .collect::<Vec<_>>();
 
-        let num_validators = 16 as usize * spec.target_committee_size;
+        let num_validators = 32 as usize * spec.target_committee_size;
 
         let attestations = harness.make_attestations(
             (0..num_validators).collect::<Vec<usize>>().as_slice(),

From e6f87efd3fd9203c290cd468a2f15efb3194b6b5 Mon Sep 17 00:00:00 2001
From: geemo
Date: Mon, 30 Oct 2023 13:12:01 -0500
Subject: [PATCH 27/37] fmt

---
 beacon_node/operation_pool/src/bron_kerbosch.rs | 12 ++++++++----
 beacon_node/operation_pool/src/lib.rs           |  4 ++--
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/beacon_node/operation_pool/src/bron_kerbosch.rs b/beacon_node/operation_pool/src/bron_kerbosch.rs
index c648a7e1e39..27ef537fbd5 100644
--- a/beacon_node/operation_pool/src/bron_kerbosch.rs
+++ b/beacon_node/operation_pool/src/bron_kerbosch.rs
@@ -96,7 +96,7 @@ fn bron_kerbosch_aux(
     let pivot = find_pivot(&p, &x, neighbourhoods);
     let pivot_neighbours: HashTrieSet<usize> = neighbourhoods[pivot].iter().cloned().collect();
-
+
     let ip = hash_set_filter(&p, |e| pivot_neighbours.contains(e));
 
     for v in ip.iter() {
@@ -114,7 +114,11 @@ fn bron_kerbosch_aux(
 }
 
 /// Identifies pivot for Bron-Kerbosh pivoting technique.
-fn find_pivot(p: &HashTrieSet<usize>, x: &HashTrieSet<usize>, neighbourhoods: &[Vec<usize>]) -> usize {
+fn find_pivot(
+    p: &HashTrieSet<usize>,
+    x: &HashTrieSet<usize>,
+    neighbourhoods: &[Vec<usize>],
+) -> usize {
     *p.iter()
         .chain(x.iter())
         .min_by_key(|&e| {
@@ -125,9 +129,9 @@ fn find_pivot(p: &HashTrieSet<usize>, x: &HashTrieSet<usize>, neighbourhoods: &[
         .unwrap()
 }
 
-fn hash_set_filter<P>(set: &HashTrieSet<usize>, predicate: P) -> HashTrieSet<usize>
+fn hash_set_filter<P>
(set: &HashTrieSet, predicate: P) -> HashTrieSet where - P: Fn(&usize) -> bool + P: Fn(&usize) -> bool, { let mut new_set = set.clone(); for e in set.iter() { diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 5be7d5b918e..55121c7700f 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1211,7 +1211,7 @@ mod release_tests { .map(BeaconCommittee::into_owned) .collect::>(); - let num_validators = 32 as usize * spec.target_committee_size; + let num_validators = 16 as usize * spec.target_committee_size; let attestations = harness.make_attestations( (0..num_validators).collect::>().as_slice(), @@ -1286,7 +1286,7 @@ mod release_tests { &state, |_| true, &mut num_valid, - spec + spec, ); // The attestations should get aggregated into two attestations that comprise all From e1a01e34180f90aacf775187016cc4e025d0ac97 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 31 Oct 2023 13:42:11 +1100 Subject: [PATCH 28/37] Fix degeneracy order and remove more clones --- .../operation_pool/src/bron_kerbosch.rs | 40 +++++++------------ beacon_node/operation_pool/src/lib.rs | 29 ++++++++++---- 2 files changed, 37 insertions(+), 32 deletions(-) diff --git a/beacon_node/operation_pool/src/bron_kerbosch.rs b/beacon_node/operation_pool/src/bron_kerbosch.rs index 27ef537fbd5..3859482b7e6 100644 --- a/beacon_node/operation_pool/src/bron_kerbosch.rs +++ b/beacon_node/operation_pool/src/bron_kerbosch.rs @@ -11,7 +11,7 @@ pub fn bron_kerbosch bool>( if vertices.len() > 0 { // build neighbourhoods and degeneracy ordering, also move to index-based reasoning - let neighbourhoods = compute_neigbourhoods(vertices, is_compatible); + let neighbourhoods = compute_neighbourhoods(vertices, is_compatible); let ordering = degeneracy_order(vertices.len(), &neighbourhoods); let mut publish_clique = |c| cliques.push(c); @@ -39,17 +39,17 @@ pub fn bron_kerbosch bool>( /// `is_compatible(&a, &b) == true`. The function assumes that `is_compatible` is symmetric, /// and returns a symmetric matrix (`Vec>`) of indices, where each index corresponds /// to the relative vertex in `vertices`. -fn compute_neigbourhoods bool>( +fn compute_neighbourhoods bool>( vertices: &Vec, is_compatible: F, -) -> Vec> { +) -> Vec> { let mut neighbourhoods = vec![]; - neighbourhoods.resize_with(vertices.len(), Vec::new); + neighbourhoods.resize_with(vertices.len(), HashTrieSet::new); for i in 0..vertices.len() - 1 { for j in i + 1..vertices.len() { if is_compatible(&vertices[i], &vertices[j]) { - neighbourhoods[i].push(j); - neighbourhoods[j].push(i); + neighbourhoods[i].insert_mut(j); + neighbourhoods[j].insert_mut(i); } } } @@ -57,20 +57,10 @@ fn compute_neigbourhoods bool>( } /// Produces a degeneracy ordering of a set of vertices. -fn degeneracy_order(num_vertices: usize, neighbourhoods: &[Vec]) -> Vec { +fn degeneracy_order(num_vertices: usize, neighbourhoods: &[HashTrieSet]) -> Vec { let mut v: Vec = (0..num_vertices).collect(); - let mut o = vec![]; - // move vertices from v to o in order of minimum degree - while !v.is_empty() { - let m: Option = (0..v.len()).min_by_key(|i| neighbourhoods[*i].len()); - if let Some(i) = m { - o.push(v[i]); - v.remove(i); - } else { - break; - } - } - o + v.sort_unstable_by_key(|i| neighbourhoods[*i].size()); + v } /// Auxiliary function to be used in the recursive call of the Bron-Kerbosh algorithm. 
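Worth spelling out why the `degeneracy_order` hunk above is a fix: the old loop indexed `neighbourhoods` with positions into the shrinking `v` rather than with the vertex ids stored in `v`, so it ordered vertices by the wrong degrees. The replacement sorts vertex ids by their static degree once. Bron-Kerbosch remains correct under any vertex ordering (the ordering only affects how the candidate sets shrink, hence performance), and a textbook degeneracy order would recompute minimum degrees after each removal; the single sort is a cheaper approximation. A toy check of the fixed behaviour, standalone and using `Vec` neighbourhoods for brevity (`order_by_degree` is a name invented for this sketch):

/// Order vertex ids by ascending static degree, as the patched
/// `degeneracy_order` now does (a stable sort keeps this example deterministic).
fn order_by_degree(neighbourhoods: &[Vec<usize>]) -> Vec<usize> {
    let mut order: Vec<usize> = (0..neighbourhoods.len()).collect();
    order.sort_by_key(|&v| neighbourhoods[v].len());
    order
}

fn main() {
    // Star graph: vertex 0 joined to 1, 2 and 3.
    let neighbourhoods = vec![vec![1, 2, 3], vec![0], vec![0], vec![0]];
    // The hub has the highest degree, so it is visited last.
    assert_eq!(order_by_degree(&neighbourhoods), vec![1, 2, 3, 0]);
}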
@@ -84,7 +74,7 @@ fn bron_kerbosch_aux( r: HashTrieSet, mut p: HashTrieSet, mut x: HashTrieSet, - neighbourhoods: &Vec>, + neighbourhoods: &Vec>, publish_clique: &mut F, ) where F: FnMut(HashTrieSet), @@ -95,12 +85,12 @@ fn bron_kerbosch_aux( } let pivot = find_pivot(&p, &x, neighbourhoods); - let pivot_neighbours: HashTrieSet = neighbourhoods[pivot].iter().cloned().collect(); + let pivot_neighbours: &HashTrieSet = &neighbourhoods[pivot]; let ip = hash_set_filter(&p, |e| pivot_neighbours.contains(e)); for v in ip.iter() { - let n_set: HashTrieSet = neighbourhoods[*v].iter().cloned().collect(); + let n_set: &HashTrieSet = &neighbourhoods[*v]; let nr = r.insert(*v); let np = hash_set_filter(&p, |e| !n_set.contains(e)); @@ -108,8 +98,8 @@ fn bron_kerbosch_aux( bron_kerbosch_aux(nr, np, nx, neighbourhoods, publish_clique); - p = p.remove(v); - x = x.insert(*v); + p.remove_mut(v); + x.insert_mut(*v); } } @@ -117,7 +107,7 @@ fn bron_kerbosch_aux( fn find_pivot( p: &HashTrieSet, x: &HashTrieSet, - neighbourhoods: &[Vec], + neighbourhoods: &[HashTrieSet], ) -> usize { *p.iter() .chain(x.iter()) diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 55121c7700f..de6246b99a0 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1211,7 +1211,8 @@ mod release_tests { .map(BeaconCommittee::into_owned) .collect::>(); - let num_validators = 16 as usize * spec.target_committee_size; + // NOTE: this test is VERY sensitive to the number of attestations + let num_validators = 16 * spec.target_committee_size; let attestations = harness.make_attestations( (0..num_validators).collect::>().as_slice(), @@ -1225,6 +1226,11 @@ mod release_tests { // Create attestations that overlap on `step_size` validators, like: // {0,1,2,3}, {2,3,4,5}, {4,5,6,7}, ... for (atts1, _) in attestations { + assert_eq!( + (atts1.len() - step_size) % (2 * step_size), + 0, + "to get two aggregates we need to be able to split by 2*step_size" + ); let atts2 = atts1.clone(); let aggs1 = atts1 .chunks_exact(step_size * 2) @@ -1276,11 +1282,11 @@ mod release_tests { } let mut num_valid = 0; - let (prev, curr) = CheckpointKey::keys_for_state(&state); + let (_, curr) = CheckpointKey::keys_for_state(&state); let all_attestations = op_pool.attestations.read(); complete_state_advance(&mut state, None, Slot::new(1), spec).unwrap(); - let aggregated_attestations = op_pool.get_clique_aggregate_attestations_for_epoch( + let clique_attestations = op_pool.get_clique_aggregate_attestations_for_epoch( &curr, &all_attestations, &state, @@ -1288,13 +1294,22 @@ mod release_tests { &mut num_valid, spec, ); + let best_attestations = op_pool + .get_attestations(&state, |_| true, |_| true, spec) + .unwrap(); + + // There should only be attestations for 1 attestation data. + let stats = op_pool.attestation_stats(); + assert_eq!(committees.len(), 1); + assert_eq!(stats.num_attestation_data, 1); + + // There should be multiple cliques for this single attestation data. + assert_eq!(clique_attestations.len(), 1); + assert!(clique_attestations[0].1.len() > 1); // The attestations should get aggregated into two attestations that comprise all // validators. 
- let stats = op_pool.attestation_stats(); - assert_eq!(stats.num_attestation_data, committees.len()); - // assert_eq!(stats.num_attestations, 2 * committees.len()); - assert_eq!(aggregated_attestations[0].1.len(), 2); + assert_eq!(best_attestations.len(), 2); } /// Create a bunch of attestations signed by a small number of validators, and another From c53d23683ca059f5ca801622e95c43fd4d5e0810 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 31 Oct 2023 16:13:54 +1100 Subject: [PATCH 29/37] Back to vecs --- .../operation_pool/src/bron_kerbosch.rs | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/beacon_node/operation_pool/src/bron_kerbosch.rs b/beacon_node/operation_pool/src/bron_kerbosch.rs index 3859482b7e6..550178b0948 100644 --- a/beacon_node/operation_pool/src/bron_kerbosch.rs +++ b/beacon_node/operation_pool/src/bron_kerbosch.rs @@ -42,14 +42,14 @@ pub fn bron_kerbosch bool>( fn compute_neighbourhoods bool>( vertices: &Vec, is_compatible: F, -) -> Vec> { +) -> Vec> { let mut neighbourhoods = vec![]; - neighbourhoods.resize_with(vertices.len(), HashTrieSet::new); + neighbourhoods.resize_with(vertices.len(), Vec::new); for i in 0..vertices.len() - 1 { for j in i + 1..vertices.len() { if is_compatible(&vertices[i], &vertices[j]) { - neighbourhoods[i].insert_mut(j); - neighbourhoods[j].insert_mut(i); + neighbourhoods[i].push(j); + neighbourhoods[j].push(i); } } } @@ -57,9 +57,9 @@ fn compute_neighbourhoods bool>( } /// Produces a degeneracy ordering of a set of vertices. -fn degeneracy_order(num_vertices: usize, neighbourhoods: &[HashTrieSet]) -> Vec { +fn degeneracy_order(num_vertices: usize, neighbourhoods: &[Vec]) -> Vec { let mut v: Vec = (0..num_vertices).collect(); - v.sort_unstable_by_key(|i| neighbourhoods[*i].size()); + v.sort_unstable_by_key(|i| neighbourhoods[*i].len()); v } @@ -74,7 +74,7 @@ fn bron_kerbosch_aux( r: HashTrieSet, mut p: HashTrieSet, mut x: HashTrieSet, - neighbourhoods: &Vec>, + neighbourhoods: &Vec>, publish_clique: &mut F, ) where F: FnMut(HashTrieSet), @@ -85,12 +85,12 @@ fn bron_kerbosch_aux( } let pivot = find_pivot(&p, &x, neighbourhoods); - let pivot_neighbours: &HashTrieSet = &neighbourhoods[pivot]; + let pivot_neighbours = &neighbourhoods[pivot]; let ip = hash_set_filter(&p, |e| pivot_neighbours.contains(e)); for v in ip.iter() { - let n_set: &HashTrieSet = &neighbourhoods[*v]; + let n_set = &neighbourhoods[*v]; let nr = r.insert(*v); let np = hash_set_filter(&p, |e| !n_set.contains(e)); @@ -107,7 +107,7 @@ fn bron_kerbosch_aux( fn find_pivot( p: &HashTrieSet, x: &HashTrieSet, - neighbourhoods: &[HashTrieSet], + neighbourhoods: &[Vec], ) -> usize { *p.iter() .chain(x.iter()) From 7a8da7324efbd517d2c8d1c30e805cc59454790f Mon Sep 17 00:00:00 2001 From: geemo Date: Wed, 1 Nov 2023 22:29:16 -0500 Subject: [PATCH 30/37] adding limit for BK vertices --- beacon_node/beacon_chain/src/beacon_chain.rs | 1 + .../operation_pool/src/bron_kerbosch.rs | 4 +-- beacon_node/operation_pool/src/lib.rs | 28 +++++++++++++++---- 3 files changed, 25 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index cdad16b07bf..d4f8b57add2 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4739,6 +4739,7 @@ impl BeaconChain { &state, prev_attestation_filter, curr_attestation_filter, + 16, &self.spec, ) .map_err(BlockProductionError::OpPoolError)?; diff --git 
a/beacon_node/operation_pool/src/bron_kerbosch.rs b/beacon_node/operation_pool/src/bron_kerbosch.rs index 550178b0948..3cb95a47afe 100644 --- a/beacon_node/operation_pool/src/bron_kerbosch.rs +++ b/beacon_node/operation_pool/src/bron_kerbosch.rs @@ -3,7 +3,7 @@ use rpds::HashTrieSet; /// `T : Compatible`. Returns all the maximal cliques (as a matrix of indices) for the graph /// `G = (V,E)` where `V` is `vertices` and `E` encodes the `is_compatible` relationship. pub fn bron_kerbosch bool>( - vertices: &Vec, + vertices: &[T], is_compatible: F, ) -> Vec> { // create empty vector to store cliques @@ -40,7 +40,7 @@ pub fn bron_kerbosch bool>( /// and returns a symmetric matrix (`Vec>`) of indices, where each index corresponds /// to the relative vertex in `vertices`. fn compute_neighbourhoods bool>( - vertices: &Vec, + vertices: &[T], is_compatible: F, ) -> Vec> { let mut neighbourhoods = vec![]; diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index de6246b99a0..47a0ddeba49 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -230,6 +230,7 @@ impl OperationPool { state: &'a BeaconState, mut validity_filter: impl FnMut(&AttestationRef<'a, T>) -> bool + Send, num_valid: &mut i64, + max_vertices: usize, spec: &'a ChainSpec, ) -> Vec<(&CompactAttestationData, Vec>)> { if let Some(AttestationDataMap { @@ -259,7 +260,18 @@ impl OperationPool { }) .collect::>)>>() .into_par_iter() - .map(|(data, aggregates)| { + .map(|(data, mut aggregates)| { + // Take the N aggregates with the highest number of set bits + // This is needed to avoid the bron_kerbosch algorithm generating millions of + // cliques + let aggregates = if aggregates.len() > max_vertices { + let (left, _, _) = aggregates.select_nth_unstable_by_key(max_vertices, |a| { + std::cmp::Reverse(a.aggregation_bits.num_set_bits()) + }); + left + } else { + aggregates.as_slice() + }; // aggregate each cliques corresponding attestaiions let mut clique_aggregates: Vec> = bron_kerbosch(&aggregates, is_compatible) @@ -363,6 +375,7 @@ impl OperationPool { state: &BeaconState, prev_epoch_validity_filter: impl for<'a> FnMut(&AttestationRef<'a, T>) -> bool + Send, curr_epoch_validity_filter: impl for<'a> FnMut(&AttestationRef<'a, T>) -> bool + Send, + max_vertices: usize, spec: &ChainSpec, ) -> Result>, OpPoolError> { // Attestations for the current fork, which may be from the current or previous epoch. @@ -392,6 +405,7 @@ impl OperationPool { state, prev_epoch_validity_filter, &mut num_prev_valid, + max_vertices, spec, ) } else { @@ -419,6 +433,7 @@ impl OperationPool { state, curr_epoch_validity_filter, &mut num_curr_valid, + max_vertices, spec, ); @@ -1123,7 +1138,7 @@ mod release_tests { // Before the min attestation inclusion delay, get_attestations shouldn't return anything. 
assert_eq!( op_pool - .get_attestations(&state, |_| true, |_| true, spec) + .get_attestations(&state, |_| true, |_| true, 16, spec) .expect("should have attestations") .len(), 0 @@ -1133,7 +1148,7 @@ mod release_tests { *state.slot_mut() += spec.min_attestation_inclusion_delay; let block_attestations = op_pool - .get_attestations(&state, |_| true, |_| true, spec) + .get_attestations(&state, |_| true, |_| true, 16, spec) .expect("Should have block attestations"); assert_eq!(block_attestations.len(), committees.len()); @@ -1292,10 +1307,11 @@ mod release_tests { &state, |_| true, &mut num_valid, + 32, spec, ); let best_attestations = op_pool - .get_attestations(&state, |_| true, |_| true, spec) + .get_attestations(&state, |_| true, |_| true, 32, spec) .unwrap(); // There should only be attestations for 1 attestation data. @@ -1397,7 +1413,7 @@ mod release_tests { *state.slot_mut() += spec.min_attestation_inclusion_delay; let best_attestations = op_pool - .get_attestations(&state, |_| true, |_| true, spec) + .get_attestations(&state, |_| true, |_| true, 32, spec) .expect("should have best attestations"); assert_eq!(best_attestations.len(), max_attestations); @@ -1494,7 +1510,7 @@ mod release_tests { *state.slot_mut() += spec.min_attestation_inclusion_delay; let best_attestations = op_pool - .get_attestations(&state, |_| true, |_| true, spec) + .get_attestations(&state, |_| true, |_| true, 32, spec) .expect("should have valid best attestations"); assert_eq!(best_attestations.len(), max_attestations); From 41fc685265fb9c31269bd575e0c9b7d56bc8a120 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 3 Nov 2023 10:00:50 +1100 Subject: [PATCH 31/37] Allocation optimisations & Clippy cleanup --- beacon_node/beacon_chain/src/beacon_chain.rs | 12 +- beacon_node/operation_pool/src/attestation.rs | 1 + .../operation_pool/src/attestation_storage.rs | 2 +- .../operation_pool/src/bron_kerbosch.rs | 116 ++++--- beacon_node/operation_pool/src/lib.rs | 327 +++++++++--------- beacon_node/operation_pool/src/max_cover.rs | 8 +- 6 files changed, 254 insertions(+), 212 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d4f8b57add2..14636d974eb 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4724,13 +4724,17 @@ impl BeaconChain { let attestation_packing_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_ATTESTATION_TIMES); - let mut prev_filter_cache = HashMap::new(); + let prev_chain = self.clone(); + let prev_filter_cache_lock = Mutex::new(HashMap::new()); let prev_attestation_filter = |att: &AttestationRef| { - self.filter_op_pool_attestation(&mut prev_filter_cache, att, &state) + let mut prev_filter_cache = prev_filter_cache_lock.lock(); + prev_chain.filter_op_pool_attestation(&mut prev_filter_cache, att, &state) }; - let mut curr_filter_cache = HashMap::new(); + let curr_chain = self.clone(); + let curr_filter_cache_lock = Mutex::new(HashMap::new()); let curr_attestation_filter = |att: &AttestationRef| { - self.filter_op_pool_attestation(&mut curr_filter_cache, att, &state) + let mut curr_filter_cache = curr_filter_cache_lock.lock(); + curr_chain.filter_op_pool_attestation(&mut curr_filter_cache, att, &state) }; let mut attestations = self diff --git a/beacon_node/operation_pool/src/attestation.rs b/beacon_node/operation_pool/src/attestation.rs index 14edc32fe0d..9507368c632 100644 --- a/beacon_node/operation_pool/src/attestation.rs +++ 
b/beacon_node/operation_pool/src/attestation.rs @@ -193,6 +193,7 @@ pub fn earliest_attestation_validators( } else if attestation.checkpoint.target_epoch == state.previous_epoch() { &base_state.previous_epoch_attestations } else { + #[allow(clippy::unwrap_used)] return BitList::with_capacity(0).unwrap(); }; diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index fa01fd047fb..e077fd9e5e0 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -163,7 +163,7 @@ impl AttestationMap { attestation_map .unaggregate_attestations .entry(data) - .or_insert_with(Vec::new) + .or_default() }; let mut observed = false; for existing_attestation in attestations.iter_mut() { diff --git a/beacon_node/operation_pool/src/bron_kerbosch.rs b/beacon_node/operation_pool/src/bron_kerbosch.rs index 3cb95a47afe..b66ecade037 100644 --- a/beacon_node/operation_pool/src/bron_kerbosch.rs +++ b/beacon_node/operation_pool/src/bron_kerbosch.rs @@ -1,37 +1,48 @@ +use crate::OpPoolError as Error; use rpds::HashTrieSet; + /// Entry point for the Bron-Kerbosh algorithm. Takes a vector of `vertices` of type /// `T : Compatible`. Returns all the maximal cliques (as a matrix of indices) for the graph /// `G = (V,E)` where `V` is `vertices` and `E` encodes the `is_compatible` relationship. pub fn bron_kerbosch bool>( vertices: &[T], is_compatible: F, -) -> Vec> { +) -> Result>, Error> { // create empty vector to store cliques let mut cliques: Vec> = vec![]; - if vertices.len() > 0 { + if !vertices.is_empty() { // build neighbourhoods and degeneracy ordering, also move to index-based reasoning - let neighbourhoods = compute_neighbourhoods(vertices, is_compatible); + let neighbourhoods = + compute_neighbourhoods(vertices, is_compatible).ok_or(Error::BronKerboschLogicError)?; let ordering = degeneracy_order(vertices.len(), &neighbourhoods); let mut publish_clique = |c| cliques.push(c); - for i in 0..ordering.len() { - let vi = ordering[i]; - let p = (i + 1..ordering.len()) - .filter(|j| neighbourhoods[vi].contains(&ordering[*j])) - .map(|j| ordering[j]) + for (i, &vi) in ordering.iter().enumerate() { + let vi_neighbourhood = neighbourhoods + .get(vi) + .ok_or(Error::BronKerboschLogicError)?; + let p = ordering + .get(i + 1..ordering.len()) + .ok_or(Error::BronKerboschLogicError)? + .iter() + .filter(|pj| vi_neighbourhood.contains(pj)) + .copied() .collect(); let r = HashTrieSet::default().insert(vi); - let x = (0..i) - .filter(|j| neighbourhoods[vi].contains(&ordering[*j])) - .map(|j| ordering[j]) + let x = ordering + .get(0..i) + .ok_or(Error::BronKerboschLogicError)? + .iter() + .filter(|xj| vi_neighbourhood.contains(xj)) + .copied() .collect(); - bron_kerbosch_aux(r, p, x, &neighbourhoods, &mut publish_clique) + bron_kerbosch_aux(r, p, x, &neighbourhoods, &mut publish_clique)?; } } - cliques + Ok(cliques) } /// A function to the neighbourhoods for all nodes in the list. 
The neighbourhood `N(a)` of a @@ -42,24 +53,24 @@ pub fn bron_kerbosch bool>( fn compute_neighbourhoods bool>( vertices: &[T], is_compatible: F, -) -> Vec> { +) -> Option>> { let mut neighbourhoods = vec![]; neighbourhoods.resize_with(vertices.len(), Vec::new); - for i in 0..vertices.len() - 1 { - for j in i + 1..vertices.len() { - if is_compatible(&vertices[i], &vertices[j]) { - neighbourhoods[i].push(j); - neighbourhoods[j].push(i); + for (i, vi) in vertices.get(0..vertices.len() - 1)?.iter().enumerate() { + for (j, vj) in vertices.get(i + 1..vertices.len())?.iter().enumerate() { + if is_compatible(vi, vj) { + neighbourhoods.get_mut(i)?.push(j); + neighbourhoods.get_mut(j)?.push(i); } } } - neighbourhoods + Some(neighbourhoods) } /// Produces a degeneracy ordering of a set of vertices. fn degeneracy_order(num_vertices: usize, neighbourhoods: &[Vec]) -> Vec { let mut v: Vec = (0..num_vertices).collect(); - v.sort_unstable_by_key(|i| neighbourhoods[*i].len()); + v.sort_unstable_by_key(|i| neighbourhoods.get(*i).map(|n| n.len()).unwrap_or(0)); v } @@ -76,31 +87,37 @@ fn bron_kerbosch_aux( mut x: HashTrieSet, neighbourhoods: &Vec>, publish_clique: &mut F, -) where +) -> Result<(), Error> +where F: FnMut(HashTrieSet), { if p.is_empty() && x.is_empty() { publish_clique(r); - return; + return Ok(()); } - let pivot = find_pivot(&p, &x, neighbourhoods); - let pivot_neighbours = &neighbourhoods[pivot]; + let pivot = find_pivot(&p, &x, neighbourhoods)?; + let pivot_neighbours = neighbourhoods + .get(pivot) + .ok_or(Error::BronKerboschLogicError)?; - let ip = hash_set_filter(&p, |e| pivot_neighbours.contains(e)); + let ip = hash_set_filter(&p, |e| !pivot_neighbours.contains(e)); for v in ip.iter() { - let n_set = &neighbourhoods[*v]; + let n_set = neighbourhoods + .get(*v) + .ok_or(Error::BronKerboschLogicError)?; let nr = r.insert(*v); - let np = hash_set_filter(&p, |e| !n_set.contains(e)); - let nx = hash_set_filter(&x, |e| !n_set.contains(e)); + let np = hash_set_filter(&p, |e| n_set.contains(e)); + let nx = hash_set_filter(&x, |e| n_set.contains(e)); - bron_kerbosch_aux(nr, np, nx, neighbourhoods, publish_clique); + bron_kerbosch_aux(nr, np, nx, neighbourhoods, publish_clique)?; p.remove_mut(v); x.insert_mut(*v); } + Ok(()) } /// Identifies pivot for Bron-Kerbosh pivoting technique. @@ -108,24 +125,31 @@ fn find_pivot( p: &HashTrieSet, x: &HashTrieSet, neighbourhoods: &[Vec], -) -> usize { - *p.iter() +) -> Result { + p.iter() .chain(x.iter()) .min_by_key(|&e| { p.iter() - .filter(|ee| neighbourhoods[*e].contains(ee)) + .filter(|ee| { + neighbourhoods + .get(*e) + .map(|n| n.contains(ee)) + .unwrap_or(false) + }) .count() }) - .unwrap() + .copied() + .ok_or(Error::BronKerboschLogicError) } +/// Store the members of `set` matching `predicate` in a new set. fn hash_set_filter
<P>
(set: &HashTrieSet, predicate: P) -> HashTrieSet where P: Fn(&usize) -> bool, { let mut new_set = set.clone(); for e in set.iter() { - if predicate(e) { + if !predicate(e) { new_set.remove_mut(e); } } @@ -140,7 +164,7 @@ mod tests { #[test] fn bron_kerbosch_small_test() { let vertices: Vec = (0..7).collect(); - let edges = vec![ + let edges = [ (0, 1), (0, 2), (0, 3), @@ -159,31 +183,31 @@ mod tests { edges.contains(&(*first, *second)) || edges.contains(&(*first, *second)) }; - println!("{:?}", bron_kerbosch(&vertices, is_compatible)); + println!("{:?}", bron_kerbosch(&vertices, is_compatible).unwrap()); } quickcheck! { fn no_panic(vertices: Vec, adjacencies: HashSet<(usize, usize)>) -> bool { let is_compatible = |i: &usize, j: &usize| adjacencies.contains(&(*i, *j)) || adjacencies.contains(&(*j, *i)); - bron_kerbosch(&vertices, is_compatible); + bron_kerbosch(&vertices, is_compatible).unwrap(); true } fn at_least_one_clique_returned(vertices: Vec, adjacencies: HashSet<(usize, usize)>) -> bool { - if vertices.len() == 0 { + if vertices.is_empty() { return true; } let is_compatible = |i: &usize, j: &usize| adjacencies.contains(&(*i, *j)) || adjacencies.contains(&(*j, *i)); - let res = bron_kerbosch(&vertices, is_compatible); - res.len() >= 1 + let res = bron_kerbosch(&vertices, is_compatible).unwrap(); + !res.is_empty() } fn no_clique_is_empty(vertices: Vec, adjacencies: HashSet<(usize, usize)>) -> bool { - if vertices.len() == 0 { + if vertices.is_empty() { return true; } let is_compatible = |i: &usize, j: &usize| adjacencies.contains(&(*i, *j)) || adjacencies.contains(&(*j, *i)); - let res = bron_kerbosch(&vertices, is_compatible); + let res = bron_kerbosch(&vertices, is_compatible).unwrap(); for clique in res.iter() { if clique.is_empty() { return false; @@ -194,7 +218,7 @@ mod tests { fn all_claimed_cliques_are_cliques(vertices: Vec, adjacencies: HashSet<(usize, usize)>) -> bool { let is_compatible = |i: &usize, j: &usize| adjacencies.contains(&(*i, *j)) || adjacencies.contains(&(*j, *i)); - let claimed_cliques = bron_kerbosch(&vertices, is_compatible); + let claimed_cliques = bron_kerbosch(&vertices, is_compatible).unwrap(); for clique in claimed_cliques { for (ind1, vertex) in clique.iter().enumerate() { for (ind2, other_vertex) in clique.iter().enumerate() { @@ -212,10 +236,10 @@ mod tests { fn no_clique_is_a_subset_of_other_clique(vertices: Vec, adjacencies: HashSet<(usize, usize)>) -> bool { let is_compatible = |i: &usize, j: &usize| adjacencies.contains(&(*i, *j)) || adjacencies.contains(&(*j, *i)); - let claimed_cliques = bron_kerbosch(&vertices, is_compatible); + let claimed_cliques = bron_kerbosch(&vertices, is_compatible).unwrap(); let is_subset = |set1: HashTrieSet, set2: HashTrieSet| -> bool { for vertex in set1.iter() { - if !set2.contains(&vertex) { + if !set2.contains(vertex) { return false; } } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 47a0ddeba49..2e64a412957 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1,3 +1,16 @@ +// Clippy lint set up +#![cfg_attr( + not(test), + deny( + clippy::unwrap_used, + clippy::expect_used, + clippy::panic, + clippy::unreachable, + clippy::todo, + clippy::indexing_slicing + ) +)] + mod attestation; mod attestation_id; mod attestation_storage; @@ -39,6 +52,7 @@ use state_processing::{SigVerifiedOp, VerifyOperation}; use std::collections::{hash_map::Entry, HashMap, HashSet}; use std::marker::PhantomData; use std::ptr; +use 
std::sync::atomic::{AtomicUsize, Ordering}; use types::{ sync_aggregate::Error as SyncAggregateError, typenum::Unsigned, AbstractExecPayload, Attestation, AttestationData, AttesterSlashing, BeaconState, BeaconStateError, ChainSpec, @@ -79,6 +93,7 @@ pub enum OpPoolError { RewardCacheValidatorUnknown(BeaconStateError), RewardCacheOutOfBounds, IncorrectOpPoolVariant, + BronKerboschLogicError, } #[derive(Default)] @@ -223,145 +238,131 @@ impl OperationPool { /// with resepct to the graph with attestations as vertices and an edge encoding /// compatibility for aggregation. #[allow(clippy::too_many_arguments)] + #[allow(clippy::type_complexity)] fn get_clique_aggregate_attestations_for_epoch<'a>( &'a self, checkpoint_key: &'a CheckpointKey, all_attestations: &'a AttestationMap, state: &'a BeaconState, - mut validity_filter: impl FnMut(&AttestationRef<'a, T>) -> bool + Send, - num_valid: &mut i64, + validity_filter: impl Fn(&AttestationRef<'a, T>) -> bool + Send + Sync, + num_valid: &AtomicUsize, max_vertices: usize, spec: &'a ChainSpec, - ) -> Vec<(&CompactAttestationData, Vec>)> { - if let Some(AttestationDataMap { + ) -> Result>)>, OpPoolError> + { + let Some(AttestationDataMap { aggregate_attestations, unaggregate_attestations, }) = all_attestations.get_attestation_map(checkpoint_key) - { - let mut cliques_from_aggregates: Vec<_> = aggregate_attestations - .into_iter() - .filter(|(data, _)| { - data.slot + spec.min_attestation_inclusion_delay <= state.slot() - && state.slot() <= data.slot + T::slots_per_epoch() - }) - .map(|(data, aggregates)| { - let aggregates: Vec<&CompactIndexedAttestation> = aggregates - .iter() - .filter(|indexed| { - validity_filter(&AttestationRef { - checkpoint: checkpoint_key, - data: &data, - indexed, - }) + else { + return Ok(vec![]); + }; + let mut cliques_from_aggregates: Vec<_> = aggregate_attestations + .into_par_iter() + .filter(|(data, _)| { + data.slot + spec.min_attestation_inclusion_delay <= state.slot() + && state.slot() <= data.slot + T::slots_per_epoch() + }) + .map(|(data, aggregates)| { + let aggregates: Vec<&CompactIndexedAttestation> = aggregates + .iter() + .filter(|indexed| { + validity_filter(&AttestationRef { + checkpoint: checkpoint_key, + data, + indexed, }) - .collect(); - *num_valid += aggregates.len() as i64; - (data, aggregates) - }) - .collect::>)>>() - .into_par_iter() - .map(|(data, mut aggregates)| { - // Take the N aggregates with the highest number of set bits - // This is needed to avoid the bron_kerbosch algorithm generating millions of - // cliques - let aggregates = if aggregates.len() > max_vertices { - let (left, _, _) = aggregates.select_nth_unstable_by_key(max_vertices, |a| { - std::cmp::Reverse(a.aggregation_bits.num_set_bits()) - }); - left - } else { - aggregates.as_slice() - }; - // aggregate each cliques corresponding attestaiions - let mut clique_aggregates: Vec> = - bron_kerbosch(&aggregates, is_compatible) - .iter() - .map(|clique| { - clique.iter().skip(1).fold( - aggregates[*clique.iter().next().unwrap()].clone(), - |mut acc, &ind| { - acc.aggregate(&aggregates[ind]); - acc - }, - ) - }) - .collect(); - let mut indices_to_remove = Vec::new(); - clique_aggregates.sort_unstable_by(|a, b| { - a.attesting_indices.len().cmp(&b.attesting_indices.len()) + }) + .collect(); + num_valid.fetch_add(aggregates.len(), Ordering::Relaxed); + (data, aggregates) + }) + .map(|(data, mut aggregates)| { + // Take the N aggregates with the highest number of set bits + // This is needed to avoid the bron_kerbosch algorithm generating 
millions of + // cliques + let aggregates = if aggregates.len() > max_vertices { + let (left, _, _) = aggregates.select_nth_unstable_by_key(max_vertices, |a| { + std::cmp::Reverse(a.aggregation_bits.num_set_bits()) }); - for (index, clique) in clique_aggregates.iter().enumerate() { - for bigger_clique in clique_aggregates.iter().skip(index + 1) { - if clique - .aggregation_bits - .is_subset(&bigger_clique.aggregation_bits) - { - indices_to_remove.push(index); - break; - } + left + } else { + aggregates.as_slice() + }; + // aggregate each clique's corresponding attestations + let mut clique_aggregates: Vec> = + bron_kerbosch(aggregates, is_compatible)? + .iter() + .map(|clique| { + aggregate_attestations_by_indices(aggregates, clique.iter().copied()) + .ok_or(OpPoolError::BronKerboschLogicError) + }) + .collect::, _>>()?; + let mut indices_to_remove = Vec::new(); + clique_aggregates.sort_unstable_by_key(|att| att.attesting_indices.len()); + for (index, clique) in clique_aggregates.iter().enumerate() { + for bigger_clique in clique_aggregates.iter().skip(index + 1) { + if clique + .aggregation_bits + .is_subset(&bigger_clique.aggregation_bits) + { + indices_to_remove.push(index); + break; } } + } - for index in indices_to_remove.iter().rev() { - clique_aggregates.swap_remove(*index); - } - (data, clique_aggregates) - }) - .collect::>)>>() - .into_iter() - .map(|(data, mut clique_aggregates)| { - // aggregate unaggregate attestations into the clique aggregates - // if compatible - if let Some(unaggregate_attestations) = unaggregate_attestations.get(&data) { - for attestation in unaggregate_attestations.iter().filter(|indexed| { - validity_filter(&AttestationRef { - checkpoint: checkpoint_key, - data: &data, - indexed, - }) - }) { - *num_valid += 1; - for clique_aggregate in &mut clique_aggregates { - if !clique_aggregate - .attesting_indices - .contains(&attestation.attesting_indices[0]) - { - clique_aggregate.aggregate(attestation); - } - } - } - } - (data, clique_aggregates) - }) - .collect(); - - // include aggregated attestations from unaggregated attestations whose - // attestation data doesn't appear in aggregated_attestations - unaggregate_attestations - .iter() - .filter(|(data, _)| { - data.slot + spec.min_attestation_inclusion_delay <= state.slot() - && state.slot() <= data.slot + T::slots_per_epoch() - && !aggregate_attestations.contains_key(&data) - }) - .for_each(|(data, unaggregates)| { - let mut valid_attestations = unaggregates.iter().filter(|indexed| { + for index in indices_to_remove.iter().rev() { + clique_aggregates.swap_remove(*index); + } + + // aggregate unaggregate attestations into the clique aggregates + // if compatible + if let Some(unaggregate_attestations) = unaggregate_attestations.get(data) { + for attestation in unaggregate_attestations.iter().filter(|indexed| { validity_filter(&AttestationRef { checkpoint: checkpoint_key, - data: &data, + data, indexed, }) - }); - if let Some(att) = valid_attestations.next() { - let mut att = att.clone(); - valid_attestations.for_each(|valid_att| att.aggregate(valid_att)); - cliques_from_aggregates.push((data, vec![att])) + }) { + num_valid.fetch_add(1, Ordering::Relaxed); + for clique_aggregate in &mut clique_aggregates { + if clique_aggregate.signers_disjoint_from(attestation) { + clique_aggregate.aggregate(attestation); + } + } } + } + + Ok((data, clique_aggregates)) + }) + .collect::, OpPoolError>>()?; + + // include aggregated attestations from unaggregated attestations whose + // attestation data doesn't appear in 
aggregated_attestations + unaggregate_attestations + .iter() + .filter(|(data, _)| { + data.slot + spec.min_attestation_inclusion_delay <= state.slot() + && state.slot() <= data.slot + T::slots_per_epoch() + && !aggregate_attestations.contains_key(data) + }) + .for_each(|(data, unaggregates)| { + let mut valid_attestations = unaggregates.iter().filter(|indexed| { + validity_filter(&AttestationRef { + checkpoint: checkpoint_key, + data, + indexed, + }) }); - cliques_from_aggregates - } else { - vec![] - } + if let Some(att) = valid_attestations.next() { + let mut att = att.clone(); + valid_attestations.for_each(|valid_att| att.aggregate(valid_att)); + cliques_from_aggregates.push((data, vec![att])) + } + }); + Ok(cliques_from_aggregates) } /// Get a list of attestations for inclusion in a block. @@ -373,8 +374,8 @@ impl OperationPool { pub fn get_attestations( &self, state: &BeaconState, - prev_epoch_validity_filter: impl for<'a> FnMut(&AttestationRef<'a, T>) -> bool + Send, - curr_epoch_validity_filter: impl for<'a> FnMut(&AttestationRef<'a, T>) -> bool + Send, + prev_epoch_validity_filter: impl for<'a> Fn(&AttestationRef<'a, T>) -> bool + Send + Sync, + curr_epoch_validity_filter: impl for<'a> Fn(&AttestationRef<'a, T>) -> bool + Send + Sync, max_vertices: usize, spec: &ChainSpec, ) -> Result>, OpPoolError> { @@ -394,8 +395,8 @@ impl OperationPool { // Split attestations for the previous & current epochs, so that we // can optimise them individually in parallel. - let mut num_prev_valid = 0_i64; - let mut num_curr_valid = 0_i64; + let num_prev_valid = AtomicUsize::new(0); + let num_curr_valid = AtomicUsize::new(0); // If we're in the genesis epoch, just use the current epoch attestations. let prev_epoch_cliqued_atts = if prev_epoch_key != curr_epoch_key { @@ -404,53 +405,47 @@ impl OperationPool { &*all_attestations, state, prev_epoch_validity_filter, - &mut num_prev_valid, + &num_prev_valid, max_vertices, spec, - ) + )? 
} else { vec![] }; - let prev_epoch_cliqued_atts = prev_epoch_cliqued_atts - .iter() - .map(|(data, atts)| { - atts.iter() - .map(|indexed| AttestationRef { - checkpoint: &prev_epoch_key, - data, - indexed, - }) - .filter_map(|att| { - AttMaxCover::new(att, state, &reward_cache, total_active_balance, spec) - }) - }) - .flat_map(|att_max_cover| att_max_cover); + let prev_epoch_cliqued_atts = prev_epoch_cliqued_atts.iter().flat_map(|(data, atts)| { + atts.iter() + .map(|indexed| AttestationRef { + checkpoint: &prev_epoch_key, + data, + indexed, + }) + .filter_map(|att| { + AttMaxCover::new(att, state, &reward_cache, total_active_balance, spec) + }) + }); let curr_epoch_cliqued_atts = self.get_clique_aggregate_attestations_for_epoch( &curr_epoch_key, &*all_attestations, state, curr_epoch_validity_filter, - &mut num_curr_valid, + &num_curr_valid, max_vertices, spec, - ); - - let curr_epoch_cliqued_atts = curr_epoch_cliqued_atts - .iter() - .map(|(data, atts)| { - atts.iter() - .map(|indexed| AttestationRef { - checkpoint: &curr_epoch_key, - data, - indexed, - }) - .filter_map(|att| { - AttMaxCover::new(att, state, &reward_cache, total_active_balance, spec) - }) - }) - .flat_map(|att_max_cover| att_max_cover); + )?; + + let curr_epoch_cliqued_atts = curr_epoch_cliqued_atts.iter().flat_map(|(data, atts)| { + atts.iter() + .map(|indexed| AttestationRef { + checkpoint: &curr_epoch_key, + data, + indexed, + }) + .filter_map(|att| { + AttMaxCover::new(att, state, &reward_cache, total_active_balance, spec) + }) + }); let prev_epoch_limit = if let BeaconState::Base(base_state) = state { std::cmp::min( @@ -486,8 +481,14 @@ impl OperationPool { }, ); - metrics::set_gauge(&metrics::NUM_PREV_EPOCH_ATTESTATIONS, num_prev_valid); - metrics::set_gauge(&metrics::NUM_CURR_EPOCH_ATTESTATIONS, num_curr_valid); + metrics::set_gauge( + &metrics::NUM_PREV_EPOCH_ATTESTATIONS, + num_prev_valid.load(Ordering::Relaxed) as i64, + ); + metrics::set_gauge( + &metrics::NUM_CURR_EPOCH_ATTESTATIONS, + num_curr_valid.load(Ordering::Relaxed) as i64, + ); Ok(max_cover::merge_solutions( curr_cover, @@ -876,6 +877,18 @@ fn is_compatible( x.signers_disjoint_from(y) } +fn aggregate_attestations_by_indices( + attestations: &[&CompactIndexedAttestation], + mut indices: impl Iterator, +) -> Option> { + let first = indices.next()?; + let mut attestation: CompactIndexedAttestation = (*attestations.get(first)?).clone(); + for i in indices { + attestation.aggregate(attestations.get(i)?); + } + Some(attestation) +} + /// Filter up to a maximum number of operations out of an iterator. fn filter_limit_operations<'a, T: 'a, V: 'a, I, F, G>( operations: I, diff --git a/beacon_node/operation_pool/src/max_cover.rs b/beacon_node/operation_pool/src/max_cover.rs index 293fffdf594..c7d52b60562 100644 --- a/beacon_node/operation_pool/src/max_cover.rs +++ b/beacon_node/operation_pool/src/max_cover.rs @@ -68,7 +68,7 @@ where .filter(|x| x.item.score() != 0) .map(|val| (val.item.key(), val)) .fold(HashMap::new(), |mut acc, (key, val)| { - acc.entry(key).or_insert(Vec::new()).push(val); + acc.entry(key).or_default().push(val); acc }); @@ -98,7 +98,7 @@ where // Update the covering sets of the other items, for the inclusion of the selected item. // Items covered by the selected item can't be re-covered. 
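// (Context: this is the greedy step of the max-cover solver. Once the
// highest-scoring item is selected, `update_covering_set` shrinks every other
// item's covering set by the elements the selected item covers, so later
// rounds only score elements that are still uncovered.)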
- all_items.get_mut(&best_key).map(|items| { + if let Some(items) = all_items.get_mut(&best_key) { items .iter_mut() .filter(|x| x.available && x.item.score() != 0) @@ -106,7 +106,7 @@ where x.item .update_covering_set(best_item.intermediate(), best_item.covering_set()) }); - }); + } result.push(best_item); } @@ -165,7 +165,7 @@ mod test { self.len() } - fn key(&self) -> () {} + fn key(&self) {} } fn example_system() -> Vec> { From bca7b9f5dbcd797b708b6bf9579d5f1ab2994965 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 3 Nov 2023 10:51:38 +1100 Subject: [PATCH 32/37] Add const for aggregates-per-data --- beacon_node/beacon_chain/src/beacon_chain.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 14636d974eb..c9abfdf7cc4 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -165,6 +165,13 @@ const PREPARE_PROPOSER_HISTORIC_EPOCHS: u64 = 4; /// impact whilst having 8 epochs without a block is a comfortable grace period. const MAX_PER_SLOT_FORK_CHOICE_DISTANCE: u64 = 256; +/// The maximum number of aggregates per `AttestationData` to supply to Bron-Kerbosch (BK). +/// +/// This value is chosen to be sufficient for ~16 aggregators on mainnet, and in practice should +/// never be reached. Higher values *could* lead to exponential blow-up in the running time of BK +/// if an attacker found a way to generate a lot of distinct aggregates. +const MAX_AGGREGATES_PER_DATA_FOR_CLIQUES: usize = 20; + /// Reported to the user when the justified block has an invalid execution payload. pub const INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON: &str = "Justified block has an invalid execution payload."; @@ -4743,7 +4750,7 @@ impl BeaconChain { &state, prev_attestation_filter, curr_attestation_filter, - 16, + MAX_AGGREGATES_PER_DATA_FOR_CLIQUES, &self.spec, ) .map_err(BlockProductionError::OpPoolError)?; From 498c2c112ab24a3b76338dd23bc6a2fdeb3e158e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 3 Nov 2023 12:01:43 +1100 Subject: [PATCH 33/37] Fix compute_neighbourhood and tests! 
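The slice-based `compute_neighbourhoods` introduced in the previous commit
enumerated the inner loop over the sub-slice `vertices[i + 1..]`, so `j`
restarted at 0 and every neighbour was recorded under an index shifted down by
`i + 1`. Switching to `enumerate().skip(i + 1)` keeps `j` as an absolute index
into `vertices`. A minimal sketch of the difference (illustrative only, not
part of the diff):

    let v = [10, 20, 30];
    // Buggy shape: enumerating the sub-slice restarts `j` at 0, so for i = 0
    // the loop visits v[1] and v[2] but yields j = 0, 1.
    let relative: Vec<usize> = v[1..].iter().enumerate().map(|(j, _)| j).collect();
    assert_eq!(relative, vec![0, 1]);
    // Fixed shape: `skip` preserves absolute indices, yielding j = 1, 2.
    let absolute: Vec<usize> = v.iter().enumerate().skip(1).map(|(j, _)| j).collect();
    assert_eq!(absolute, vec![1, 2]);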
--- .../operation_pool/src/bron_kerbosch.rs | 2 +- beacon_node/operation_pool/src/lib.rs | 22 ++++++++++--------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/beacon_node/operation_pool/src/bron_kerbosch.rs b/beacon_node/operation_pool/src/bron_kerbosch.rs index b66ecade037..28b8a6222e0 100644 --- a/beacon_node/operation_pool/src/bron_kerbosch.rs +++ b/beacon_node/operation_pool/src/bron_kerbosch.rs @@ -57,7 +57,7 @@ fn compute_neighbourhoods bool>( let mut neighbourhoods = vec![]; neighbourhoods.resize_with(vertices.len(), Vec::new); for (i, vi) in vertices.get(0..vertices.len() - 1)?.iter().enumerate() { - for (j, vj) in vertices.get(i + 1..vertices.len())?.iter().enumerate() { + for (j, vj) in vertices.iter().enumerate().skip(i + 1) { if is_compatible(vi, vj) { neighbourhoods.get_mut(i)?.push(j); neighbourhoods.get_mut(j)?.push(i); diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 2e64a412957..69bf838df54 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1309,20 +1309,22 @@ mod release_tests { } } - let mut num_valid = 0; + let num_valid = AtomicUsize::new(0); let (_, curr) = CheckpointKey::keys_for_state(&state); let all_attestations = op_pool.attestations.read(); complete_state_advance(&mut state, None, Slot::new(1), spec).unwrap(); - let clique_attestations = op_pool.get_clique_aggregate_attestations_for_epoch( - &curr, - &all_attestations, - &state, - |_| true, - &mut num_valid, - 32, - spec, - ); + let clique_attestations = op_pool + .get_clique_aggregate_attestations_for_epoch( + &curr, + &all_attestations, + &state, + |_| true, + &num_valid, + 32, + spec, + ) + .unwrap(); let best_attestations = op_pool .get_attestations(&state, |_| true, |_| true, 32, spec) .unwrap(); From c6b9938a3e67708d66318ef91f5841a7216932a3 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 3 Nov 2023 12:02:17 +1100 Subject: [PATCH 34/37] Simplify stats --- beacon_node/operation_pool/src/attestation_storage.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index e077fd9e5e0..5a5209e5bb0 100644 --- a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -244,17 +244,12 @@ impl AttestationDataMap { pub fn stats(&self) -> AttestationStats { let mut stats = AttestationStats::default(); - let mut max_aggregates_per_data = 0; - for (_, aggregates) in self.aggregate_attestations.iter() { + for aggregates in self.aggregate_attestations.values() { stats.num_attestations += aggregates.len(); stats.num_attestation_data += 1; stats.max_aggregates_per_data = std::cmp::max(stats.max_aggregates_per_data, aggregates.len()); - - if aggregates.len() > max_aggregates_per_data { - max_aggregates_per_data = aggregates.len(); - } } for (data, unaggregates) in self.unaggregate_attestations.iter() { From 0e2f24e5be400e88f8897b4e6796bc95071ad821 Mon Sep 17 00:00:00 2001 From: geemo Date: Fri, 3 Nov 2023 12:12:26 -0500 Subject: [PATCH 35/37] change validity filter and use it only when necessary --- beacon_node/beacon_chain/src/beacon_chain.rs | 42 +++++++++----- beacon_node/beacon_chain/src/metrics.rs | 4 -- beacon_node/operation_pool/src/lib.rs | 61 +++++++++----------- 3 files changed, 55 insertions(+), 52 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs 
b/beacon_node/beacon_chain/src/beacon_chain.rs index e05836a6677..7ed41c7752e 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -83,7 +83,10 @@ use futures::channel::mpsc::Sender; use itertools::process_results; use itertools::Itertools; use kzg::Kzg; -use operation_pool::{AttestationRef, OperationPool, PersistedOperationPool, ReceivedPreCapella}; +use operation_pool::{ + CheckpointKey, CompactAttestationData, OperationPool, PersistedOperationPool, + ReceivedPreCapella, +}; use parking_lot::{Mutex, RwLock}; use proto_array::{DoNotReOrg, ProposerHeadError}; use safe_arith::SafeArith; @@ -2241,15 +2244,16 @@ impl BeaconChain { pub fn filter_op_pool_attestation( &self, filter_cache: &mut HashMap<(Hash256, Epoch), bool>, - att: &AttestationRef, + checkpoint: &CheckpointKey, + data: &CompactAttestationData, state: &BeaconState, ) -> bool { *filter_cache - .entry((att.data.beacon_block_root, att.checkpoint.target_epoch)) + .entry((data.beacon_block_root, checkpoint.target_epoch)) .or_insert_with(|| { self.shuffling_is_compatible( - &att.data.beacon_block_root, - att.checkpoint.target_epoch, + &data.beacon_block_root, + checkpoint.target_epoch, state, ) }) @@ -4770,16 +4774,28 @@ impl BeaconChain { let prev_chain = self.clone(); let prev_filter_cache_lock = Mutex::new(HashMap::new()); - let prev_attestation_filter = |att: &AttestationRef| { - let mut prev_filter_cache = prev_filter_cache_lock.lock(); - prev_chain.filter_op_pool_attestation(&mut prev_filter_cache, att, &state) - }; + let prev_attestation_filter = + |checkpoint: &CheckpointKey, data: &CompactAttestationData| { + let mut prev_filter_cache = prev_filter_cache_lock.lock(); + prev_chain.filter_op_pool_attestation( + &mut prev_filter_cache, + checkpoint, + data, + &state, + ) + }; let curr_chain = self.clone(); let curr_filter_cache_lock = Mutex::new(HashMap::new()); - let curr_attestation_filter = |att: &AttestationRef| { - let mut curr_filter_cache = curr_filter_cache_lock.lock(); - curr_chain.filter_op_pool_attestation(&mut curr_filter_cache, att, &state) - }; + let curr_attestation_filter = + |checkpoint: &CheckpointKey, data: &CompactAttestationData| { + let mut curr_filter_cache = curr_filter_cache_lock.lock(); + curr_chain.filter_op_pool_attestation( + &mut curr_filter_cache, + checkpoint, + data, + &state, + ) + }; let mut attestations = self .op_pool diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index a23bcdc0b55..45857993342 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -121,10 +121,6 @@ lazy_static! 
{ "beacon_block_production_slot_process_seconds", "Time taken to advance the state to the block production slot" ); - pub static ref BLOCK_PRODUCTION_UNAGGREGATED_TIMES: Result = try_create_histogram( - "beacon_block_production_unaggregated_seconds", - "Time taken to import the naive aggregation pool for block production" - ); pub static ref BLOCK_PRODUCTION_ATTESTATION_TIMES: Result = try_create_histogram( "beacon_block_production_attestation_seconds", "Time taken to pack attestations into a block" diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 69bf838df54..8f45fb9846e 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -25,8 +25,10 @@ mod sync_aggregate_id; pub use crate::bls_to_execution_changes::ReceivedPreCapella; pub use attestation::{earliest_attestation_validators, AttMaxCover}; -use attestation_storage::{AttestationDataMap, CompactAttestationData, CompactIndexedAttestation}; -pub use attestation_storage::{AttestationRef, SplitAttestation}; +use attestation_storage::{AttestationDataMap, AttestationMap, CompactIndexedAttestation}; +pub use attestation_storage::{ + AttestationRef, CheckpointKey, CompactAttestationData, SplitAttestation, +}; use bron_kerbosch::bron_kerbosch; pub use max_cover::MaxCover; pub use persistence::{ @@ -35,7 +37,6 @@ pub use persistence::{ }; pub use reward_cache::RewardCache; -use crate::attestation_storage::{AttestationMap, CheckpointKey}; use crate::bls_to_execution_changes::BlsToExecutionChanges; use crate::sync_aggregate_id::SyncAggregateId; use attester_slashing::AttesterSlashingMaxCover; @@ -244,7 +245,7 @@ impl OperationPool { checkpoint_key: &'a CheckpointKey, all_attestations: &'a AttestationMap, state: &'a BeaconState, - validity_filter: impl Fn(&AttestationRef<'a, T>) -> bool + Send + Sync, + validity_filter: impl Fn(&CheckpointKey, &CompactAttestationData) -> bool + Send + Sync, num_valid: &AtomicUsize, max_vertices: usize, spec: &'a ChainSpec, @@ -266,13 +267,7 @@ impl OperationPool { .map(|(data, aggregates)| { let aggregates: Vec<&CompactIndexedAttestation> = aggregates .iter() - .filter(|indexed| { - validity_filter(&AttestationRef { - checkpoint: checkpoint_key, - data, - indexed, - }) - }) + .filter(|_| validity_filter(checkpoint_key, data)) .collect(); num_valid.fetch_add(aggregates.len(), Ordering::Relaxed); (data, aggregates) @@ -319,13 +314,7 @@ impl OperationPool { // aggregate unaggregate attestations into the clique aggregates // if compatible if let Some(unaggregate_attestations) = unaggregate_attestations.get(data) { - for attestation in unaggregate_attestations.iter().filter(|indexed| { - validity_filter(&AttestationRef { - checkpoint: checkpoint_key, - data, - indexed, - }) - }) { + for attestation in unaggregate_attestations { num_valid.fetch_add(1, Ordering::Relaxed); for clique_aggregate in &mut clique_aggregates { if clique_aggregate.signers_disjoint_from(attestation) { @@ -349,16 +338,14 @@ impl OperationPool { && !aggregate_attestations.contains_key(data) }) .for_each(|(data, unaggregates)| { - let mut valid_attestations = unaggregates.iter().filter(|indexed| { - validity_filter(&AttestationRef { - checkpoint: checkpoint_key, - data, - indexed, - }) - }); - if let Some(att) = valid_attestations.next() { + let mut unaggregates = unaggregates.iter(); + if let Some(att) = unaggregates.next() { + if !validity_filter(checkpoint_key, data) { + return; + } + let mut att = att.clone(); - valid_attestations.for_each(|valid_att| 
att.aggregate(valid_att)); + unaggregates.for_each(|valid_att| att.aggregate(valid_att)); cliques_from_aggregates.push((data, vec![att])) } }); @@ -374,8 +361,12 @@ impl OperationPool { pub fn get_attestations( &self, state: &BeaconState, - prev_epoch_validity_filter: impl for<'a> Fn(&AttestationRef<'a, T>) -> bool + Send + Sync, - curr_epoch_validity_filter: impl for<'a> Fn(&AttestationRef<'a, T>) -> bool + Send + Sync, + prev_epoch_validity_filter: impl for<'a> Fn(&CheckpointKey, &CompactAttestationData) -> bool + + Send + + Sync, + curr_epoch_validity_filter: impl for<'a> Fn(&CheckpointKey, &CompactAttestationData) -> bool + + Send + + Sync, max_vertices: usize, spec: &ChainSpec, ) -> Result>, OpPoolError> { @@ -1151,7 +1142,7 @@ mod release_tests { // Before the min attestation inclusion delay, get_attestations shouldn't return anything. assert_eq!( op_pool - .get_attestations(&state, |_| true, |_| true, 16, spec) + .get_attestations(&state, |_, _| true, |_, _| true, 16, spec) .expect("should have attestations") .len(), 0 @@ -1161,7 +1152,7 @@ mod release_tests { *state.slot_mut() += spec.min_attestation_inclusion_delay; let block_attestations = op_pool - .get_attestations(&state, |_| true, |_| true, 16, spec) + .get_attestations(&state, |_, _| true, |_, _| true, 16, spec) .expect("Should have block attestations"); assert_eq!(block_attestations.len(), committees.len()); @@ -1319,14 +1310,14 @@ mod release_tests { &curr, &all_attestations, &state, - |_| true, + |_, _| true, &num_valid, 32, spec, ) .unwrap(); let best_attestations = op_pool - .get_attestations(&state, |_| true, |_| true, 32, spec) + .get_attestations(&state, |_, _| true, |_, _| true, 32, spec) .unwrap(); // There should only be attestations for 1 attestation data. @@ -1428,7 +1419,7 @@ mod release_tests { *state.slot_mut() += spec.min_attestation_inclusion_delay; let best_attestations = op_pool - .get_attestations(&state, |_| true, |_| true, 32, spec) + .get_attestations(&state, |_, _| true, |_, _| true, 32, spec) .expect("should have best attestations"); assert_eq!(best_attestations.len(), max_attestations); @@ -1525,7 +1516,7 @@ mod release_tests { *state.slot_mut() += spec.min_attestation_inclusion_delay; let best_attestations = op_pool - .get_attestations(&state, |_| true, |_| true, 32, spec) + .get_attestations(&state, |_, _| true, |_, _| true, 32, spec) .expect("should have valid best attestations"); assert_eq!(best_attestations.len(), max_attestations); From 376c40845753f782bca5ab1cc910a6b734050c41 Mon Sep 17 00:00:00 2001 From: geemo Date: Tue, 14 Nov 2023 07:30:05 -0600 Subject: [PATCH 36/37] avoid deadlock by sequential iter and collecting then par iter --- beacon_node/operation_pool/src/lib.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 8f45fb9846e..37b8e44f950 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -259,7 +259,7 @@ impl OperationPool { return Ok(vec![]); }; let mut cliques_from_aggregates: Vec<_> = aggregate_attestations - .into_par_iter() + .iter() .filter(|(data, _)| { data.slot + spec.min_attestation_inclusion_delay <= state.slot() && state.slot() <= data.slot + T::slots_per_epoch() @@ -272,6 +272,8 @@ impl OperationPool { num_valid.fetch_add(aggregates.len(), Ordering::Relaxed); (data, aggregates) }) + .collect::>() + .into_par_iter() .map(|(data, mut aggregates)| { // Take the N aggregates with the highest number of set bits // This is 
needed to avoid the bron_kerbosch algorithm generating millions of From 32dd188da5eb2ae693af6b38115c4cccb654b70c Mon Sep 17 00:00:00 2001 From: geemo Date: Mon, 22 Jan 2024 23:51:25 -0600 Subject: [PATCH 37/37] remove rayon from bron-kerbosch processing --- beacon_node/operation_pool/src/lib.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 37b8e44f950..c3c55c04b05 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -272,8 +272,6 @@ impl OperationPool { num_valid.fetch_add(aggregates.len(), Ordering::Relaxed); (data, aggregates) }) - .collect::>() - .into_par_iter() .map(|(data, mut aggregates)| { // Take the N aggregates with the highest number of set bits // This is needed to avoid the bron_kerbosch algorithm generating millions of
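For reference, a minimal sketch of driving the final fallible `bron_kerbosch`
API on a toy graph (this assumes the `Result`-returning signature from this
series; the `expect` is test-style and would be a `?` in pool code):

    use rpds::HashTrieSet;

    // Vertices 0-2 form a triangle and vertex 3 is isolated, so the maximal
    // cliques are {0, 1, 2} and {3}.
    let vertices: Vec<usize> = (0..4).collect();
    let edges = [(0, 1), (0, 2), (1, 2)];
    let is_compatible =
        |a: &usize, b: &usize| edges.contains(&(*a, *b)) || edges.contains(&(*b, *a));
    let cliques: Vec<HashTrieSet<usize>> =
        bron_kerbosch(&vertices, is_compatible).expect("no logic error");
    assert_eq!(cliques.len(), 2);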