Add feature flag for LastIndex and Erasure duplicate proofs
AshwinSekar committed Dec 7, 2023
1 parent 74c54a7 commit 412385b
Showing 2 changed files with 66 additions and 3 deletions.
64 changes: 61 additions & 3 deletions core/src/window_service.rs
@@ -28,7 +28,12 @@ use {
solana_metrics::inc_new_counter_error,
solana_perf::packet::{Packet, PacketBatch},
solana_rayon_threadlimit::get_thread_count,
-    solana_sdk::clock::Slot,
+    solana_runtime::bank_forks::BankForks,
+    solana_sdk::{
+        clock::{Slot, DEFAULT_MS_PER_SLOT},
+        epoch_schedule::EpochSchedule,
+        feature_set::{self, FeatureSet},
+    },
std::{
cmp::Reverse,
collections::{HashMap, HashSet},
@@ -137,17 +142,58 @@ impl WindowServiceMetrics {
}
}

fn should_send_index_and_erasure_conflicts(
shred_slot: Slot,
feature_set: &FeatureSet,
epoch_schedule: &EpochSchedule,
) -> bool {
match feature_set.activated_slot(&feature_set::index_erasure_conflict_duplicate_proofs::id()) {
None => false,
Some(feature_slot) => {
let feature_epoch = epoch_schedule.get_epoch(feature_slot);
let shred_epoch = epoch_schedule.get_epoch(shred_slot);
// Has a 1 epoch delay, as we don't have enough information
// on the epoch boundary of the feature activation
feature_epoch < shred_epoch
}
}
}

fn run_check_duplicate(
cluster_info: &ClusterInfo,
blockstore: &Blockstore,
shred_receiver: &Receiver<PossibleDuplicateShred>,
duplicate_slots_sender: &DuplicateSlotSender,
bank_forks: &Arc<RwLock<BankForks>>,
) -> Result<()> {
let (mut feature_set, mut epoch_schedule) = {
let root_bank = bank_forks.read().unwrap().root_bank();
(
root_bank.feature_set.clone(),
root_bank.epoch_schedule().clone(),
)
};
let mut last_updated = Instant::now();
let check_duplicate = |shred: PossibleDuplicateShred| -> Result<()> {
if last_updated.elapsed().as_millis() as u64 > DEFAULT_MS_PER_SLOT {
// Grabs bank forks lock once a slot
last_updated = Instant::now();
let root_bank = bank_forks.read().unwrap().root_bank();
feature_set = root_bank.feature_set.clone();
epoch_schedule = root_bank.epoch_schedule().clone();
}
let shred_slot = shred.slot();
let send_index_and_erasure_conflicts =
should_send_index_and_erasure_conflicts(shred_slot, &feature_set, &epoch_schedule);
let (shred1, shred2) = match shred {
-            PossibleDuplicateShred::LastIndexConflict(shred, conflict) => (shred, conflict),
-            PossibleDuplicateShred::ErasureConflict(shred, conflict) => (shred, conflict),
+            PossibleDuplicateShred::LastIndexConflict(shred, conflict)
+            | PossibleDuplicateShred::ErasureConflict(shred, conflict) => {
+                if send_index_and_erasure_conflicts {
+                    (shred, conflict)
+                } else {
+                    return Ok(());
+                }
+            }
PossibleDuplicateShred::Exists(shred) => {
// Unlike the other cases we have to wait until here to decide to handle the duplicate and store
// in blockstore. This is because the duplicate could have been part of the same insert batch,
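
The one-epoch delay in should_send_index_and_erasure_conflicts above is the subtle part of this hunk: duplicate proofs for index and erasure conflicts are only sent for shreds whose epoch is strictly greater than the activation epoch, since the activation slot can land anywhere inside its epoch. A minimal, self-contained sketch of that gating, using an illustrative fixed epoch length rather than the real EpochSchedule:

```rust
// Hypothetical epoch length; the real check uses EpochSchedule::get_epoch.
const SLOTS_PER_EPOCH: u64 = 432_000;

fn epoch_of(slot: u64) -> u64 {
    slot / SLOTS_PER_EPOCH
}

// Mirrors the gating logic: no proofs until the feature is active, and then
// only for shreds in epochs after the activation epoch.
fn gate_is_open(shred_slot: u64, feature_activation_slot: Option<u64>) -> bool {
    match feature_activation_slot {
        None => false,
        Some(activation) => epoch_of(activation) < epoch_of(shred_slot),
    }
}

fn main() {
    let activation = 432_000; // first slot of epoch 1
    assert!(!gate_is_open(432_001, Some(activation))); // same epoch: still gated
    assert!(gate_is_open(864_000, Some(activation))); // next epoch: proofs sent
    assert!(!gate_is_open(864_000, None)); // feature not activated
    println!("epoch-gating sketch ok");
}
```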
@@ -342,6 +388,7 @@ impl WindowService {
let outstanding_requests = Arc::<RwLock<OutstandingShredRepairs>>::default();

let cluster_info = repair_info.cluster_info.clone();
let bank_forks = repair_info.bank_forks.clone();

let repair_service = RepairService::new(
blockstore.clone(),
@@ -366,6 +413,7 @@
blockstore.clone(),
duplicate_receiver,
duplicate_slots_sender,
bank_forks,
);

let t_insert = Self::start_window_insert_thread(
@@ -392,6 +440,7 @@
blockstore: Arc<Blockstore>,
duplicate_receiver: Receiver<PossibleDuplicateShred>,
duplicate_slots_sender: DuplicateSlotSender,
bank_forks: Arc<RwLock<BankForks>>,
) -> JoinHandle<()> {
let handle_error = || {
inc_new_counter_error!("solana-check-duplicate-error", 1, 1);
@@ -405,6 +454,7 @@
&blockstore,
&duplicate_receiver,
&duplicate_slots_sender,
&bank_forks,
) {
if Self::should_exit_on_error(e, &handle_error) {
break;
@@ -507,9 +557,11 @@ mod test {
solana_gossip::contact_info::ContactInfo,
solana_ledger::{
blockstore::{make_many_slot_entries, Blockstore},
genesis_utils::create_genesis_config,
get_tmp_ledger_path_auto_delete,
shred::{ProcessShredsStats, Shredder},
},
solana_runtime::bank::Bank,
solana_sdk::{
hash::Hash,
signature::{Keypair, Signer},
@@ -556,6 +608,8 @@
#[test]
fn test_run_check_duplicate() {
let ledger_path = get_tmp_ledger_path_auto_delete!();
let genesis_config = create_genesis_config(10_000).genesis_config;
let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config));
let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap());
let (sender, receiver) = unbounded();
let (duplicate_slot_sender, duplicate_slot_receiver) = unbounded();
@@ -587,6 +641,7 @@
&blockstore,
&receiver,
&duplicate_slot_sender,
&bank_forks,
)
.unwrap();

@@ -616,6 +671,8 @@
Arc::new(keypair),
SocketAddrSpace::Unspecified,
));
let genesis_config = create_genesis_config(10_000).genesis_config;
let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config));

// Start duplicate thread receiving and inserting duplicates
let t_check_duplicate = WindowService::start_check_duplicate_thread(
@@ -624,6 +681,7 @@
blockstore.clone(),
duplicate_shred_receiver,
duplicate_slot_sender,
bank_forks,
);

let handle_duplicate = |shred| {
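Note that run_check_duplicate refreshes its cached FeatureSet and EpochSchedule from the root bank at most once per DEFAULT_MS_PER_SLOT, instead of taking the bank_forks read lock for every received shred. A rough, self-contained sketch of that once-per-slot refresh pattern, with hypothetical stand-ins for the root-bank state:

```rust
use std::time::{Duration, Instant};

const MS_PER_SLOT: u64 = 400; // stands in for DEFAULT_MS_PER_SLOT

// Stand-in for the state pulled from the root bank (FeatureSet, EpochSchedule).
struct RootState {
    activated_slot: Option<u64>,
}

struct CachedRootState {
    last_updated: Instant,
    state: RootState,
}

impl CachedRootState {
    // Re-read the expensive source at most once per slot-length interval.
    fn get(&mut self, reload: impl Fn() -> RootState) -> &RootState {
        if self.last_updated.elapsed() >= Duration::from_millis(MS_PER_SLOT) {
            self.last_updated = Instant::now();
            // In the real code this is bank_forks.read().unwrap().root_bank().
            self.state = reload();
        }
        &self.state
    }
}

fn main() {
    let mut cache = CachedRootState {
        last_updated: Instant::now(),
        state: RootState { activated_slot: None },
    };
    // Within the same slot interval the cached state is reused.
    let state = cache.get(|| RootState { activated_slot: Some(5) });
    assert_eq!(state.activated_slot, None);
    println!("once-per-slot refresh sketch ok");
}
```
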
5 changes: 5 additions & 0 deletions sdk/src/feature_set.rs
@@ -736,6 +736,10 @@ pub mod drop_legacy_shreds {
solana_sdk::declare_id!("GV49KKQdBNaiv2pgqhS2Dy3GWYJGXMTVYbYkdk91orRy");
}

pub mod index_erasure_conflict_duplicate_proofs {
solana_sdk::declare_id!("dupPajaLy2SSn8ko42aZz4mHANDNrLe8Nw8VQgFecLa");
}

lazy_static! {
/// Map of feature identifiers to user-visible description
pub static ref FEATURE_NAMES: HashMap<Pubkey, &'static str> = [
@@ -915,6 +919,7 @@ lazy_static! {
(disable_rent_fees_collection::id(), "Disable rent fees collection #33945"),
(enable_zk_transfer_with_fee::id(), "enable Zk Token proof program transfer with fee"),
(drop_legacy_shreds::id(), "drops legacy shreds #34328"),
(index_erasure_conflict_duplicate_proofs::id(), "generate duplicate proofs for index and erasure conflicts #34360"),
/*************** ADD NEW FEATURES HERE ***************/
]
.iter()
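
For context, the new flag is looked up like any other FEATURE_NAMES entry: callers ask the bank's FeatureSet whether, and at which slot, the id was activated. A small hedged example, assuming FeatureSet::default() starts with every feature inactive and activate() records the activation slot (as in solana_sdk at the time of this commit):

```rust
use solana_sdk::feature_set::{self, FeatureSet};

fn main() {
    let mut feature_set = FeatureSet::default(); // assumed: nothing active yet
    let id = feature_set::index_erasure_conflict_duplicate_proofs::id();

    // Before activation, window_service keeps the new proofs gated off.
    assert_eq!(feature_set.activated_slot(&id), None);

    // Simulate activation at slot 100; activated_slot() now reports it, and the
    // one-epoch-delay check in window_service.rs can compare epochs.
    feature_set.activate(&id, 100);
    assert_eq!(feature_set.activated_slot(&id), Some(100));
}
```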
