Skip to content

Commit

Permalink
WIP
Browse files Browse the repository at this point in the history
  • Loading branch information
alindima committed Sep 8, 2023
1 parent dae34a5 commit 667d870
Show file tree
Hide file tree
Showing 6 changed files with 321 additions and 4 deletions.
15 changes: 15 additions & 0 deletions polkadot/erasure-coding/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -139,6 +139,21 @@ pub fn obtain_chunks<T: Encode>(n_validators: usize, data: &T) -> Result<Vec<Vec
Ok(shards.into_iter().map(|w: WrappedShard| w.into_inner()).collect())
}

/// Reconstruct the v1 available data from the set of systematic chunks.
///
/// Provide an iterator containing chunk data and the corresponding index.
/// The indices of the present chunks must be indicated. If too few chunks
/// are provided, recovery is not possible.
///
/// # Panics
///
/// WIP: this is currently an unimplemented stub and always panics via
/// `todo!()`. The parameters are underscore-prefixed because they are not
/// yet used.
// NOTE(review): once implemented, this should presumably mirror the error
// behavior of the non-systematic `reconstruct_v1` path (e.g. an error when
// fewer than the required number of chunks are supplied) — confirm when the
// implementation lands.
pub fn reconstruct_from_systematic_v1<'a, I: 'a>(
_n_validators: usize,
_chunks: I,
) -> Result<AvailableData, Error>
where
I: IntoIterator<Item = (&'a [u8], usize)>,
{
todo!()
}

/// Reconstruct the v1 available data from a set of chunks.
///
/// Provide an iterator containing chunk data and the corresponding index.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -168,6 +168,7 @@ impl FetchTaskConfig {
request: ChunkFetchingRequest {
candidate_hash: core.candidate_hash,
index: session_info.our_index,
// TODO: this will no longer be our index. We need to take into account the per-height shuffling of systematic chunks.
},
erasure_root: core.candidate_descriptor.erasure_root,
relay_parent: core.candidate_descriptor.relay_parent,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -77,8 +77,16 @@ pub struct Requester {

/// Prometheus Metrics
metrics: Metrics,
// TODO: use a helper function to compute the chunk index we need to request and pass it to
// FetchTaskConfig::new(), saving it in a HashMap<Height, ChunkIndex>. I think it's useful to
// cache it because we may hold availability chunks for multiple parablocks in the same relay
// parent. Use an LruCache with a capacity of the number of relay blocks in a session and
// fall back to the helper function if the entry is not present in the cache.
}

// TODO: helper function for computing the chunk index for the validator, starting from the block
// height and array of all the validators (not just in group)

#[overseer::contextbounds(AvailabilityDistribution, prefix = self::overseer)]
impl Requester {
/// How many ancestors of the leaf should we consider along with it.
Expand Down Expand Up @@ -231,6 +239,8 @@ impl Requester {
// guaranteed to be fetchable by the state trie.
leaf,
leaf_session_index,
// TODO: query the hashmap and pass in the chunk index to
// FetchTaskConfig.
|info| FetchTaskConfig::new(leaf, &core, tx, metrics, info, span),
)
.await
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -188,6 +188,9 @@ impl SessionCache {
// Get our group index:
let our_group = info.validator_info.our_group;

// TODO: This shuffling is fine, as it is only needed for requesting the chunk
// for blocks pending availability.

// Shuffle validators in groups:
let mut rng = thread_rng();
for g in validator_groups.iter_mut() {
Expand Down
22 changes: 19 additions & 3 deletions polkadot/node/network/availability-recovery/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,10 @@ use futures::{
task::{Context, Poll},
};
use schnellru::{ByLength, LruMap};
use task::{FetchChunks, FetchChunksParams, FetchFull, FetchFullParams};
use task::{
FetchChunks, FetchChunksParams, FetchFull, FetchFullParams, FetchSystematicChunks,
FetchSystematicChunksParams,
};

use fatality::Nested;
use polkadot_erasure_coding::{
Expand Down Expand Up @@ -90,6 +93,8 @@ pub enum RecoveryStrategyKind {
BackersFirstIfSizeLower(usize),
/// We always recover using validator chunks.
ChunksAlways,
/// First try the backing group. Then systematic chunks.
BackersThenSystematicChunks,
/// Do not request data from the availability store.
/// This is useful for nodes where the
/// availability-store subsystem is not expected to run,
Expand Down Expand Up @@ -474,12 +479,23 @@ async fn handle_recover<Context>(
group_name: "backers",
validators: backing_validators.to_vec(),
skip_if: skip_backing_group_if,
erasure_task_tx,
erasure_task_tx: erasure_task_tx.clone(),
}))),
RecoveryStrategyKind::ChunksAlways => {},
_ => {},
};
}

if recovery_strategy_kind == RecoveryStrategyKind::BackersThenSystematicChunks {
recovery_strategies.push_back(Box::new(FetchSystematicChunks::new(
FetchSystematicChunksParams {
validators: (0..recovery_threshold(session_info.validators.len()).unwrap())
.map(|i| ValidatorIndex(u32::try_from(i).unwrap()))
.collect(),
erasure_task_tx,
},
)));
}

recovery_strategies.push_back(Box::new(FetchChunks::new(fetch_chunks_params)));

launch_recovery_task(
Expand Down
Loading

0 comments on commit 667d870

Please sign in to comment.