chore: move recovery code to its own module (#290)
* add new recovery module

* add recovery errors

* refactor code to use recovery method and move recovery out of the verifier

* add back comment
kevaundray authored Sep 26, 2024
1 parent fe72775 commit 0fc446a
Showing 5 changed files with 228 additions and 195 deletions.
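
Taken together, the hunks below move recovery behind a single free function in the new recovery module, with its own RecoveryError that the top-level Error wraps. A minimal crate-internal sketch of the resulting call shape, assuming the aliases used in the diff (CellIndex and CellRef are not restated in this commit):

// Hypothetical crate-internal helper, not part of the diff: it only shows the new
// call shape that replaces the old `self.recover_polynomial_coeff(..)` method.
use crate::{errors::Error, recovery::recover_polynomial_coeff, CellIndex, CellRef};
use bls12_381::Scalar;
use erasure_codes::ReedSolomon;

fn recover_blob_polynomial(
    rs: &ReedSolomon,
    cell_indices: Vec<CellIndex>,
    cells: Vec<CellRef>,
) -> Result<Vec<Scalar>, Error> {
    // Validation, deserialization and Reed-Solomon recovery now all live in recovery.rs.
    recover_polynomial_coeff(rs, cell_indices, cells)
}
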
49 changes: 32 additions & 17 deletions eip7594/src/errors.rs
@@ -6,6 +6,7 @@ use erasure_codes::errors::RSError;
pub enum Error {
    Prover(ProverError),
    Verifier(VerifierError),
    Recovery(RecoveryError),
    Serialization(SerializationError),
}

@@ -38,35 +39,56 @@ impl From<SerializationError> for Error {
        Error::Serialization(value)
    }
}
impl From<RecoveryError> for Error {
    fn from(value: RecoveryError) -> Self {
        Error::Recovery(value)
    }
}

/// Errors that can occur while calling a method in the Prover API
#[derive(Debug)]
pub enum ProverError {
    RecoveryFailure(VerifierError),
    RecoveryFailure(RecoveryError),
}

impl From<VerifierError> for ProverError {
    fn from(value: VerifierError) -> Self {
impl From<RecoveryError> for ProverError {
    fn from(value: RecoveryError) -> Self {
        ProverError::RecoveryFailure(value)
    }
}

/// Errors that can occur while calling a method in the Verifier API
#[derive(Debug)]
pub enum VerifierError {
    NumCellIndicesNotEqualToNumCells {
        num_cell_indices: usize,
        num_cells: usize,
    },
    CellIndicesNotUnique,
/// Errors that can occur while calling the recovery procedure
pub enum RecoveryError {
    NotEnoughCellsToReconstruct {
        num_cells_received: usize,
        min_cells_needed: usize,
    },
    NumCellIndicesNotEqualToNumCells {
        num_cell_indices: usize,
        num_cells: usize,
    },
    TooManyCellsReceived {
        num_cells_received: usize,
        max_cells_needed: usize,
    },
    CellIndexOutOfRange {
        cell_index: CellIndex,
        max_number_of_cells: u64,
    },
    CellIndicesNotUnique,
    ReedSolomon(RSError),
}

impl From<RSError> for RecoveryError {
    fn from(value: RSError) -> Self {
        RecoveryError::ReedSolomon(value)
    }
}

/// Errors that can occur while calling a method in the Verifier API
#[derive(Debug)]
pub enum VerifierError {
    CellIndexOutOfRange {
        cell_index: CellIndex,
        max_number_of_cells: u64,
@@ -82,20 +104,13 @@ pub enum VerifierError {
        cells_len: usize,
        proofs_len: usize,
    },
    ReedSolomon(RSError),
    FK20(kzg_multi_open::VerifierError),
    PolynomialHasInvalidLength {
        num_coefficients: usize,
        expected_num_coefficients: usize,
    },
}

impl From<RSError> for VerifierError {
    fn from(value: RSError) -> Self {
        VerifierError::ReedSolomon(value)
    }
}

impl From<kzg_multi_open::VerifierError> for VerifierError {
    fn from(value: kzg_multi_open::VerifierError) -> Self {
        VerifierError::FK20(value)
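
For callers of this crate, the practical change in errors.rs is that recovery failures no longer surface as VerifierError. A small hedged sketch of handling the new variants (names are taken from the hunk above; it assumes RecoveryError keeps the #[derive(Debug)] shown as context):

use crate::errors::{Error, RecoveryError};

// Sketch only: turning the new RecoveryError variants into user-facing messages.
fn explain_recovery_failure(err: &Error) -> Option<String> {
    let Error::Recovery(recovery_err) = err else {
        return None;
    };
    Some(match recovery_err {
        RecoveryError::NotEnoughCellsToReconstruct {
            num_cells_received,
            min_cells_needed,
        } => format!("need at least {min_cells_needed} cells, received {num_cells_received}"),
        RecoveryError::CellIndicesNotUnique => "duplicate cell indices supplied".to_string(),
        other => format!("recovery failed: {other:?}"),
    })
}
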
1 change: 1 addition & 0 deletions eip7594/src/lib.rs
@@ -4,6 +4,7 @@ compile_error!("feature_a and feature_b cannot be enabled simultaneously");
pub mod constants;
mod errors;
mod prover;
mod recovery;
mod serialization;
mod trusted_setup;
mod verifier;
14 changes: 12 additions & 2 deletions eip7594/src/prover.rs
@@ -1,15 +1,17 @@
use bls12_381::fixed_base_msm::UsePrecomp;
use erasure_codes::ReedSolomon;
use kzg_multi_open::{
    commit_key::CommitKey,
    {Prover, ProverInput},
};

use crate::{
    constants::{
        CELLS_PER_EXT_BLOB, FIELD_ELEMENTS_PER_BLOB, FIELD_ELEMENTS_PER_CELL,
        CELLS_PER_EXT_BLOB, EXPANSION_FACTOR, FIELD_ELEMENTS_PER_BLOB, FIELD_ELEMENTS_PER_CELL,
        FIELD_ELEMENTS_PER_EXT_BLOB,
    },
    errors::Error,
    recovery::recover_polynomial_coeff,
    serialization::{
        deserialize_blob_to_scalars, serialize_cells_and_proofs, serialize_g1_compressed,
    },
@@ -23,6 +25,7 @@ use crate::{
#[derive(Debug)]
pub struct ProverContext {
    kzg_multipoint_prover: Prover,
    rs: ReedSolomon,
}

impl Default for ProverContext {
@@ -54,8 +57,15 @@ impl ProverContext {
            use_precomp,
        );

        let rs = ReedSolomon::new(
            FIELD_ELEMENTS_PER_BLOB,
            EXPANSION_FACTOR,
            CELLS_PER_EXT_BLOB,
        );

        ProverContext {
            kzg_multipoint_prover,
            rs,
        }
    }
}
@@ -117,7 +127,7 @@ impl DASContext {
        with_optional_threadpool!(self, {
            // Recover polynomial
            //
            let poly_coeff = self.recover_polynomial_coeff(cell_indices, cells)?;
            let poly_coeff = recover_polynomial_coeff(&self.prover_ctx.rs, cell_indices, cells)?;

            // Compute proofs and evaluation sets
            //
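
The ReedSolomon::new(FIELD_ELEMENTS_PER_BLOB, EXPANSION_FACTOR, CELLS_PER_EXT_BLOB) instance is the only new state ProverContext carries. A standalone sketch of the parameter relationships it relies on, using the EIP-7594 values these constants are expected to hold (the constants module is not part of this diff, so the concrete numbers are assumptions):

// Standalone sketch; mirrors the crate's constants rather than importing them.
const FIELD_ELEMENTS_PER_BLOB: usize = 4096;
const EXPANSION_FACTOR: usize = 2;
const CELLS_PER_EXT_BLOB: usize = 128;
const FIELD_ELEMENTS_PER_EXT_BLOB: usize = FIELD_ELEMENTS_PER_BLOB * EXPANSION_FACTOR;
const FIELD_ELEMENTS_PER_CELL: usize = FIELD_ELEMENTS_PER_EXT_BLOB / CELLS_PER_EXT_BLOB;

fn main() {
    // The extended blob splits evenly into cells.
    assert_eq!(FIELD_ELEMENTS_PER_CELL * CELLS_PER_EXT_BLOB, FIELD_ELEMENTS_PER_EXT_BLOB);
    // Recovery needs at least half of the cells, matching the
    // NotEnoughCellsToReconstruct check in recovery.rs below.
    assert_eq!(CELLS_PER_EXT_BLOB / EXPANSION_FACTOR, 64);
}
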
170 changes: 170 additions & 0 deletions eip7594/src/recovery.rs
@@ -0,0 +1,170 @@
use std::collections::HashSet;

use bls12_381::Scalar;
use erasure_codes::{BlockErasureIndices, ReedSolomon};
use kzg_multi_open::recover_evaluations_in_domain_order;

use crate::{
    constants::{CELLS_PER_EXT_BLOB, FIELD_ELEMENTS_PER_EXT_BLOB},
    errors::{Error, RecoveryError},
    serialization::deserialize_cells,
    CellIndex, CellRef,
};

pub(crate) fn recover_polynomial_coeff(
    rs: &ReedSolomon,
    cell_indices: Vec<CellIndex>,
    cells: Vec<CellRef>,
) -> Result<Vec<Scalar>, Error> {
    // Validation
    //
    validation::recover_polynomial_coeff(&cell_indices, &cells)?;

    // Deserialization
    //
    let coset_evaluations = deserialize_cells(cells)?;
    let cell_indices: Vec<usize> = cell_indices
        .into_iter()
        .map(|index| index as usize)
        .collect();

    // Computation
    //
    // Permute the cells so that they are in the order you would expect if you
    // were to compute an FFT on the monomial form of the polynomial.
    //
    // This comment does leak the fact that the cells are not in the "correct" order,
    // which the API tries to hide.
    let (cell_indices_normal_order, flattened_coset_evaluations_normal_order) =
        recover_evaluations_in_domain_order(
            FIELD_ELEMENTS_PER_EXT_BLOB,
            cell_indices,
            coset_evaluations,
        )
        // This should never trigger since:
        // - cell_indices is non-empty
        // - all coset evaluations are checked to have the same size
        // - all coset indices are checked to be valid
        .expect("infallible: could not recover evaluations in domain order");

    // Find all of the missing cell indices. This is needed for recovery.
    let missing_cell_indices = find_missing_cell_indices(&cell_indices_normal_order);

    // Recover the polynomial in monomial form, which one can use to generate the cells.
    let recovered_polynomial_coeff = rs
        .recover_polynomial_coefficient(
            flattened_coset_evaluations_normal_order,
            BlockErasureIndices(missing_cell_indices),
        )
        .map_err(RecoveryError::from)?;

    Ok(recovered_polynomial_coeff)
}

fn find_missing_cell_indices(present_cell_indices: &[usize]) -> Vec<usize> {
    let cell_indices: HashSet<_> = present_cell_indices.iter().cloned().collect();

    let mut missing = Vec::new();

    for i in 0..CELLS_PER_EXT_BLOB {
        if !cell_indices.contains(&i) {
            missing.push(i);
        }
    }

    missing
}

mod validation {
    use std::collections::HashSet;

    use crate::{
        constants::{BYTES_PER_CELL, CELLS_PER_EXT_BLOB, EXPANSION_FACTOR},
        errors::RecoveryError,
        CellIndex, CellRef,
    };

    /// Validation logic for `recover_polynomial_coeff`
    pub(crate) fn recover_polynomial_coeff(
        cell_indices: &[CellIndex],
        cells: &[CellRef],
    ) -> Result<(), RecoveryError> {
        // Check that the number of cell indices is equal to the number of cells
        if cell_indices.len() != cells.len() {
            return Err(RecoveryError::NumCellIndicesNotEqualToNumCells {
                num_cell_indices: cell_indices.len(),
                num_cells: cells.len(),
            });
        }

        // Check that the cell indices are within the expected range
        for cell_index in cell_indices.iter() {
            if *cell_index >= (CELLS_PER_EXT_BLOB as u64) {
                return Err(RecoveryError::CellIndexOutOfRange {
                    cell_index: *cell_index,
                    max_number_of_cells: CELLS_PER_EXT_BLOB as u64,
                });
            }
        }

        // Check that each cell has the right number of bytes
        //
        // This should be infallible.
        for (i, cell) in cells.iter().enumerate() {
            assert_eq!(cell.len(), BYTES_PER_CELL, "the number of bytes in a cell should always equal {} since the type is a reference to an array. Check cell at index {}", BYTES_PER_CELL, i);
        }

        // Check that we have no duplicate cell indices
        if !are_cell_indices_unique(cell_indices) {
            return Err(RecoveryError::CellIndicesNotUnique);
        }

        // Check that we have enough cells to perform a reconstruction
        if cell_indices.len() < CELLS_PER_EXT_BLOB / EXPANSION_FACTOR {
            return Err(RecoveryError::NotEnoughCellsToReconstruct {
                num_cells_received: cell_indices.len(),
                min_cells_needed: CELLS_PER_EXT_BLOB / EXPANSION_FACTOR,
            });
        }

        // Check that we don't have too many cells,
        // i.e. more than we initially generated from the blob.
        //
        // Note: since we check that there are no duplicates and that all cell_indices
        // are between 0 and CELLS_PER_EXT_BLOB, this check should never fail.
        // It is kept here to be compliant with the specs.
        if cell_indices.len() > CELLS_PER_EXT_BLOB {
            return Err(RecoveryError::TooManyCellsReceived {
                num_cells_received: cell_indices.len(),
                max_cells_needed: CELLS_PER_EXT_BLOB,
            });
        }

        Ok(())
    }

    /// Check if all of the cell indices are unique
    fn are_cell_indices_unique(cell_indices: &[CellIndex]) -> bool {
        let len_cell_indices_non_dedup = cell_indices.len();
        let cell_indices_dedup: HashSet<_> = cell_indices.iter().collect();
        cell_indices_dedup.len() == len_cell_indices_non_dedup
    }

    #[cfg(test)]
    mod tests {

        use super::are_cell_indices_unique;

        #[test]
        fn test_cell_indices_unique() {
            let cell_indices = vec![1, 2, 3];
            assert!(are_cell_indices_unique(&cell_indices));
            let cell_indices = vec![];
            assert!(are_cell_indices_unique(&cell_indices));
            let cell_indices = vec![1, 1, 2, 3];
            assert!(!are_cell_indices_unique(&cell_indices));
            let cell_indices = vec![0, 0, 0];
            assert!(!are_cell_indices_unique(&cell_indices));
        }
    }
}
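
The validation above runs its checks in a fixed order (length mismatch, index range, cell size, uniqueness, minimum count, maximum count), so the first violated rule decides the returned error. A hypothetical test sketch, written as if it sat alongside the tests module inside recovery.rs (it is not part of the commit and assumes BYTES_PER_CELL from the crate's constants):

#[cfg(test)]
mod validation_order_sketch {
    use crate::{constants::BYTES_PER_CELL, errors::RecoveryError, CellRef};

    #[test]
    fn length_mismatch_is_reported_first() {
        let cell = [0u8; BYTES_PER_CELL];
        let cells: Vec<CellRef> = vec![&cell];
        // Two indices but only one cell: the length check should fire first,
        // even though index 999 would also fail the range check.
        let err = super::validation::recover_polynomial_coeff(&[0, 999], &cells).unwrap_err();
        assert!(matches!(
            err,
            RecoveryError::NumCellIndicesNotEqualToNumCells {
                num_cell_indices: 2,
                num_cells: 1,
            }
        ));
    }
}
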