chore: cleanup remaining mentions of compress with pedersen in cpp and ts #3074

Merged: 3 commits, Oct 26, 2023
@@ -7,9 +7,9 @@ WASM_EXPORT void pedersen__init() {}

WASM_EXPORT void pedersen__commit(uint8_t const* inputs_buffer, uint8_t* output)
{
- std::vector<grumpkin::fq> to_compress;
- read(inputs_buffer, to_compress);
- grumpkin::g1::affine_element pedersen_commitment = crypto::pedersen_commitment::commit_native(to_compress);
+ std::vector<grumpkin::fq> to_commit;
+ read(inputs_buffer, to_commit);
+ grumpkin::g1::affine_element pedersen_commitment = crypto::pedersen_commitment::commit_native(to_commit);

serialize::write(output, pedersen_commitment);
}
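To make the distinction behind the rename concrete: a Pedersen *commitment* of a vector of field elements is a Grumpkin point, while a Pedersen *hash* is a single field element, so neither is a "compression" in the old sense. A minimal sketch using only the two calls that appear in this diff (the wrapper function and the `<vector>` include are my own scaffolding; the relevant barretenberg headers are assumed to be included as in the files above):

```cpp
#include <vector>

// Sketch only: contrasts the two renamed operations.
void commit_vs_hash_sketch(std::vector<grumpkin::fq> const& inputs)
{
    // Commitment: a point on the Grumpkin curve.
    grumpkin::g1::affine_element commitment = crypto::pedersen_commitment::commit_native(inputs);

    // Hash: a single field element.
    grumpkin::fq hash = crypto::pedersen_hash::hash(inputs);

    static_cast<void>(commitment); // silence unused warnings in this sketch
    static_cast<void>(hash);
}
```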
@@ -11,9 +11,9 @@ WASM_EXPORT void pedersen___init() {}

WASM_EXPORT void pedersen___commit(fr::vec_in_buf inputs_buffer, affine_element::out_buf output)
{
- std::vector<grumpkin::fq> to_compress;
- read(inputs_buffer, to_compress);
- grumpkin::g1::affine_element pedersen_commitment = crypto::pedersen_commitment::commit_native(to_compress);
+ std::vector<grumpkin::fq> to_commit;
+ read(inputs_buffer, to_commit);
+ grumpkin::g1::affine_element pedersen_commitment = crypto::pedersen_commitment::commit_native(to_commit);

serialize::write(output, pedersen_commitment);
}
@@ -9,11 +9,11 @@ WASM_EXPORT void pedersen_hash__init() {}

WASM_EXPORT void pedersen__hash_with_hash_index(uint8_t const* inputs_buffer, uint32_t hash_index, uint8_t* output)
{
- std::vector<grumpkin::fq> to_compress;
- read(inputs_buffer, to_compress);
+ std::vector<grumpkin::fq> to_hash;
+ read(inputs_buffer, to_hash);
crypto::GeneratorContext<curve::Grumpkin> ctx; // todo fix
ctx.offset = static_cast<size_t>(hash_index);
- auto r = crypto::pedersen_hash::hash(to_compress, ctx);
+ auto r = crypto::pedersen_hash::hash(to_hash, ctx);
barretenberg::fr::serialize_to_buffer(r, output);
}
}
@@ -11,9 +11,9 @@ WASM_EXPORT void pedersen_hash_with_hash_index(uint8_t const* inputs_buffer,
uint32_t const* hash_index,
uint8_t* output)
{
- std::vector<grumpkin::fq> to_compress;
- read(inputs_buffer, to_compress);
- auto r = crypto::pedersen_hash::hash(to_compress, ntohl(*hash_index));
+ std::vector<grumpkin::fq> to_hash;
+ read(inputs_buffer, to_hash);
+ auto r = crypto::pedersen_hash::hash(to_hash, ntohl(*hash_index));
barretenberg::fr::serialize_to_buffer(r, output);
}
}
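The two hunks above express the hash index in two ways: as a `GeneratorContext` offset and as a plain index argument. A hedged sketch of both call shapes, reusing only the types and functions already visible in this diff (the wrapper, the example index value, and the assumption that both call shapes accept the same index are mine):

```cpp
#include <vector>

// Sketch only: the hash index offsets the Pedersen generators, giving cheap domain separation.
grumpkin::fq hash_with_index_sketch(std::vector<grumpkin::fq> const& to_hash)
{
    // Explicit generator context, as in pedersen__hash_with_hash_index above.
    crypto::GeneratorContext<curve::Grumpkin> ctx;
    ctx.offset = 7; // arbitrary example index
    grumpkin::fq via_context = crypto::pedersen_hash::hash(to_hash, ctx);

    // Index passed directly, as in pedersen_hash_with_hash_index above
    // (there the index arrives in network byte order, hence the ntohl).
    grumpkin::fq via_index = crypto::pedersen_hash::hash(to_hash, 7);

    static_cast<void>(via_index);
    return via_context;
}
```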
@@ -29,11 +29,11 @@ barretenberg::fr compute_signing_data(join_split_tx const& tx)
const auto nullifier1 = compute_nullifier(input_note_1, tx.account_private_key, tx.num_input_notes >= 1);
const auto nullifier2 = compute_nullifier(input_note_2, tx.account_private_key, tx.num_input_notes >= 2);

- std::vector<grumpkin::fq> to_compress{ public_value, tx.public_owner, grumpkin::fq(public_asset_id),
- output_note_1, output_note_2, nullifier1,
- nullifier2, tx.backward_link, tx.allow_chain };
+ std::vector<grumpkin::fq> to_hash{ public_value, tx.public_owner, grumpkin::fq(public_asset_id),
+ output_note_1, output_note_2, nullifier1,
+ nullifier2, tx.backward_link, tx.allow_chain };

- return crypto::pedersen_hash::hash(to_compress);
+ return crypto::pedersen_hash::hash(to_hash);
}

} // namespace join_split
@@ -10,10 +10,10 @@ using namespace crypto::schnorr;

signature sign_join_split_tx(join_split_tx const& tx, key_pair<grumpkin::fr, grumpkin::g1> const& keys)
{
- fr compressed = compute_signing_data(tx);
+ fr hashed = compute_signing_data(tx);

std::vector<uint8_t> message(sizeof(fr));
- fr::serialize_to_buffer(compressed, &message[0]);
+ fr::serialize_to_buffer(hashed, &message[0]);

crypto::schnorr::signature signature =
crypto::schnorr::construct_signature<Blake2sHasher, grumpkin::fq, grumpkin::fr, grumpkin::g1>(
@@ -25,11 +25,12 @@ field_ct compute_nullifier(field_ct const& note_commitment,
is_note_in_use,
};

- // We compress the hash_inputs with Pedersen, because that's cheaper (constraint-wise) than compressing
+ // We hash the `hash_inputs` with Pedersen, because that's cheaper (constraint-wise) than hashing
// the data directly with Blake2s in the next step.
- const auto compressed_inputs = pedersen_hash::hash(hash_inputs, GeneratorIndex::JOIN_SPLIT_NULLIFIER);
+ const auto hashed_inputs = pedersen_hash::hash(hash_inputs, GeneratorIndex::JOIN_SPLIT_NULLIFIER);

- // Blake2s hash the compressed result. Without this it's possible to leak info from the pedersen compression.
+ // Blake2s hash the pedersen hash's result. Without this it's possible to leak info from the pedersen hash because
+ // it is not a random oracle.
/** E.g. we can extract a representation of the hashed_pk:
* Paraphrasing, if:
* nullifier = note_comm * G1 + hashed_pk * G2 + is_note_in_use * G3
@@ -38,7 +39,7 @@ field_ct compute_nullifier(field_ct const& note_commitment,
* Notably, at the point someone withdraws, the observer would be able to connect `hashed_pk * G2` with a specific
* eth address.
*/
- auto blake_input = byte_array_ct(compressed_inputs);
+ auto blake_input = byte_array_ct(hashed_inputs);
auto blake_result = proof_system::plonk::stdlib::blake2s(blake_input);
return field_ct(blake_result);
}
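The comment above carries the security argument: a bare Pedersen output is an algebraic combination, not a random oracle, so the follow-up Blake2s is what actually hides the key-dependent term. A toy integer illustration of the leak (made-up numbers, not real curve arithmetic):

```cpp
#include <cassert>
#include <cstdint>

// Toy model of a Pedersen-style linear combination over the integers.
// An observer who knows the public inputs can peel off the secret-dependent
// term hashed_pk * g2, which is a stable, linkable identifier.
int main()
{
    const std::uint64_t g1 = 17, g2 = 101, g3 = 31; // toy "generators"
    const std::uint64_t note_comm = 12345;           // public note commitment
    const std::uint64_t is_note_in_use = 1;          // public flag
    const std::uint64_t hashed_pk = 777;             // derived from the secret key

    // Nullifier without the extra Blake2s step:
    const std::uint64_t nullifier = note_comm * g1 + hashed_pk * g2 + is_note_in_use * g3;

    // Observer's computation using only public values:
    const std::uint64_t recovered = nullifier - note_comm * g1 - is_note_in_use * g3;
    assert(recovered == hashed_pk * g2); // the pk-dependent term leaks

    return 0;
}
```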
@@ -24,9 +24,9 @@ fr compute_nullifier(grumpkin::fq const& note_commitment,
hashed_pk.y,
static_cast<int>(is_note_in_use),
};
- auto compressed_inputs = crypto::pedersen_hash::hash(buf, GeneratorIndex::JOIN_SPLIT_NULLIFIER);
+ auto hashed_inputs = crypto::pedersen_hash::hash(buf, GeneratorIndex::JOIN_SPLIT_NULLIFIER);

- auto blake_result = blake2::blake2s(to_buffer(compressed_inputs));
+ auto blake_result = blake2::blake2s(to_buffer(hashed_inputs));

return from_buffer<fr>(blake_result);
}
@@ -14,11 +14,10 @@ namespace proof_system::plonk {
* defined in polynomials/evaluation_domain.hpp, and `polynomial` is a bberg library which does not depend on `crypto`
* in its CMakeLists.txt file. (We'd need `crypto` to be able to call native pedersen functions).
*
- * @param domain to compress
- * @param circuit_type to use when choosing pedersen compression function
- * @return barretenberg::fr compression of the evaluation domain as a field
+ * @param domain to hash
+ * @return barretenberg::fr hash of the evaluation domain as a field
*/
- barretenberg::fr hash_native_evaluation_domain(barretenberg::evaluation_domain const& domain, proof_system::CircuitType)
+ barretenberg::fr hash_native_evaluation_domain(barretenberg::evaluation_domain const& domain)
{
barretenberg::fr out = crypto::pedersen_hash::hash({
domain.root,
@@ -30,14 +29,14 @@
}

/**
- * @brief Compress the verification key data.
+ * @brief Hash the verification key data.
*
- * @details Native pedersen compression of VK data that is truly core to a VK.
+ * @details Native pedersen hash of VK data that is truly core to a VK.
* Omits recursion proof flag and recursion input indices as they are not really
* core to the VK itself.
*
- * @param hash_index generator index to use during pedersen compression
- * @returns a field containing the compression
+ * @param hash_index generator index to use during pedersen hashing
+ * @returns a field containing the hash
*/
barretenberg::fr verification_key_data::hash_native(const size_t hash_index) const
{
@@ -31,12 +31,12 @@ verification_key_data rand_vk_data()
}

/**
- * @brief expect that two vk data compressions are equal for a few different hash indices
+ * @brief expect that two vk data hashes are equal for a few different hash indices
*
* @param vk0_data
* @param vk1_data
*/
- void expect_compressions_eq(const verification_key_data& vk0_data, const verification_key_data& vk1_data)
+ void expect_hashes_eq(const verification_key_data& vk0_data, const verification_key_data& vk1_data)
{
// 0 hash index
EXPECT_EQ(vk0_data.hash_native(0), vk1_data.hash_native(0));
@@ -45,16 +45,16 @@ void expect_compressions_eq(const verification_key_data& vk0_data, const verific
}

/**
- * @brief expect that two vk data compressions are not-equal for a few different hash indices
+ * @brief expect that two vk data hashes are not-equal for a few different hash indices
*
* @param vk0_data
* @param vk1_data
*/
- void expect_compressions_ne(const verification_key_data& vk0_data, const verification_key_data& vk1_data)
+ void expect_hashes_ne(const verification_key_data& vk0_data, const verification_key_data& vk1_data)
{
EXPECT_NE(vk0_data.hash_native(0), vk1_data.hash_native(0));
// EXPECT_NE(vk0_data.hash_native(15), vk1_data.hash_native(15));
- // ne hash indices still lead to ne compressions
+ // ne hash indices still lead to ne hashes
// EXPECT_NE(vk0_data.hash_native(0), vk1_data.hash_native(15));
// EXPECT_NE(vk0_data.hash_native(14), vk1_data.hash_native(15));
}
@@ -82,14 +82,14 @@ TEST(VerificationKey, StreamSerialization)
EXPECT_EQ(vk_data, result);
}

- TEST(VerificationKey, BasicCompressionEquality)
+ TEST(VerificationKey, BasicHashEquality)
{
verification_key_data vk0_data = rand_vk_data();
verification_key_data vk1_data = vk0_data; // copy
- expect_compressions_eq(vk0_data, vk1_data);
+ expect_hashes_eq(vk0_data, vk1_data);
}

- TEST(VerificationKey, CompressionInequalityIndexMismatch)
+ TEST(VerificationKey, HashInequalityIndexMismatch)
{
verification_key_data vk0_data = rand_vk_data();
verification_key_data vk1_data = vk0_data; // copy
@@ -98,60 +98,60 @@ TEST(VerificationKey, CompressionInequalityIndexMismatch)
// EXPECT_NE(vk0_data.hash_native(14), vk1_data.hash_native(15));
}

- TEST(VerificationKey, CompressionInequalityCircuitType)
+ TEST(VerificationKey, HashInequalityCircuitType)
{
verification_key_data vk0_data = rand_vk_data();
verification_key_data vk1_data = vk0_data; // copy
vk0_data.circuit_type = static_cast<uint32_t>(CircuitType::ULTRA);
- expect_compressions_ne(vk0_data, vk1_data);
+ expect_hashes_ne(vk0_data, vk1_data);
}

- TEST(VerificationKey, CompressionInequalityDifferentCircuitSize)
+ TEST(VerificationKey, HashInequalityDifferentCircuitSize)
{
verification_key_data vk0_data = rand_vk_data();
verification_key_data vk1_data = vk0_data;
vk0_data.circuit_size = 4096;
- expect_compressions_ne(vk0_data, vk1_data);
+ expect_hashes_ne(vk0_data, vk1_data);
}

- TEST(VerificationKey, CompressionInequalityDifferentNumPublicInputs)
+ TEST(VerificationKey, HashInequalityDifferentNumPublicInputs)
{
verification_key_data vk0_data = rand_vk_data();
verification_key_data vk1_data = vk0_data;
vk0_data.num_public_inputs = 42;
- expect_compressions_ne(vk0_data, vk1_data);
+ expect_hashes_ne(vk0_data, vk1_data);
}

- TEST(VerificationKey, CompressionInequalityDifferentCommitments)
+ TEST(VerificationKey, HashInequalityDifferentCommitments)
{
verification_key_data vk0_data = rand_vk_data();
verification_key_data vk1_data = vk0_data;
vk0_data.commitments["test1"] = g1::element::random_element();
- expect_compressions_ne(vk0_data, vk1_data);
+ expect_hashes_ne(vk0_data, vk1_data);
}

- TEST(VerificationKey, CompressionInequalityDifferentNumCommitments)
+ TEST(VerificationKey, HashInequalityDifferentNumCommitments)
{
verification_key_data vk0_data = rand_vk_data();
verification_key_data vk1_data = vk0_data;
vk0_data.commitments["new"] = g1::element::random_element();
- expect_compressions_ne(vk0_data, vk1_data);
+ expect_hashes_ne(vk0_data, vk1_data);
}

- TEST(VerificationKey, CompressionEqualityDifferentContainsRecursiveProof)
+ TEST(VerificationKey, HashEqualityDifferentContainsRecursiveProof)
{
verification_key_data vk0_data = rand_vk_data();
verification_key_data vk1_data = vk0_data;
vk0_data.contains_recursive_proof = false;
vk1_data.contains_recursive_proof = true;
- expect_compressions_eq(vk0_data, vk1_data);
+ expect_hashes_eq(vk0_data, vk1_data);
}

- TEST(VerificationKey, CompressionEqualityDifferentRecursiveProofPublicInputIndices)
+ TEST(VerificationKey, HashEqualityDifferentRecursiveProofPublicInputIndices)
{
verification_key_data vk0_data = rand_vk_data();
verification_key_data vk1_data = vk0_data;
vk1_data.recursive_proof_public_input_indices.push_back(42);
- expect_compressions_eq(vk0_data, vk1_data);
+ expect_hashes_eq(vk0_data, vk1_data);
}
} // namespace proof_system::plonk::test_verification_key
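For orientation, a minimal usage sketch of the two native helpers exercised above. The `hash_native` and `hash_native_evaluation_domain` signatures are taken from this diff; the wrapper function, the `evaluation_domain` constructor argument, and the namespace qualifications are my assumptions:

```cpp
// Sketch only; assumes the plonk verification_key and polynomials/evaluation_domain headers are included.
void native_hash_usage_sketch(proof_system::plonk::verification_key_data const& vk_data)
{
    // The evaluation-domain hash no longer takes a CircuitType argument.
    barretenberg::evaluation_domain domain(1024); // assumed constructor taking the domain size
    barretenberg::fr domain_hash = proof_system::plonk::hash_native_evaluation_domain(domain);

    // hash_index selects a Pedersen generator offset, intended for domain separation
    // (the index-mismatch assertions in the tests above are currently commented out).
    barretenberg::fr h0 = vk_data.hash_native(0);
    barretenberg::fr h15 = vk_data.hash_native(15);

    static_cast<void>(domain_hash);
    static_cast<void>(h0);
    static_cast<void>(h15);
}
```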
@@ -40,11 +40,6 @@ cycle_group<C> pedersen_commitment<C>::commit(const std::vector<std::pair<field_
return cycle_group::batch_mul(scalars, points);
}

- // template <typename C>
- // field_t<C> pedersen_commitment<C>::compress(const std::vector<field_t>& inputs, const GeneratorContext context)
- // {
- //     return commit(inputs, context).x;
- // }
INSTANTIATE_STDLIB_TYPE(pedersen_commitment);

} // namespace proof_system::plonk::stdlib
@@ -63,7 +63,7 @@ TYPED_TEST(VerificationKeyFixture, VkDataVsRecursionHashNative)

EXPECT_EQ(vk_data.hash_native(0), RecursVk::hash_native(native_vk, 0));
// EXPECT_EQ(vk_data.hash_native(15), RecursVk::hash_native(native_vk, 15));
- // // ne hash indeces still lead to ne compressions
+ // // ne hash indices still lead to ne hashes
// EXPECT_NE(vk_data.hash_native(0), RecursVk::hash_native(native_vk, 15));
// EXPECT_NE(vk_data.hash_native(14), RecursVk::hash_native(native_vk, 15));
}
8 changes: 4 additions & 4 deletions barretenberg/cpp/src/barretenberg/transcript/transcript.cpp
@@ -46,8 +46,8 @@ std::array<uint8_t, Keccak256Hasher::PRNG_OUTPUT_SIZE> Keccak256Hasher::hash(std
std::array<uint8_t, Blake3sHasher::PRNG_OUTPUT_SIZE> Blake3sHasher::hash(std::vector<uint8_t> const& buffer)
{
grumpkin::fq input = grumpkin::fq::serialize_from_buffer(&buffer[0]);
- grumpkin::fq compressed = crypto::pedersen_hash::hash({ input });
- std::vector<uint8_t> res = to_buffer(compressed);
+ grumpkin::fq hashed = crypto::pedersen_hash::hash({ input });
+ std::vector<uint8_t> res = to_buffer(hashed);
std::array<uint8_t, PRNG_OUTPUT_SIZE> result;
for (size_t i = 0; i < PRNG_OUTPUT_SIZE; ++i) {
result[i] = res[i];
@@ -217,8 +217,8 @@ void Transcript::apply_fiat_shamir(const std::string& challenge_name /*, const b
break;
}
case HashType::PedersenBlake3s: {
- std::vector<uint8_t> compressed_buffer = to_buffer(crypto::pedersen_hash::hash_buffer(buffer));
- base_hash = Blake3sHasher::hash(compressed_buffer);
+ std::vector<uint8_t> hashed_buffer = to_buffer(crypto::pedersen_hash::hash_buffer(buffer));
+ base_hash = Blake3sHasher::hash(hashed_buffer);
break;
}
default: {
4 changes: 2 additions & 2 deletions circuits/cpp/src/aztec3/circuits/abis/c_bind.cpp
@@ -164,10 +164,10 @@ WASM_EXPORT void abis__compute_function_selector(char const* func_sig_cstr, uint
}

/**
- * @brief Hash/compress verification key data.
+ * @brief Hash verification key data.
* This is a WASM-export that can be called from Typescript.
*
- * @details Pedersen compress VK to use later when computing function leaf
+ * @details Pedersen hash VK to use later when computing function leaf
* or constructor hash. Return the serialized results in the `output` buffer.
*
* @param vk_data_buf buffer of bytes representing serialized verification_key_data
@@ -67,7 +67,7 @@ export async function buildPayload(calls: FunctionCall[]): Promise<{
};
}

- /** Compresses an entrypoint payload to a 32-byte buffer (useful for signing) */
+ /** Hashes an entrypoint payload to a 32-byte buffer (useful for signing) */
export async function hashPayload(payload: EntrypointPayload) {
return pedersenHashWithHashIndex(
await CircuitsWasm.get(),
@@ -12,7 +12,7 @@ export class PedersenWithCounter extends Pedersen {
public hashCounter = 0;

/**
- * Compresses two 32-byte hashes.
+ * Hashes two 32-byte arrays.
* @param lhs - The first 32-byte array.
* @param rhs - The second 32-byte array.
* @returns The new 32-byte hash.